Posted to commits@geode.apache.org by kl...@apache.org on 2016/10/26 20:10:25 UTC
[01/15] incubator-geode git commit: GEODE-538: Add check for persistent data recovery [Forced Update!]
Repository: incubator-geode
Updated Branches:
refs/heads/feature/GEODE-2012 026ca407b -> 5ab02de6c (forced update)
GEODE-538: Add check for persistent data recovery
PartitionedRegion.getNodeForBucketReadOrLoad can return an invalid node
if persistent data recovery is in progress and a get() targets a bucket
that hasn't been recovered yet. This can result in returning an incorrect
value (null) or throwing ConflictingPersistentDataException from a get()
or put() on the region.
This change adds a check that persistent recovery has completed
before a new bucket is created. If recovery isn't complete, the
operation on the region fails with a PartitionOfflineException.
Queries on a region while persistent recovery is in progress can also
return incorrect results, so a similar check is added to
DefaultQuery.checkQueryOnPR.
This closes #264
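
In practice the new guard surfaces at the public API: a get(), put(), or
query that reaches a persistent PR whose disk recovery hasn't finished now
fails fast instead of returning a wrong (null) value. A minimal caller-side
sketch, assuming region is a persistent partitioned Region and key is some
key in it (both hypothetical names here):

    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.persistence.PartitionOfflineException;

    // PartitionOfflineException is unchecked, so callers opt in to handling it.
    try {
      Object value = region.get(key); // may target a not-yet-recovered bucket
    } catch (PartitionOfflineException e) {
      // Persistent data for the bucket hasn't been recovered yet;
      // wait for recovery to finish and retry the operation.
    }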
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/11ef3ebb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/11ef3ebb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/11ef3ebb
Branch: refs/heads/feature/GEODE-2012
Commit: 11ef3ebbe30a8340f57776bf4063684b91ccd0a3
Parents: 7511ffa
Author: Ken Howe <kh...@pivotal.io>
Authored: Thu Oct 6 15:02:24 2016 -0700
Committer: Anil <ag...@pivotal.io>
Committed: Wed Oct 19 15:49:33 2016 -0700
----------------------------------------------------------------------
.../org/apache/geode/cache/query/Query.java | 12 +
.../cache/query/internal/DefaultQuery.java | 6 +-
.../internal/cache/PRHARedundancyProvider.java | 9 +-
.../geode/internal/cache/PartitionedRegion.java | 18 +-
.../geode/internal/i18n/LocalizedStrings.java | 1 +
.../partitioned/PRBasicQueryDUnitTest.java | 221 ++++++++++
.../query/partitioned/PRQueryDUnitHelper.java | 185 +++++++++
...tentColocatedPartitionedRegionDUnitTest.java | 411 ++++++++++++++++++-
8 files changed, 844 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
index e27687d..670b262 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/Query.java
@@ -89,6 +89,9 @@ public interface Query {
* @throws QueryExecutionLowMemoryException
* If the query gets canceled due to low memory conditions and
* the resource manager critical heap percentage has been set
+ * @throws PartitionOfflineException
+ * If persistent data recovery is not complete for a partitioned
+ * region referred to in the query.
*/
public Object execute()
throws FunctionDomainException, TypeMismatchException, NameResolutionException,
@@ -150,6 +153,9 @@ public interface Query {
* @throws QueryExecutionLowMemoryException
* If the query gets canceled due to low memory conditions and
* the resource manager critical heap percentage has been set
+ * @throws PartitionOfflineException
+ * If persistent data recovery is not complete for a partitioned
+ * region referred to in the query.
*
*/
public Object execute(Object[] params)
@@ -220,6 +226,9 @@ public interface Query {
* @throws QueryExecutionLowMemoryException
* If the query gets canceled due to low memory conditions and
* the resource manager critical heap percentage has been set
+ * @throws PartitionOfflineException
+ * If persistent data recovery is not complete for a partitioned
+ * region referred to in the query.
*/
public Object execute(RegionFunctionContext context)
throws FunctionDomainException, TypeMismatchException, NameResolutionException,
@@ -291,6 +300,9 @@ public interface Query {
* @throws QueryExecutionLowMemoryException
* If the query gets canceled due to low memory conditions and
* the resource manager critical heap percentage has been set
+ * @throws PartitionOfflineException
+ * If persistent data recovery is not complete for a partitioned
+ * region referred to in the query.
*/
public Object execute(RegionFunctionContext context, Object[] params)
throws FunctionDomainException, TypeMismatchException, NameResolutionException,
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
index 58df390..8175d82 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java
@@ -27,11 +27,14 @@ import org.apache.geode.cache.client.internal.UserAttributes;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.cache.query.*;
import org.apache.geode.cache.query.internal.cq.InternalCqQuery;
import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.internal.NanoTimer;
import org.apache.geode.internal.cache.*;
+import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
import org.apache.geode.internal.i18n.LocalizedStrings;
import java.util.*;
@@ -581,7 +584,7 @@ public class DefaultQuery implements Query {
}
- private QueryExecutor checkQueryOnPR(Object[] parameters) throws RegionNotFoundException {
+ private QueryExecutor checkQueryOnPR(Object[] parameters) throws RegionNotFoundException, PartitionOfflineException {
// check for PartitionedRegions. If a PartitionedRegion is referred to in the query,
// then the following restrictions apply:
@@ -601,6 +604,7 @@ public class DefaultQuery implements Query {
throw new RegionNotFoundException(LocalizedStrings.DefaultQuery_REGION_NOT_FOUND_0.toLocalizedString(regionPath));
}
if (rgn instanceof QueryExecutor) {
+ ((PartitionedRegion)rgn).checkPROffline();
prs.add((QueryExecutor)rgn);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
index cfedb67..6245c37 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRHARedundancyProvider.java
@@ -24,6 +24,7 @@ import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.distributed.internal.DM;
import org.apache.geode.distributed.internal.DistributionConfig;
@@ -495,16 +496,20 @@ public class PRHARedundancyProvider
* redundancy.
* @throws PartitionedRegionException
* if d-lock can not be acquired to create bucket.
- *
+ * @throws PartitionOfflineException
+ * if persistent data recovery is not complete for this
+ * partitioned region.
*/
public InternalDistributedMember
createBucketAtomically(final int bucketId,
final int newBucketSize,
final long startTime,
final boolean finishIncompleteCreation, String partitionName) throws PartitionedRegionStorageException,
- PartitionedRegionException
+ PartitionedRegionException, PartitionOfflineException
{
final boolean isDebugEnabled = logger.isDebugEnabled();
+
+ prRegion.checkPROffline();
// If there are insufficient stores throw *before* we try acquiring the
// (very expensive) bucket lock or the (somewhat expensive) monitor on this
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index baab79f..f7ecdaf 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -28,6 +28,8 @@ import org.apache.geode.cache.client.internal.*;
import org.apache.geode.cache.execute.*;
import org.apache.geode.cache.partition.PartitionListener;
import org.apache.geode.cache.partition.PartitionNotAvailableException;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.cache.query.*;
import org.apache.geode.cache.query.internal.*;
import org.apache.geode.cache.query.internal.index.*;
@@ -1397,6 +1399,21 @@ public class PartitionedRegion extends LocalRegion implements
new UpdateAttributesProcessor(this).distribute(false);
}
+ /**
+ * Throw an exception if persistent data recovery from disk is not complete
+ * for this region.
+ *
+ * @throws PartitionOfflineException
+ */
+ public void checkPROffline() throws PartitionOfflineException {
+ if (getDataPolicy().withPersistence() && !recoveredFromDisk) {
+ Set<PersistentID> persistIds = new HashSet<>(getRegionAdvisor().advisePersistentMembers().values());
+ persistIds.removeAll(getRegionAdvisor().adviseInitializedPersistentMembers().values());
+ throw new PartitionOfflineException(persistIds, LocalizedStrings.PRHARedundancyProvider_PARTITIONED_REGION_0_OFFLINE_HAS_UNRECOVERED_PERSISTENT_DATA_1
+ .toLocalizedString(new Object[] { getFullPath(), persistIds}));
+ }
+ }
+
public final void updatePRConfig(PartitionRegionConfig prConfig,
boolean putOnlyIfUpdated) {
final Set<Node> nodes = prConfig.getNodes();
@@ -3057,7 +3074,6 @@ public class PartitionedRegion extends LocalRegion implements
final RetryTimeKeeper snoozer) {
final boolean isDebugEnabled = logger.isDebugEnabled();
-// InternalDistributedSystem ids = (InternalDistributedSystem)this.cache.getDistributedSystem();
RetryTimeKeeper localSnoozer = snoozer;
// Prevent early access to buckets that are not completely created/formed
// and
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
index 8bfdd68..7d762b8 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
@@ -702,6 +702,7 @@ public class LocalizedStrings {
public static final StringId AbstractDistributionConfig_CLIENT_CONFLATION_PROP_NAME = new StringId(1839, "Client override for server queue conflation setting");
public static final StringId PRHARRedundancyProvider_ALLOCATE_ENOUGH_MEMBERS_TO_HOST_BUCKET = new StringId(1840, "allocate enough members to host bucket.");
public static final StringId PRHARedundancyProvider_TIME_OUT_WAITING_0_MS_FOR_CREATION_OF_BUCKET_FOR_PARTITIONED_REGION_1_MEMBERS_REQUESTED_TO_CREATE_THE_BUCKET_ARE_2 = new StringId(1841, "Time out waiting {0} ms for creation of bucket for partitioned region {1}. Members requested to create the bucket are: {2}");
+ public static final StringId PRHARedundancyProvider_PARTITIONED_REGION_0_OFFLINE_HAS_UNRECOVERED_PERSISTENT_DATA_1 = new StringId(1842, "Partitioned Region {0} is offline due to unrecovered persistent data, {1}");
public static final StringId PUT_0_FAILED_TO_PUT_ENTRY_FOR_REGION_1_KEY_2_VALUE_3 = new StringId(1843, "{0}: Failed to put entry for region {1} key {2} value {3}");
public static final StringId PUT_0_UNEXPECTED_EXCEPTION = new StringId(1844, "{0}: Unexpected Exception");
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
index 8ef907a..224a7e0 100755
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRBasicQueryDUnitTest.java
@@ -29,6 +29,7 @@ import static org.apache.geode.cache.query.Utils.*;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.IndexType;
import org.apache.geode.cache.query.Query;
@@ -38,6 +39,7 @@ import org.apache.geode.cache.query.data.Portfolio;
import org.apache.geode.cache.query.data.PortfolioData;
import org.apache.geode.cache30.CacheSerializableRunnable;
import org.apache.geode.internal.cache.PartitionedRegionDUnitTestCase;
+import org.apache.geode.test.dunit.AsyncInvocation;
import org.apache.geode.test.dunit.Host;
import org.apache.geode.test.dunit.LogWriterUtils;
import org.apache.geode.test.dunit.VM;
@@ -67,6 +69,8 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
}
}
+ private final static int MAX_SYNC_WAIT = 30 * 1000;
+
PRQueryDUnitHelper PRQHelp = new PRQueryDUnitHelper();
final String name = "Portfolios";
@@ -153,6 +157,223 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
"PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
}
+ /**
+ * A basic dunit test that <br>
+ * 1. Creates a colocated parent and child PR, with an accessor and a data store, with redundantCopies = 0.
+ * 2. Populates the regions with test data.
+ * 3. Fires a query on the accessor VM and verifies the result.
+ * 4. Shuts down the caches, then restarts them asynchronously.
+ * 5. Attempts the query while the regions are being recovered.
+ * @throws Exception
+ */
+ @Test
+ public void testColocatedPRQueryDuringRecovery() throws Exception
+ {
+ Host host = Host.getHost(0);
+ VM vm0 = host.getVM(0);
+ VM vm1 = host.getVM(1);
+ setCacheInVMs(vm0, vm1);
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR Test with DACK Started");
+
+ // Creating PRs on the participating VMs
+ // Creating the accessor node on VM0.
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+ redundancy, PortfolioData.class, true));
+ // Creating local region on vm0 to compare the results of query.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+
+ // Creating the datastore node in VM1.
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- Creating the Datastore node in the PR");
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created the Datastore node in the PR");
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created PR's across all VM's");
+
+ // Generating portfolio object array to be populated across the PR's & Local
+ // Regions
+
+ final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
+ // Putting the data into the PR's created
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
+ cnt, cntDest));
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
+ cnt, cntDest));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Inserted Portfolio data across PR's");
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
+ portfolio, cnt, cntDest));
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(localName,
+ portfolio, cnt, cntDest));
+
+ // querying the VM for data and comparing the result with query result of
+ // local region.
+ // querying the VM for data
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
+ name, localName));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 1st pass ENDED");
+
+ // Shut everything down and then restart to test queries during recovery
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+
+ // Re-create the regions - only create the parent regions on the datastores
+ setCacheInVMs(vm0, vm1);
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ // Creating local region on vm0 to compare the results of query.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying: re-creating the Datastore node in the PR");
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ // Now start the child regions asynchronously so queries will happen during persistent recovery
+ AsyncInvocation vm0PR = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForColocatedChildCreate(name,
+ redundancy, PortfolioData.class, true));
+ AsyncInvocation vm1PR = vm1.invokeAsync(PRQHelp.getCacheSerializableRunnableForColocatedChildCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ // delay the query to let the recovery get underway
+ Thread.sleep(100);
+
+ try {
+ // This is a repeat of the original query from before closing and restarting the datastores. This time
+ // it should fail due to persistent recovery that has not completed.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName, true));
+ fail("Expected PartitionOfflineException when queryiong a region with offline colocated child");
+ } catch (Exception e) {
+ if (!(e.getCause() instanceof PartitionOfflineException)) {
+ e.printStackTrace();
+ throw e;
+ }
+ }
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 2nd pass (after restarting regions) ENDED");
+ }
+
+ /**
+ * A basic dunit test that <br>
+ * 1. Creates a colocated parent and child PR, with an accessor and a data store, with redundantCopies = 0.
+ * 2. Populates the regions with test data.
+ * 3. Fires a query on the accessor VM and verifies the result.
+ * 4. Shuts down the caches, then restarts them asynchronously, but does not restart the child region.
+ * 5. Attempts the query while the region is offline because of the missing child region.
+ * @throws Exception
+ */
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testColocatedPRQueryDuringRecoveryWithMissingColocatedChild() throws Exception
+ {
+ Host host = Host.getHost(0);
+ VM vm0 = host.getVM(0);
+ VM vm1 = host.getVM(1);
+ setCacheInVMs(vm0, vm1);
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR Test with DACK Started");
+
+ // Creating PRs on the participating VMs
+ // Creating the accessor node on VM0.
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+ redundancy, PortfolioData.class, true));
+ // Creating local region on vm0 to compare the results of query.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+
+ // Creating the datastore node in VM1.
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- Creating the Datastore node in the PR");
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedPRCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created the Datastore node in the PR");
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully Created PR's across all VM's");
+
+ // Generating portfolio object array to be populated across the PR's & Local
+ // Regions
+
+ final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
+ // Putting the data into the PR's created
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
+ cnt, cntDest));
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
+ cnt, cntDest));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Inserted Portfolio data across PR's");
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
+ portfolio, cnt, cntDest));
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(localName,
+ portfolio, cnt, cntDest));
+
+ // querying the VM for data and comparing the result with query result of
+ // local region.
+ // querying the VM for data
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
+ name, localName));
+
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 1st pass ENDED");
+
+ // Shut everything down and then restart to test queries during recovery
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForCloseCache());
+
+ // Re-create only the parent region
+ setCacheInVMs(vm0, vm1);
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Creating the Accessor node in the PR");
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ // Creating local region on vm0 to compare the results of query.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Successfully created the Accessor node in the PR");
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest:testColocatedPRBasicQuerying ----- re-creating the Datastore node in the PR");
+ vm1.invoke(PRQHelp.getCacheSerializableRunnableForColocatedParentCreate(name,
+ redundancy, PortfolioData.class, true));
+
+ try {
+ // This is a repeat of the original query from before closing and restarting the datastores. This time
+ // it should fail due to persistent recovery that has not completed.
+ vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName, true));
+ fail("Expected PartitionOfflineException when queryiong a region with offline colocated child");
+ } catch (Exception e) {
+ if (!(e.getCause() instanceof PartitionOfflineException)) {
+ throw e;
+ }
+ }
+ LogWriterUtils.getLogWriter()
+ .info("PRQBasicQueryDUnitTest#testColocatedPRBasicQuerying: Querying PR's 2nd pass (after restarting regions) ENDED");
+ }
+
@Test
public void testPRCountStarQuery() throws Exception
{
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
index cfb4190..9dc90fd 100755
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRQueryDUnitHelper.java
@@ -39,6 +39,7 @@ import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.CacheException;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.EntryExistsException;
import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.PartitionAttributes;
@@ -249,6 +250,190 @@ public class PRQueryDUnitHelper implements Serializable {
return (CacheSerializableRunnable)createPrRegion;
}
+ /**
+ * Creates a colocated pair of PRs given the scope and the
+ * redundancy parameters for the parent.
+ *
+ * @param regionName
+ * @param redundancy
+ * @param constraint
+ * @param makePersistent
+ * @return cacheSerializable object
+ */
+ public CacheSerializableRunnable getCacheSerializableRunnableForColocatedPRCreate(
+ final String regionName, final int redundancy, final Class constraint, boolean makePersistent) {
+
+ final String childRegionName = regionName + "Child";
+ final String diskName = "disk";
+ SerializableRunnable createPrRegion;
+ createPrRegion = new CacheSerializableRunnable(regionName) {
+ @Override
+ public void run2() throws CacheException
+ {
+
+ Cache cache = getCache();
+ Region partitionedregion = null;
+ Region childRegion = null;
+ AttributesFactory attr = new AttributesFactory();
+ attr.setValueConstraint(constraint);
+ if (makePersistent) {
+ DiskStore ds = cache.findDiskStore(diskName);
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(JUnit4CacheTestCase.getDiskDirs())
+ .create(diskName);
+ }
+ attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ attr.setDiskStoreName(diskName);
+ } else {
+ attr.setDataPolicy(DataPolicy.PARTITION);
+ attr.setDiskStoreName(null);
+ }
+
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(redundancy);
+ attr.setPartitionAttributes(paf.create());
+
+ // parent region
+ partitionedregion = cache.createRegion(regionName, attr.create());
+ assertNotNull(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
+ + regionName + " not in cache", cache.getRegion(regionName));
+ assertNotNull(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
+ partitionedregion);
+ assertTrue(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
+ !partitionedregion.isDestroyed());
+
+ // child region
+ attr.setValueConstraint(constraint);
+ paf.setColocatedWith(regionName);
+ attr.setPartitionAttributes(paf.create());
+ childRegion = cache.createRegion(childRegionName, attr.create());
+ }
+ };
+
+ return (CacheSerializableRunnable)createPrRegion;
+ }
+
+ /**
+ * Creates only the parent region of a colocated pair of PRs, given the
+ * scope and the redundancy parameters for the parent.
+ *
+ * @param regionName
+ * @param redundancy
+ * @param constraint
+ * @param makePersistent
+ * @return cacheSerializable object
+ */
+ public CacheSerializableRunnable getCacheSerializableRunnableForColocatedParentCreate(
+ final String regionName, final int redundancy, final Class constraint, boolean makePersistent) {
+
+ final String childRegionName = regionName + "Child";
+ final String diskName = "disk";
+ SerializableRunnable createPrRegion;
+ createPrRegion = new CacheSerializableRunnable(regionName + "-NoChildRegion") {
+ @Override
+ public void run2() throws CacheException
+ {
+
+ Cache cache = getCache();
+ Region partitionedregion = null;
+ Region childRegion = null;
+ AttributesFactory attr = new AttributesFactory();
+ attr.setValueConstraint(constraint);
+ if (makePersistent) {
+ DiskStore ds = cache.findDiskStore(diskName);
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(JUnit4CacheTestCase.getDiskDirs())
+ .create(diskName);
+ }
+ attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ attr.setDiskStoreName(diskName);
+ } else {
+ attr.setDataPolicy(DataPolicy.PARTITION);
+ attr.setDiskStoreName(null);
+ }
+
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(redundancy);
+ attr.setPartitionAttributes(paf.create());
+
+ // parent region
+ partitionedregion = cache.createRegion(regionName, attr.create());
+ assertNotNull(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
+ + regionName + " not in cache", cache.getRegion(regionName));
+ assertNotNull(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
+ partitionedregion);
+ assertTrue(
+ "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
+ !partitionedregion.isDestroyed());
+ }
+ };
+
+ return (CacheSerializableRunnable)createPrRegion;
+ }
+
+ /**
+ * Creates only the child region of a colocated pair of PRs, given the
+ * scope and the redundancy parameters for the parent.
+ *
+ * @param regionName
+ * @param redundancy
+ * @param constraint
+ * @param isPersistent
+ * @return cacheSerializable object
+ */
+ public CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(
+ final String regionName, final int redundancy, final Class constraint, boolean isPersistent) {
+
+ final String childRegionName = regionName + "Child";
+ final String diskName = "disk";
+ SerializableRunnable createPrRegion;
+ createPrRegion = new CacheSerializableRunnable(regionName + "-ChildRegion") {
+ @Override
+ public void run2() throws CacheException
+ {
+
+ Cache cache = getCache();
+ Region partitionedregion = null;
+ Region childRegion = null;
+ AttributesFactory attr = new AttributesFactory();
+ attr.setValueConstraint(constraint);
+ if (isPersistent) {
+ DiskStore ds = cache.findDiskStore(diskName);
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase.getDiskDirs())
+ .create(diskName);
+ }
+ attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ attr.setDiskStoreName(diskName);
+ } else {
+ attr.setDataPolicy(DataPolicy.PARTITION);
+ attr.setDiskStoreName(null);
+ }
+
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(redundancy);
+ attr.setPartitionAttributes(paf.create());
+
+ // skip parent region creation
+ // partitionedregion = cache.createRegion(regionName, attr.create());
+
+ // child region
+ attr.setValueConstraint(constraint);
+ paf.setColocatedWith(regionName);
+ attr.setPartitionAttributes(paf.create());
+ childRegion = cache.createRegion(childRegionName, attr.create());
+ }
+ };
+
+ return (CacheSerializableRunnable)createPrRegion;
+ }
+
public CacheSerializableRunnable getCacheSerializableRunnableForPRCreateLimitedBuckets(
final String regionName, final int redundancy, final int buckets) {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/11ef3ebb/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index 0a25228..c15d545 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -50,7 +50,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.jayway.awaitility.core.ConditionTimeoutException;
-import org.junit.experimental.categories.Category;
import org.apache.geode.admin.internal.AdminDistributedSystemImpl;
import org.apache.geode.cache.AttributesFactory;
@@ -64,6 +63,7 @@ import org.apache.geode.cache.Region;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.persistence.PartitionOfflineException;
+import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.DistributionMessage;
import org.apache.geode.distributed.internal.DistributionMessageObserver;
@@ -72,11 +72,14 @@ import org.apache.geode.internal.FileUtil;
import org.apache.geode.internal.cache.ColocationLogger;
import org.apache.geode.internal.cache.InitialImageOperation.RequestImageMessage;
import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionHelper;
import org.apache.geode.internal.cache.control.InternalResourceManager;
import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver;
import org.apache.geode.test.dunit.Assert;
import org.apache.geode.test.dunit.AsyncInvocation;
import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.LogWriterUtils;
+import org.apache.geode.test.dunit.RMIException;
import org.apache.geode.test.dunit.Host;
import org.apache.geode.test.dunit.SerializableCallable;
import org.apache.geode.test.dunit.SerializableRunnable;
@@ -2088,7 +2091,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
};
//runnable to create PRs
- SerializableRunnable createPRs = new SerializableRunnable("region1") {
+ SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
public void run() {
Cache cache = getCache();
@@ -2112,7 +2115,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
};
//runnable to close the cache.
- SerializableRunnable closeCache = new SerializableRunnable("region1") {
+ SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
public void run() {
closeCache();
}
@@ -2120,7 +2123,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
//Runnable to do a bunch of puts handle exceptions
//due to the fact that member is offline.
- SerializableRunnable doABunchOfPuts = new SerializableRunnable("region1") {
+ SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(PR_REGION_NAME);
@@ -2200,7 +2203,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
@Category(FlakyTest.class) // GEODE-506: time sensitive, async actions with 30 sec max
@Test
public void testRebalanceWithOfflineChildRegion() throws Throwable {
- SerializableRunnable createParentPR = new SerializableRunnable() {
+ SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
public void run() {
Cache cache = getCache();
@@ -2220,7 +2223,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
}
};
- SerializableRunnable createChildPR = new SerializableRunnable() {
+ SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
public void run() {
Cache cache = getCache();
@@ -2325,7 +2328,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
};
vm1.invoke(addHook);
-// vm1.invoke(addHook);
AsyncInvocation async0;
AsyncInvocation async1;
AsyncInvocation async2;
@@ -2335,7 +2337,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
async1 = vm1.invokeAsync(createPRs);
vm1.invoke(waitForHook);
-// vm1.invoke(waitForHook);
//Now create the parent region on vm-2. vm-2 did not
//previously hosted the child region.
@@ -2347,7 +2348,6 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
} finally {
vm1.invoke(removeHook);
-// vm1.invoke(removeHook);
}
async0.getResult(MAX_WAIT);
@@ -2473,6 +2473,188 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
closeCache();
}
+ @Test
+ public void testParentRegionGetWithOfflineChildRegion() throws Throwable {
+
+ SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+ public void run() {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ try {
+ Cache cache = getCache();
+ DiskStore ds = cache.findDiskStore("disk");
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+ }
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ af.setPartitionAttributes(paf.create());
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ cache.createRegion(PR_REGION_NAME, af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ }
+ }
+ };
+
+ SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+ public void run() throws InterruptedException {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ try {
+ Cache cache = getCache();
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ paf.setColocatedWith(PR_REGION_NAME);
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ af.setPartitionAttributes(paf.create());
+ // delay child region creations to cause a delay in persistent recovery
+ Thread.sleep(100);
+ cache.createRegion("region2", af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ }
+ }
+ };
+
+ boolean caughtException = false;
+ try {
+ // Expect a get() on the un-recovered (due to offline child) parent region to fail
+ regionGetWithOfflineChild(createParentPR, createChildPR, false);
+ } catch (Exception e) {
+ caughtException = true;
+ assertTrue(e instanceof RMIException);
+ assertTrue(e.getCause() instanceof PartitionOfflineException);
+ }
+ if (!caughtException) {
+ fail("Expected TimeoutException from remote");
+ }
+ }
+
+ @Test
+ public void testParentRegionGetWithRecoveryInProgress() throws Throwable {
+ SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+ public void run() {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ try {
+ Cache cache = getCache();
+ DiskStore ds = cache.findDiskStore("disk");
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+ }
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ af.setPartitionAttributes(paf.create());
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ cache.createRegion(PR_REGION_NAME, af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ System.out.println("oldRetryTimeout = " + oldRetryTimeout);
+ }
+ }
+ };
+
+ SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+ public void run() throws InterruptedException {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ try {
+ Cache cache = getCache();
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ paf.setColocatedWith(PR_REGION_NAME);
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ af.setPartitionAttributes(paf.create());
+ cache.createRegion("region2", af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ }
+ }
+ };
+
+ boolean caughtException = false;
+ try {
+ // Expect a get() on the un-recovered (due to offline child) parent region to fail
+ regionGetWithOfflineChild(createParentPR, createChildPR, false);
+ } catch (Exception e) {
+ caughtException = true;
+ assertTrue(e instanceof RMIException);
+ assertTrue(e.getCause() instanceof PartitionOfflineException);
+ }
+ if (!caughtException) {
+ fail("Expected TimeoutException from remote");
+ }
+ }
+
+ @Test
+ public void testParentRegionPutWithRecoveryInProgress() throws Throwable {
+ SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
+ public void run() {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ System.out.println("oldRetryTimeout = " + oldRetryTimeout);
+ try {
+ Cache cache = getCache();
+ DiskStore ds = cache.findDiskStore("disk");
+ if (ds == null) {
+ ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+ }
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ af.setPartitionAttributes(paf.create());
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ cache.createRegion(PR_REGION_NAME, af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ }
+ }
+ };
+
+ SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
+ public void run() throws InterruptedException {
+ String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
+ try {
+ Cache cache = getCache();
+ AttributesFactory af = new AttributesFactory();
+ PartitionAttributesFactory paf = new PartitionAttributesFactory();
+ paf.setRedundantCopies(0);
+ paf.setRecoveryDelay(0);
+ paf.setColocatedWith(PR_REGION_NAME);
+ af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+ af.setDiskStoreName("disk");
+ af.setPartitionAttributes(paf.create());
+ Thread.sleep(1000);
+ cache.createRegion("region2", af.create());
+ } finally {
+ System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
+ }
+ }
+ };
+
+ boolean caughtException = false;
+ try {
+ // Expect a put() on the un-recovered (due to offline child) parent region to fail
+ regionPutWithOfflineChild(createParentPR, createChildPR, false);
+ } catch (Exception e) {
+ caughtException = true;
+ assertTrue(e instanceof RMIException);
+ assertTrue(e.getCause() instanceof PartitionOfflineException);
+ }
+ if (!caughtException) {
+ fail("Expected TimeoutException from remote");
+ }
+ }
+
/**
* Create three PRs on a VM, named region1, region2, and region3.
* The colocated with attribute describes which region region3
@@ -2523,15 +2705,15 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
vm1.invoke(createParentPR);
vm0.invoke(createChildPR);
vm1.invoke(createChildPR);
-
+
//Create some buckets.
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "a", "region2");
-
+
//Close the members
closeCache(vm1);
closeCache(vm0);
-
+
//Recreate the parent region. Try to make sure that
//the member with the latest copy of the buckets
//is the one that decides to throw away it's copy
@@ -2540,18 +2722,17 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
async0.getResult(MAX_WAIT);
async1.getResult(MAX_WAIT);
-
//Now create the parent region on vm-2. vm-2 did not
//previous host the child region.
vm2.invoke(createParentPR);
-
+
//Rebalance the parent region.
//This should not move any buckets, because
//we haven't recovered the child region
RebalanceResults rebalanceResults = rebalance(vm2);
assertEquals(0, rebalanceResults.getTotalBucketTransfersCompleted());
-
+
//Recreate the child region.
async1 = vm1.invokeAsync(createChildPR);
async0 = vm0.invokeAsync(createChildPR);
@@ -2568,6 +2749,206 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends PersistentPar
createData(vm0, 0, NUM_BUCKETS, "c", "region2");
}
+ /**
+ * Create a colocated pair of persistent regions and populate them with data. Shut down the servers and then
+ * restart them and check the data.
+ * <p>
+ * On the restart, try region operations ({@code get()}) on the parent region before or during persistent recovery.
+ * The {@code concurrentCheckData} argument determines whether the operation from the parent region occurs before
+ * or concurrent with the child region creation and recovery.
+ *
+ * @param createParentPR {@link SerializableRunnable} for creating the parent region on one member
+ * @param createChildPR {@link SerializableRunnable} for creating the child region on one member
+ * @param concurrentCheckData
+ * @throws Throwable
+ */
+ public void regionGetWithOfflineChild(
+ SerializableRunnable createParentPR,
+ SerializableRunnable createChildPR,
+ boolean concurrentCheckData) throws Throwable {
+ Host host = Host.getHost(0);
+ final VM vm0 = host.getVM(0);
+ VM vm1 = host.getVM(1);
+ VM vm2 = host.getVM(2);
+
+ //Create the PRs on two members
+ vm0.invoke(createParentPR);
+ vm1.invoke(createParentPR);
+ vm0.invoke(createChildPR);
+ vm1.invoke(createChildPR);
+
+ //Create some buckets.
+ createData(vm0, 0, NUM_BUCKETS, "a");
+ createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+ //Close the members
+ closeCache(vm1);
+ closeCache(vm0);
+
+ SerializableRunnable checkDataOnParent = (new SerializableRunnable("checkDataOnParent") {
+ @Override
+ public void run() {
+ Cache cache = getCache();
+ Region region = cache.getRegion(PR_REGION_NAME);
+
+ for (int i = 0; i < NUM_BUCKETS; i++) {
+ assertEquals("For key " + i, "a", region.get(i));
+ }
+ }
+ });
+
+ try {
+ //Recreate the parent region. Try to make sure that
+ //the member with the latest copy of the buckets
+ //is the one that decides to throw away its copy
+ //by starting it last.
+ AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
+ AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
+ async0.getResult(MAX_WAIT);
+ async1.getResult(MAX_WAIT);
+ //Now create the parent region on vm-2. vm-2 did not
+ //previously hosted the child region.
+ vm2.invoke(createParentPR);
+
+ AsyncInvocation async2 = null;
+ AsyncInvocation asyncCheck = null;
+ if (concurrentCheckData) {
+ //Recreate the child region.
+ async1 = vm1.invokeAsync(createChildPR);
+ async0 = vm0.invokeAsync(createChildPR);
+ async2 = vm2.invokeAsync(new SerializableRunnable("delay") {
+ @Override
+ public void run() throws InterruptedException {
+ Thread.sleep(100);
+ vm2.invoke(createChildPR);
+ }
+ });
+
+ asyncCheck = vm0.invokeAsync(checkDataOnParent);
+ } else {
+ vm0.invoke(checkDataOnParent);
+ }
+ async0.getResult(MAX_WAIT);
+ async1.getResult(MAX_WAIT);
+ if (async2 != null) {
+ async2.getResult(MAX_WAIT);
+ asyncCheck.getResult(MAX_WAIT);
+ }
+ //Validate the data
+ checkData(vm0, 0, NUM_BUCKETS, "a");
+ checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
+ //Make sure we can actually use the buckets in the child region.
+ createData(vm0, 0, NUM_BUCKETS, "c", "region2");
+ } finally {
+ //Close the members
+ closeCache(vm1);
+ closeCache(vm0);
+ closeCache(vm2);
+ }
+ }
+ /**
+ * Create a colocated pair of persistent regions and populate them with data. Shut down the servers and then
+ * restart them.
+ * <p>
+ * On the restart, try region operations ({@code put()}) on the parent region before or during persistent recovery.
+ * The {@code concurrentCreateData} argument determines whether the operation from the parent region occurs before
+ * or concurrent with the child region creation and recovery.
+ *
+ * @param createParentPR {@link SerializableRunnable} for creating the parent region on one member
+ * @param createChildPR {@link SerializableRunnable} for creating the child region on one member
+ * @param concurrentCreateData
+ * @throws Throwable
+ */
+ public void regionPutWithOfflineChild(
+ SerializableRunnable createParentPR,
+ SerializableRunnable createChildPR,
+ boolean concurrentCreateData) throws Throwable {
+ Host host = Host.getHost(0);
+ final VM vm0 = host.getVM(0);
+ VM vm1 = host.getVM(1);
+ VM vm2 = host.getVM(2);
+
+ SerializableRunnable checkDataOnParent = (new SerializableRunnable("checkDataOnParent") {
+ @Override
+ public void run() {
+ Cache cache = getCache();
+ Region region = cache.getRegion(PR_REGION_NAME);
+
+ for (int i = 0; i < NUM_BUCKETS; i++) {
+ assertEquals("For key " + i, "a", region.get(i));
+ }
+ }
+ });
+
+ SerializableRunnable createDataOnParent = new SerializableRunnable("createDataOnParent") {
+
+ public void run() {
+ Cache cache = getCache();
+ LogWriterUtils.getLogWriter().info("creating data in " + PR_REGION_NAME);
+ Region region = cache.getRegion(PR_REGION_NAME);
+
+ for (int i = 0; i < NUM_BUCKETS; i++) {
+ region.put(i, "c");
+ assertEquals("For key " + i, "c", region.get(i));
+ }
+ }
+ };
+
+ //Create the PRs on two members
+ vm0.invoke(createParentPR);
+ vm1.invoke(createParentPR);
+ vm0.invoke(createChildPR);
+ vm1.invoke(createChildPR);
+
+ //Create some buckets.
+ createData(vm0, 0, NUM_BUCKETS, "a");
+ createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+ //Close the members
+ closeCache(vm1);
+ closeCache(vm0);
+
+ try {
+ //Recreate the parent region. Try to make sure that
+ //the member with the latest copy of the buckets
+ //is the one that decides to throw away its copy
+ //by starting it last.
+ AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
+ AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
+ async0.getResult(MAX_WAIT);
+ async1.getResult(MAX_WAIT);
+ //Now create the parent region on vm-2. vm-2 did not
+ //previously hosted the child region.
+ vm2.invoke(createParentPR);
+
+ AsyncInvocation async2 = null;
+ AsyncInvocation asyncPut = null;
+ if (concurrentCreateData) {
+ //Recreate the child region.
+ async1 = vm1.invokeAsync(createChildPR);
+ async0 = vm0.invokeAsync(createChildPR);
+ async2 = vm2.invokeAsync(createChildPR);
+
+ Thread.sleep(100);
+ asyncPut = vm0.invokeAsync(createDataOnParent);
+ } else {
+ vm0.invoke(createDataOnParent);
+ }
+ async0.getResult(MAX_WAIT);
+ async1.getResult(MAX_WAIT);
+ if (async2 != null) {
+ async2.getResult(MAX_WAIT);
+ asyncPut.getResult(MAX_WAIT);
+ }
+ //Validate the data
+ checkData(vm0, 0, NUM_BUCKETS, "c");
+ checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
+ //Make sure we can actually use the buckets in the child region.
+ createData(vm0, 0, NUM_BUCKETS, "c", "region2");
+ } finally {
+ //Close the members
+ closeCache(vm1);
+ closeCache(vm0);
+ closeCache(vm2);
+ }
+ }
+
private RebalanceResults rebalance(VM vm) {
return (RebalanceResults) vm.invoke(new SerializableCallable() {
[15/15] incubator-geode git commit: Allow sample rate of MAX_VALUE and add awaitInitialization method
Posted by kl...@apache.org.
Allow sample rate of MAX_VALUE and add awaitInitialization method
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/64ef1ddc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/64ef1ddc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/64ef1ddc
Branch: refs/heads/feature/GEODE-2012
Commit: 64ef1ddcb4a9ece397e76fa77e439753d7ba4d61
Parents: 3b2f128
Author: Kirk Lund <kl...@apache.org>
Authored: Thu Oct 20 12:26:13 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Oct 26 10:19:14 2016 -0700
----------------------------------------------------------------------
.../internal/statistics/HostStatSampler.java | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/64ef1ddc/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
index 020b5ab..2ec32e7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
@@ -269,8 +269,10 @@ public abstract class HostStatSampler
synchronized(HostStatSampler.class) {
if (statThread != null) {
try {
- int msToWait = getSampleRate() + 100;
- statThread.join(msToWait);
+ if (getSampleRate() < Integer.MAX_VALUE) {
+ int msToWait = getSampleRate() + 100;
+ statThread.join(msToWait);
+ }
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
@@ -366,9 +368,18 @@ public abstract class HostStatSampler
* @since GemFire 7.0
*/
public final boolean waitForInitialization(long ms) throws InterruptedException {
- return this.statSamplerInitializedLatch.await(ms);
+ return awaitInitialization(ms, TimeUnit.MILLISECONDS);
}
-
+
+ /**
+ * Awaits the initialization of special statistics.
+ *
+ * @see #initSpecialStats
+ */
+ public final boolean awaitInitialization(final long timeout, final TimeUnit unit) throws InterruptedException {
+ return this.statSamplerInitializedLatch.await(timeout, unit);
+ }
+
public final void changeArchive(File newFile) {
this.sampleCollector.changeArchive(newFile, NanoTimer.getTime());
}
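
For callers, awaitInitialization is waitForInitialization with an explicit
time unit. A minimal usage sketch, assuming sampler is an instance of a
concrete HostStatSampler subclass and the 5-second timeout is an arbitrary
choice:

    import java.util.concurrent.TimeUnit;

    // Block until the special statistics are initialized; returns false
    // if the latch did not open before the timeout. awaitInitialization
    // throws InterruptedException, so the caller declares or handles it.
    if (!sampler.awaitInitialization(5, TimeUnit.SECONDS)) {
      throw new IllegalStateException("stat sampler not initialized within 5 seconds");
    }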
[07/15] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-2019
Posted by kl...@apache.org.
Merge remote-tracking branch 'origin/develop' into feature/GEODE-2019
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/bc060f95
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/bc060f95
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/bc060f95
Branch: refs/heads/feature/GEODE-2012
Commit: bc060f95c1f889fe5c6261006fe8a2591bfe6187
Parents: 139c0a3 7e659b2
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 16:37:25 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 16:37:25 2016 -0700
----------------------------------------------------------------------
.../LauncherLifecycleCommandsDUnitTest.java | 22 ++++++
.../geode/distributed/ServerLauncher.java | 78 ++++++++++++++------
.../membership/gms/membership/GMSJoinLeave.java | 7 +-
.../cli/commands/LauncherLifecycleCommands.java | 29 +++++++-
.../internal/cli/commands/ShellCommands.java | 48 +++---------
.../internal/cli/i18n/CliStrings.java | 13 +++-
.../management/internal/cli/shell/Gfsh.java | 41 ++++++----
.../geode.apache.org/schema/cache/cache-1.0.xsd | 4 +
.../PRColocatedEquiJoinDUnitTest.java | 2 +
.../cli/commands/golden-help-offline.properties | 9 +++
10 files changed, 172 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
[04/15] incubator-geode git commit: GEODE-2022: Marking
testRRPRLocalQueryingWithHetroIndexes as flaky
Posted by kl...@apache.org.
GEODE-2022: Marking testRRPRLocalQueryingWithHetroIndexes as flaky
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/7e659b23
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/7e659b23
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/7e659b23
Branch: refs/heads/feature/GEODE-2012
Commit: 7e659b2381eae3847d1c7e29bb1b37df518dde35
Parents: 59df3d9
Author: Dan Smith <up...@apache.org>
Authored: Thu Oct 20 13:07:35 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 13:09:12 2016 -0700
----------------------------------------------------------------------
.../cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/7e659b23/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
index c9e5084..3e8250b 100644
--- a/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
@@ -19,6 +19,7 @@
*/
package org.apache.geode.cache.query.partitioned;
+import org.apache.geode.test.junit.categories.FlakyTest;
import org.junit.experimental.categories.Category;
import org.junit.Test;
@@ -1341,6 +1342,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
@Test
+ @Category(FlakyTest.class) // GEODE-2022
public void testRRPRLocalQueryingWithHetroIndexes() throws Exception {
Host host = Host.getHost(0);
[06/15] incubator-geode git commit: GEODE-2019 Add automated
rebalance documentation
Posted by kl...@apache.org.
GEODE-2019 Add automated rebalance documentation
Revise content header to match the header in the subnav.
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/139c0a36
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/139c0a36
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/139c0a36
Branch: refs/heads/feature/GEODE-2012
Commit: 139c0a36ff0aba3c5f8f9b964d0109d18a10ca6e
Parents: f1be596
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 14:18:27 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 14:18:27 2016 -0700
----------------------------------------------------------------------
.../developing/partitioned_regions/automated_rebalance.html.md.erb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/139c0a36/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
index d4ca2a6..387275d 100644
--- a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -1,5 +1,5 @@
---
-title: Automated Rebalance
+title: Automated Rebalancing of Partitioned Region Data
---
Automated rebalance triggers a rebalance
[13/15] incubator-geode git commit: Add await method
Posted by kl...@apache.org.
Add await method
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/34834ce0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/34834ce0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/34834ce0
Branch: refs/heads/feature/GEODE-2012
Commit: 34834ce00787dfa27915bf687c28550bc170d3a5
Parents: 5d80ad4
Author: Kirk Lund <kl...@apache.org>
Authored: Thu Oct 20 12:27:34 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Oct 26 10:19:14 2016 -0700
----------------------------------------------------------------------
.../geode/internal/util/concurrent/StoppableCountDownLatch.java | 5 +++++
1 file changed, 5 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/34834ce0/geode-core/src/main/java/org/apache/geode/internal/util/concurrent/StoppableCountDownLatch.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/util/concurrent/StoppableCountDownLatch.java b/geode-core/src/main/java/org/apache/geode/internal/util/concurrent/StoppableCountDownLatch.java
index 1f0ac34..a3052078 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/util/concurrent/StoppableCountDownLatch.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/util/concurrent/StoppableCountDownLatch.java
@@ -77,6 +77,11 @@ public class StoppableCountDownLatch {
return latch.await(msTimeout, TimeUnit.MILLISECONDS);
}
+ public boolean await(final long timeout, final TimeUnit unit) throws InterruptedException {
+ stopper.checkCancelInProgress(null);
+ return latch.await(timeout, unit);
+ }
+
public synchronized void countDown() {
latch.countDown();
}
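The overload mirrors java.util.concurrent.CountDownLatch.await(long, TimeUnit) while preserving the cancellation check, and it is what the HostStatSampler change above builds on. A usage sketch, assuming the latch comes from existing Geode internals:

``` pre
import java.util.concurrent.TimeUnit;

import org.apache.geode.internal.util.concurrent.StoppableCountDownLatch;

final class LatchWait {
  // Hypothetical helper: returns true if the latch opens within the
  // timeout, false on timeout; throws the stopper's cancellation
  // exception if the system is shutting down.
  static boolean openedInTime(StoppableCountDownLatch latch) throws InterruptedException {
    return latch.await(30, TimeUnit.SECONDS);
  }
}
```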
[08/15] incubator-geode git commit: GEODE-2019 Adding missing apache
license header
Posted by kl...@apache.org.
GEODE-2019 Adding missing apache license header
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/af55d929
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/af55d929
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/af55d929
Branch: refs/heads/feature/GEODE-2012
Commit: af55d9292c2eacf9db43909b9c68c32b927b21de
Parents: bc060f9
Author: Dan Smith <up...@apache.org>
Authored: Thu Oct 20 17:11:25 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 17:11:25 2016 -0700
----------------------------------------------------------------------
.../automated_rebalance.html.md.erb | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/af55d929/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
index 387275d..37b7dce 100644
--- a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -1,6 +1,22 @@
---
title: Automated Rebalancing of Partitioned Region Data
---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
Automated rebalance triggers a rebalance
(see [Rebalancing Partitioned Region Data](rebalancing_pr_data.html))
[14/15] incubator-geode git commit: Remove double spaces
Posted by kl...@apache.org.
Remove double spaces
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3b2f1283
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3b2f1283
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3b2f1283
Branch: refs/heads/feature/GEODE-2012
Commit: 3b2f1283294f790857ec8eecc70b1bc4b6927edb
Parents: 56917a2
Author: Kirk Lund <kl...@apache.org>
Authored: Thu Oct 20 12:25:35 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Oct 26 10:19:14 2016 -0700
----------------------------------------------------------------------
.../main/java/org/apache/geode/internal/i18n/LocalizedStrings.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3b2f1283/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
index 7d762b8..aa00b73 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/i18n/LocalizedStrings.java
@@ -664,7 +664,7 @@ public class LocalizedStrings {
public static final StringId ManagerLogWriter_COULD_NOT_DELETE_INACTIVE__0___1_ = new StringId(1794, "Could not delete inactive {0} \"{1}\".");
public static final StringId ManagerLogWriter_COULD_NOT_FREE_SPACE_IN_0_DIRECTORY_THE_SPACE_USED_IS_1_WHICH_EXCEEDS_THE_CONFIGURED_LIMIT_OF_2 = new StringId(1795, "Could not free space in {0} directory. The space used is {1} which exceeds the configured limit of {2}.");
- public static final StringId ManagerLogWriter_DELETED_INACTIVE__0___1_ = new StringId(1797, "Deleted inactive {0} \"{1}\".");
+ public static final StringId ManagerLogWriter_DELETED_INACTIVE__0___1_ = new StringId(1797, "Deleted inactive {0} \"{1}\".");
public static final StringId ManagerLogWriter_SWITCHING_TO_LOG__0 = new StringId(1798, "Switching to log {0}");
[10/15] incubator-geode git commit: GEODE-2021: Non colocated gets in
a transaction should get TransactionDataNotColocatedException
Posted by kl...@apache.org.
GEODE-2021: Non colocated gets in a transaction should get TransactionDataNotColocatedException
Throw TransactionDataNotColocatedException when a local get fails with BucketNotFoundException
Added a dunit test with two transactions whose gets use TXStateStub or TXState, depending on data location.
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/56917a26
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/56917a26
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/56917a26
Branch: refs/heads/feature/GEODE-2012
Commit: 56917a26a8916b83f0cec6e85285b5040ff66ee6
Parents: fadd92b
Author: eshu <es...@pivotal.io>
Authored: Fri Oct 21 11:43:36 2016 -0700
Committer: eshu <es...@pivotal.io>
Committed: Fri Oct 21 11:43:36 2016 -0700
----------------------------------------------------------------------
.../geode/internal/cache/PartitionedRegion.java | 6 +
.../apache/geode/disttx/PRDistTXDUnitTest.java | 5 +
.../disttx/PRDistTXWithVersionsDUnitTest.java | 5 +
.../cache/execute/PRColocationDUnitTest.java | 6 +-
.../cache/execute/PRTransactionDUnitTest.java | 131 ++++++++++++++++++-
5 files changed, 151 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index f7ecdaf..df52764 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -4105,6 +4105,12 @@ public class PartitionedRegion extends LocalRegion implements
retryTime.waitToRetryNode();
}
} else {
+ if (prce instanceof BucketNotFoundException) {
+ TransactionException ex = new TransactionDataNotColocatedException(LocalizedStrings.
+ PartitionedRegion_KEY_0_NOT_COLOCATED_WITH_TRANSACTION.toLocalizedString(key));
+ ex.initCause(prce);
+ throw ex;
+ }
Throwable cause = prce.getCause();
if (cause instanceof PrimaryBucketException) {
throw (PrimaryBucketException)cause;
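For applications, the remapping means a transactional get that reaches a bucket not colocated with the transaction's host member now fails with TransactionDataNotColocatedException rather than surfacing an internal BucketNotFoundException. A hedged sketch of the resulting handling (region and keys are hypothetical):

``` pre
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.TransactionDataNotColocatedException;

final class TxGetExample {
  // Hypothetical: reads two keys in one transaction. If the second key's
  // bucket is not colocated with the first, the get throws
  // TransactionDataNotColocatedException and the transaction is rolled back.
  static void readPair(CacheTransactionManager mgr, Region<Object, Object> region,
      Object key1, Object key2) {
    mgr.begin();
    try {
      region.get(key1);
      region.get(key2);
      mgr.commit();
    } catch (TransactionDataNotColocatedException e) {
      if (mgr.exists()) {
        mgr.rollback();
      }
    }
  }
}
```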
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
index f36085b..68a83f1 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXDUnitTest.java
@@ -37,6 +37,11 @@ public class PRDistTXDUnitTest extends PRTransactionDUnitTest {
return props;
}
+ @Ignore("[DISTTX] TODO test overridden and intentionally left blank as it does not apply to disttx.")
+ @Test
+ public void testTxWithNonColocatedGet() {
+ }
+
@Ignore("[DISTTX] TODO test overridden and intentionally left blank as they fail.")
@Test
public void testBasicPRTransactionRedundancy0() {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
index 268c2ed..d692468 100644
--- a/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/disttx/PRDistTXWithVersionsDUnitTest.java
@@ -37,6 +37,11 @@ public class PRDistTXWithVersionsDUnitTest extends PRTransactionWithVersionsDUni
return props;
}
+ @Ignore("[DISTTX] TODO test overridden and intentionally left blank as it does not apply to disttx.")
+ @Test
+ public void testTxWithNonColocatedGet() {
+ }
+
@Ignore("[DISTTX] TODO test overridden and intentionally left blank as they fail.")
@Override
@Test
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
index 1b8d2d1..f6ee565 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRColocationDUnitTest.java
@@ -2388,11 +2388,15 @@ public class PRColocationDUnitTest extends JUnit4CacheTestCase {
assertTrue("Region should have failed to close. regionName = " + partitionedRegionName , exceptionThrown);
}
public static void putCustomerPartitionedRegion(String partitionedRegionName) {
+ putCustomerPartitionedRegion(partitionedRegionName, 10);
+ }
+
+ public static void putCustomerPartitionedRegion(String partitionedRegionName, int numOfRecord) {
assertNotNull(basicGetCache());
Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR
+ partitionedRegionName);
assertNotNull(partitionedregion);
- for (int i = 1; i <= 10; i++) {
+ for (int i = 1; i <= numOfRecord; i++) {
CustId custid = new CustId(i);
Customer customer = new Customer("name" + i, "Address" + i);
try {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/56917a26/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
index 516c240..332ec01 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PRTransactionDUnitTest.java
@@ -25,10 +25,12 @@ import static org.junit.Assert.*;
import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
import org.apache.geode.test.junit.categories.DistributedTest;
+import org.assertj.core.api.Assertions;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Set;
import util.TestException;
@@ -45,6 +47,7 @@ import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.cache.ForceReattemptException;
import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.TXManagerImpl;
import org.apache.geode.internal.cache.execute.data.CustId;
@@ -53,10 +56,13 @@ import org.apache.geode.internal.cache.execute.data.Order;
import org.apache.geode.internal.cache.execute.data.OrderId;
import org.apache.geode.internal.cache.execute.data.Shipment;
import org.apache.geode.internal.cache.execute.data.ShipmentId;
+import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
+import org.apache.geode.internal.logging.LogService;
import org.apache.geode.test.dunit.Assert;
import org.apache.geode.test.dunit.Invoke;
import org.apache.geode.test.dunit.LogWriterUtils;
import org.apache.geode.test.dunit.SerializableCallable;
+import org.apache.geode.test.dunit.SerializableRunnable;
/**
* Test for co-located PR transactions.
@@ -316,18 +322,141 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
}
protected void createPRWithCoLocation(String prName, String coLocatedWith) {
+ setAttributes(prName, coLocatedWith);
+ createPartitionedRegion(attributeObjects);
+ }
+
+ protected void setAttributes(String prName, String coLocatedWith) {
this.regionName = prName;
this.colocatedWith = coLocatedWith;
this.isPartitionResolver = new Boolean(true);
this.attributeObjects = new Object[] { regionName, redundancy, localMaxmemory,
totalNumBuckets, colocatedWith, isPartitionResolver, getEnableConcurrency() };
- createPartitionedRegion(attributeObjects);
}
protected boolean getEnableConcurrency() {
return false;
}
+ /**
+ * This method executes a transaction with gets on non-colocated entries and
+ * expects the transaction to fail with TransactionDataNotColocatedException.
+ * @param bucketRedundancy redundancy for the colocated PRs
+ */
+ protected void basicPRTXWithNonColocatedGet(int bucketRedundancy) {
+ dataStore1.invoke(runGetCache);
+ dataStore2.invoke(runGetCache);
+ redundancy = new Integer(bucketRedundancy);
+ localMaxmemory = new Integer(50);
+ totalNumBuckets = new Integer(2);
+
+ setAttributes(CustomerPartitionedRegionName, null);
+
+ dataStore1.invoke(PRColocationDUnitTest.class, "createPR", this.attributeObjects);
+ dataStore2.invoke(PRColocationDUnitTest.class, "createPR", this.attributeObjects);
+
+ // Put the customer 1-2 in CustomerPartitionedRegion
+ dataStore1.invoke(() -> PRColocationDUnitTest.putCustomerPartitionedRegion(CustomerPartitionedRegionName, 2));
+
+ dataStore1.invoke(verifyNonColocated);
+ dataStore2.invoke(verifyNonColocated);
+
+ dataStore1.invoke(getTx);
+ }
+
+
+ @SuppressWarnings("serial")
+ private SerializableRunnable verifyNonColocated = new SerializableRunnable("verifyNonColocated") {
+ @Override
+ public void run() throws PRLocallyDestroyedException, ForceReattemptException {
+ containsKeyLocally();
+ }
+ };
+
+ @SuppressWarnings("serial")
+ private SerializableRunnable getTx = new SerializableRunnable("getTx") {
+ @Override
+ public void run() {
+ performGetTx();
+ }
+ };
+
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ private void containsKeyLocally() throws PRLocallyDestroyedException, ForceReattemptException {
+ PartitionedRegion pr = (PartitionedRegion) basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
+
+ CustId cust1 = new CustId(1);
+ CustId cust2 = new CustId(2);
+ int bucketId1 = pr.getKeyInfo(cust1).getBucketId();
+ int bucketId2 = pr.getKeyInfo(cust2).getBucketId();
+
+ List<Integer> localPrimaryBucketList = pr.getLocalPrimaryBucketsListTestOnly();
+ Set localBucket1Keys;
+ Set localBucket2Keys;
+ assertTrue(localPrimaryBucketList.size() == 1);
+ for (int bucketId: localPrimaryBucketList) {
+ if (bucketId == bucketId1) {
+ //primary bucket has cust1
+ localBucket1Keys = pr.getDataStore().getKeysLocally(bucketId1, false);
+ for (Object key: localBucket1Keys) {
+ LogService.getLogger().info("local key set contains " + key);
+ }
+ assertTrue(localBucket1Keys.size() == 1);
+ } else {
+ localBucket2Keys = pr.getDataStore().getKeysLocally(bucketId2, false);
+ for (Object key: localBucket2Keys) {
+ LogService.getLogger().info("local key set contains " + key);
+ }
+ assertTrue(localBucket2Keys.size() == 1);
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void performGetTx() {
+ PartitionedRegion pr = (PartitionedRegion) basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
+ CacheTransactionManager mgr = pr.getCache().getCacheTransactionManager();
+ CustId cust1 = new CustId(1);
+ CustId cust2 = new CustId(2);
+ int bucketId1 = pr.getKeyInfo(cust1).getBucketId();
+ List<Integer> localPrimaryBucketList = pr.getLocalPrimaryBucketsListTestOnly();
+ assertTrue(localPrimaryBucketList.size() == 1);
+ boolean isCust1Local = (Integer)localPrimaryBucketList.get(0) == bucketId1;
+
+ // first get targets the remote node, so the transaction uses TXStateStub
+ Assertions.assertThatThrownBy(()-> getTx(!isCust1Local, mgr, pr, cust1, cust2))
+ .isInstanceOf(TransactionDataNotColocatedException.class);
+
+ // first get targets the local node, so the transaction uses TXState
+ Assertions.assertThatThrownBy(()-> getTx(isCust1Local, mgr, pr, cust1, cust2))
+ .isInstanceOf(TransactionDataNotColocatedException.class);
+ }
+
+ private void getTx(boolean doCust1First, CacheTransactionManager mgr, PartitionedRegion pr, CustId cust1, CustId cust2) {
+ CustId first = doCust1First ? cust1 : cust2;
+ CustId second = !doCust1First ? cust1 : cust2;
+
+ mgr.begin();
+ boolean doRollback = true;
+ try {
+ pr.get(first);
+ pr.get(second);
+ doRollback = false;
+ } finally {
+ if (doRollback) {
+ mgr.rollback();
+ } else {
+ mgr.commit();
+ }
+ }
+ }
+
+ @Test
+ public void testTxWithNonColocatedGet() {
+ basicPRTXWithNonColocatedGet(0);
+ }
+
@Test
public void testPRTXInCacheListenerRedundancy0() {
basicPRTXInCacheListener(0);
[05/15] incubator-geode git commit: GEODE-388: Marking
dynamic-region-factory as deprecated in the xml.
Posted by kl...@apache.org.
GEODE-388: Marking dynamic-region-factory as deprecated in the xml.
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/59df3d93
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/59df3d93
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/59df3d93
Branch: refs/heads/feature/GEODE-2012
Commit: 59df3d93e7c51e43685356de82b074531966015f
Parents: b2e7768
Author: Dan Smith <up...@apache.org>
Authored: Wed Oct 19 10:49:07 2016 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu Oct 20 13:09:12 2016 -0700
----------------------------------------------------------------------
.../META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd | 4 ++++
1 file changed, 4 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/59df3d93/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
----------------------------------------------------------------------
diff --git a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
index adf734c..d3d83f1 100755
--- a/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
+++ b/geode-core/src/main/resources/META-INF/schemas/geode.apache.org/schema/cache/cache-1.0.xsd
@@ -1245,7 +1245,11 @@ As of 6.5 disk-dirs is deprecated on region-attributes. Use disk-store-name inst
</xsd:complexType>
<xsd:complexType name="dynamic-region-factory-type">
<xsd:annotation>
+ <xsd:appinfo>deprecated</xsd:appinfo>
<xsd:documentation>
+ dynamic-region-factory is deprecated. Use functions to create regions dynamically
+ instead.
+
A "dynamic-region-factory" element configures a dynamic region factory for
this cache. If this optional element is missing then the cache does not
support dynamic regions.
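As a sketch of the suggested replacement, a function executed on the members that should host the region can create it on demand. Names here are illustrative, and the function would still be registered and invoked through FunctionService:

``` pre
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.execute.FunctionAdapter;
import org.apache.geode.cache.execute.FunctionContext;

public class CreateRegionFunction extends FunctionAdapter {
  @Override
  public void execute(FunctionContext context) {
    String regionName = (String) context.getArguments();
    Cache cache = CacheFactory.getAnyInstance();
    // Create the region on this member only if it does not already exist.
    if (cache.getRegion(regionName) == null) {
      cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
    }
    context.getResultSender().lastResult(Boolean.TRUE);
  }

  @Override
  public String getId() {
    return CreateRegionFunction.class.getName();
  }
}
```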
[11/15] incubator-geode git commit: Import SuppressWarnings
Posted by kl...@apache.org.
Import SuppressWarnings
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5d80ad4a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5d80ad4a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5d80ad4a
Branch: refs/heads/feature/GEODE-2012
Commit: 5d80ad4a480b88e31de6aac8dafafb361b01a976
Parents: 64ef1dd
Author: Kirk Lund <kl...@apache.org>
Authored: Thu Oct 20 12:27:08 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Oct 26 10:19:14 2016 -0700
----------------------------------------------------------------------
.../apache/geode/internal/statistics/StatArchiveWriter.java | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5d80ad4a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
index b8f44e2..8f47ee4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/StatArchiveWriter.java
@@ -33,6 +33,8 @@ import java.net.UnknownHostException;
import java.util.*;
import java.util.zip.GZIPOutputStream;
+import edu.umd.cs.findbugs.annotations.SuppressWarnings;
+
/**
* StatArchiveWriter provides APIs to write statistic snapshots to an archive
* file.
@@ -150,7 +152,7 @@ public class StatArchiveWriter implements StatArchiveFormat, SampleHandler {
}
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE", justification="Best effort attempt to delete a GFS file without any samples.")
+ @SuppressWarnings(value="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE", justification="Best effort attempt to delete a GFS file without any samples.")
private static void deleteFileIfPossible(File file) {
file.delete();
}
@@ -270,7 +272,7 @@ public class StatArchiveWriter implements StatArchiveFormat, SampleHandler {
}
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", justification="This is only for debugging and there is never more than one instance being traced because there is only one stat sampler.")
+ @SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", justification="This is only for debugging and there is never more than one instance being traced because there is only one stat sampler.")
public void allocatedResourceInstance(ResourceInstance statResource) {
if (logger.isTraceEnabled(LogMarker.STATISTICS)) {
logger.trace(LogMarker.STATISTICS, "StatArchiveWriter#allocatedResourceInstance statResource={}", statResource);
[03/15] incubator-geode git commit: GEODE-2019 Add automated
rebalance documentation
Posted by kl...@apache.org.
GEODE-2019 Add automated rebalance documentation
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/f1be596a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/f1be596a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/f1be596a
Branch: refs/heads/feature/GEODE-2012
Commit: f1be596a322911525908524663cc0e5cad17a2bb
Parents: 11ef3eb
Author: Karen Miller <km...@pivotal.io>
Authored: Thu Oct 20 10:23:12 2016 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Thu Oct 20 10:23:12 2016 -0700
----------------------------------------------------------------------
.../source/subnavs/geode-subnav.erb | 3 +
.../automated_rebalance.html.md.erb | 66 ++++++++++++++++++++
.../chapter_overview.html.md.erb | 5 ++
3 files changed, 74 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 53e9118..2373f4b 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -959,6 +959,9 @@ limitations under the License.
<a href="/docs/developing/partitioned_regions/rebalancing_pr_data.html">Rebalancing Partitioned Region Data</a>
</li>
<li>
+ <a href="/docs/developing/partitioned_regions/automated_rebalance.html">Automated Rebalancing of Partitioned Region Data</a>
+ </li>
+ <li>
<a href="/docs/developing/partitioned_regions/checking_region_redundancy.html">Checking Redundancy in Partitioned Regions</a>
</li>
<li>
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
new file mode 100644
index 0000000..d4ca2a6
--- /dev/null
+++ b/geode-docs/developing/partitioned_regions/automated_rebalance.html.md.erb
@@ -0,0 +1,66 @@
+---
+title: Automated Rebalance
+---
+
+Automated rebalance triggers a rebalance
+(see [Rebalancing Partitioned Region Data](rebalancing_pr_data.html))
+operation based on a
+time schedule.
+At the scheduled intervals, the balance of the partitioned regions
+is evaluated based on configured criteria.
+One criterion is a minimum threshold for the number of bytes that
+would be transferred if the rebalance takes place.
+The other criterion uses the ratio of the number of bytes
+that would be transferred to the total number of bytes in the
+regions.
+If the evaluation indicates the system is out of balance,
+the rebalance transfer is initiated.
+
+To enable automated rebalance, specify the rebalance manager
+in the `<initializer>` element within the `<cache>` configuration
+of the `cache.xml` file:
+
+``` pre
+<class-name> org.apache.geode.cache.util.AutoBalancer </class-name>
+```
+
+The time schedule that triggers an evaluation and possible rebalance
+uses a cron-based specification in
+the `<initializer>` element within the `<cache>` configuration
+of the `cache.xml` file.
+This scheduling specification is required.
+Specify the cron expression in the Spring format.
+This example specification triggers each Saturday at 3am:
+
+``` pre
+<parameter name="schedule"> 0 0 3 ? * SAT </parameter>
+```
+
+This example specification triggers once each day at 4am:
+
+``` pre
+<parameter name="schedule"> 0 0 4 * * ?</parameter>
+```
+
+The automated rebalance parameters that set the criteria for
+triggering a rebalance are optional and have reasonable default values.
+
+One criterion is a minimum number of bytes that would be transferred
+if the rebalance were to take place.
+The specification is in units of bytes; here is the specification
+for the default value of 100MB:
+
+``` pre
+<parameter name="minimum-size"> 104857600</parameter>
+```
+
+Another criterion represents the ratio of bytes that would be transferred
+to the total number of bytes in the partitioned regions,
+represented as an integer percentage.
+The default is 10 percent.
+This example specifies 15 percent:
+
+``` pre
+<parameter name="size-threshold-percent"> 15 </parameter>
+```
+
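Assembled from the snippets above, a complete `<initializer>` element might look like the following sketch; the schedule shown is the Saturday example:

``` pre
<initializer>
  <class-name> org.apache.geode.cache.util.AutoBalancer </class-name>
  <parameter name="schedule"> 0 0 3 ? * SAT </parameter>
  <parameter name="minimum-size"> 104857600 </parameter>
  <parameter name="size-threshold-percent"> 15 </parameter>
</initializer>
```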
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/f1be596a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
index c92921b..e450ee5 100644
--- a/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
+++ b/geode-docs/developing/partitioned_regions/chapter_overview.html.md.erb
@@ -49,6 +49,11 @@ In addition to basic region management, partitioned regions include options for
In a distributed system with minimal contention among the concurrent threads reading from or updating the members, you can use rebalancing to dynamically increase or decrease your data and processing capacity.
+- **[Automated Rebalancing of Partitioned Region Data](../../developing/partitioned_regions/automated_rebalance.html)**
+
+ The automated rebalance feature triggers a rebalance operation
+based on a time schedule.
+
- **[Checking Redundancy in Partitioned Regions](../../developing/partitioned_regions/checking_region_redundancy.html)**
Under some circumstances, it can be important to verify that your partitioned region data is redundant and that upon member restart, redundancy has been recovered properly across partitioned region members.
[02/15] incubator-geode git commit: GEODE-1959: prompt for password
when starting a server if username is specified
Posted by kl...@apache.org.
GEODE-1959: prompt for password when starting a server if username is specified
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/b2e77685
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/b2e77685
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/b2e77685
Branch: refs/heads/feature/GEODE-2012
Commit: b2e77685907c51b1af346e6f8b8da3f5b598b361
Parents: 11ef3eb
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Tue Oct 18 09:13:26 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed Oct 19 21:16:23 2016 -0700
----------------------------------------------------------------------
.../LauncherLifecycleCommandsDUnitTest.java | 22 ++++++
.../geode/distributed/ServerLauncher.java | 78 ++++++++++++++------
.../membership/gms/membership/GMSJoinLeave.java | 7 +-
.../cli/commands/LauncherLifecycleCommands.java | 29 +++++++-
.../internal/cli/commands/ShellCommands.java | 48 +++---------
.../internal/cli/i18n/CliStrings.java | 13 +++-
.../management/internal/cli/shell/Gfsh.java | 41 ++++++----
.../cli/commands/golden-help-offline.properties | 9 +++
8 files changed, 166 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java b/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
index 490e309..933d152 100644
--- a/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
@@ -437,6 +437,28 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
}
@Test
+ public void testStartServerFailsFastOnMissingPassword() throws IOException {
+
+ CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
+
+ String pathName = getClass().getSimpleName().concat("_").concat(getTestMethodName());
+ final File workingDirectory = temporaryFolder.newFolder(pathName);
+
+ command.addOption(CliStrings.START_SERVER__NAME, pathName);
+ command.addOption(CliStrings.START_SERVER__DIR, workingDirectory.getCanonicalPath());
+ command.addOption(CliStrings.START_SERVER__USERNAME, "test");
+
+ CommandResult result = executeCommand(command.toString());
+
+ assertNotNull(result);
+ assertEquals(Result.Status.ERROR, result.getStatus());
+
+ String resultString = toString(result);
+
+ assertTrue(resultString, resultString.contains("password must be specified"));
+ }
+
+ @Test
public void test005StartServerFailsFastOnMissingGemFireSecurityPropertiesFile() throws IOException {
String gemfireSecuritiesPropertiesFile = "/path/to/missing/gemfire-securities.properties";
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
index a3d3845..088b670 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java
@@ -19,6 +19,32 @@ package org.apache.geode.distributed;
import static org.apache.geode.distributed.ConfigurationProperties.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.ServiceLoader;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+import joptsimple.OptionException;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+
import org.apache.geode.SystemFailure;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.partition.PartitionRegionHelper;
@@ -27,14 +53,31 @@ import org.apache.geode.distributed.internal.DefaultServerLauncherCacheProvider;
import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.internal.GemFireVersion;
-import org.apache.geode.internal.net.SocketCreator;
-import org.apache.geode.internal.cache.*;
+import org.apache.geode.internal.cache.AbstractCacheServer;
+import org.apache.geode.internal.cache.CacheConfig;
+import org.apache.geode.internal.cache.CacheServerLauncher;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.tier.sockets.CacheServerHelper;
import org.apache.geode.internal.i18n.LocalizedStrings;
import org.apache.geode.internal.lang.ObjectUtils;
import org.apache.geode.internal.lang.StringUtils;
import org.apache.geode.internal.lang.SystemUtils;
-import org.apache.geode.internal.process.*;
+import org.apache.geode.internal.net.SocketCreator;
+import org.apache.geode.internal.process.ClusterConfigurationNotAvailableException;
+import org.apache.geode.internal.process.ConnectionFailedException;
+import org.apache.geode.internal.process.ControlNotificationHandler;
+import org.apache.geode.internal.process.ControllableProcess;
+import org.apache.geode.internal.process.FileAlreadyExistsException;
+import org.apache.geode.internal.process.MBeanInvocationFailedException;
+import org.apache.geode.internal.process.PidUnavailableException;
+import org.apache.geode.internal.process.ProcessController;
+import org.apache.geode.internal.process.ProcessControllerFactory;
+import org.apache.geode.internal.process.ProcessControllerParameters;
+import org.apache.geode.internal.process.ProcessLauncherContext;
+import org.apache.geode.internal.process.ProcessType;
+import org.apache.geode.internal.process.StartupStatusListener;
+import org.apache.geode.internal.process.UnableToControlProcessException;
import org.apache.geode.internal.util.IOUtils;
import org.apache.geode.lang.AttachAPINotFoundException;
import org.apache.geode.management.internal.cli.i18n.CliStrings;
@@ -42,25 +85,8 @@ import org.apache.geode.management.internal.cli.json.GfJsonArray;
import org.apache.geode.management.internal.cli.json.GfJsonException;
import org.apache.geode.management.internal.cli.json.GfJsonObject;
import org.apache.geode.pdx.PdxSerializer;
-import joptsimple.OptionException;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.geode.distributed.ConfigurationProperties.SERVER_BIND_ADDRESS;
+import org.apache.geode.security.AuthenticationRequiredException;
+import org.apache.geode.security.GemFireSecurityException;
/**
* The ServerLauncher class is a launcher class with main method to start a GemFire Server (implying a GemFire Cache
@@ -730,6 +756,14 @@ public class ServerLauncher extends AbstractLauncher<String> {
return new ServerState(this, Status.ONLINE);
}
+ catch(AuthenticationRequiredException e){
+ failOnStart(e);
+ throw new AuthenticationRequiredException("user/password required. Please start your server with --user and --password. "+ e.getMessage());
+ }
+ catch(GemFireSecurityException e){
+ failOnStart(e);
+ throw new GemFireSecurityException(e.getMessage());
+ }
catch (IOException e) {
failOnStart(e);
throw new RuntimeException(LocalizedStrings.Launcher_Command_START_IO_ERROR_MESSAGE.toLocalizedString(
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
index 89a9a37..f5198bb 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeave.java
@@ -41,6 +41,8 @@ import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.logging.log4j.Logger;
+
import org.apache.geode.GemFireConfigException;
import org.apache.geode.SystemConnectException;
import org.apache.geode.distributed.DistributedMember;
@@ -70,8 +72,8 @@ import org.apache.geode.distributed.internal.membership.gms.messages.ViewAckMess
import org.apache.geode.distributed.internal.tcpserver.TcpClient;
import org.apache.geode.internal.Version;
import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.security.AuthenticationRequiredException;
import org.apache.geode.security.GemFireSecurityException;
-import org.apache.logging.log4j.Logger;
/**
* GMSJoinLeave handles membership communication with other processes in the
@@ -413,6 +415,9 @@ public class GMSJoinLeave implements JoinLeave, MessageHandler {
|| failReason.contains("15806")) {
throw new SystemConnectException(failReason);
}
+ else if(failReason.contains("Failed to find credentials")){
+ throw new AuthenticationRequiredException(failReason);
+ }
throw new GemFireSecurityException(failReason);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
index 4ffe082..892a92d 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
@@ -17,6 +17,7 @@
package org.apache.geode.management.internal.cli.commands;
import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.apache.geode.management.internal.cli.i18n.CliStrings.START_SERVER__PASSWORD;
import java.awt.Desktop;
import java.io.BufferedReader;
@@ -115,6 +116,7 @@ import org.apache.geode.management.internal.cli.util.VisualVmNotFoundException;
import org.apache.geode.management.internal.configuration.domain.SharedConfigurationStatus;
import org.apache.geode.management.internal.configuration.messages.SharedConfigurationStatusRequest;
import org.apache.geode.management.internal.configuration.messages.SharedConfigurationStatusResponse;
+import org.apache.geode.management.internal.security.ResourceConstants;
import org.apache.geode.security.AuthenticationFailedException;
/**
@@ -1498,11 +1500,28 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
@CliOption(key = CliStrings.START_SERVER__HTTP_SERVICE_BIND_ADDRESS,
unspecifiedDefaultValue = CacheServer.HTTP_SERVICE_DEFAULT_BIND_ADDRESS,
help = CliStrings.START_SERVER__HTTP_SERVICE_BIND_ADDRESS__HELP)
- final String httpServiceBindAddress)
+ final String httpServiceBindAddress,
+ @CliOption(key = CliStrings.START_SERVER__USERNAME,
+ unspecifiedDefaultValue = "",
+ help = CliStrings.START_SERVER__USERNAME__HELP)
+ final String userName,
+ @CliOption(key = START_SERVER__PASSWORD,
+ unspecifiedDefaultValue = "",
+ help = CliStrings.START_SERVER__PASSWORD__HELP)
+ String passwordToUse)
// NOTICE: keep the parameters in alphabetical order based on their CliStrings.START_SERVER_* text
{
-
try {
+ // prompt for password if username is specified in the command
+ if (!StringUtils.isBlank(userName)) {
+ if (StringUtils.isBlank(passwordToUse)) {
+ passwordToUse = getGfsh().readPassword(START_SERVER__PASSWORD + ": ");
+ }
+ if (StringUtils.isBlank(passwordToUse)) {
+ return ResultBuilder.createConnectionErrorResult(CliStrings.START_SERVER__MSG__PASSWORD_MUST_BE_SPECIFIED);
+ }
+ }
+
if (workingDirectory == null) {
// attempt to use or make sub-directory using memberName...
File serverWorkingDirectory = new File(memberName);
@@ -1560,10 +1579,14 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
gemfireProperties.setProperty(USE_CLUSTER_CONFIGURATION, StringUtils.valueOf(requestSharedConfiguration, Boolean.TRUE.toString()));
gemfireProperties.setProperty(LOCK_MEMORY, StringUtils.valueOf(lockMemory, StringUtils.EMPTY_STRING));
gemfireProperties.setProperty(OFF_HEAP_MEMORY_SIZE, StringUtils.valueOf(offHeapMemorySize, StringUtils.EMPTY_STRING));
-
gemfireProperties.setProperty(START_DEV_REST_API, StringUtils.valueOf(startRestApi, StringUtils.EMPTY_STRING));
gemfireProperties.setProperty(HTTP_SERVICE_PORT, StringUtils.valueOf(httpServicePort, StringUtils.EMPTY_STRING));
gemfireProperties.setProperty(HTTP_SERVICE_BIND_ADDRESS, StringUtils.valueOf(httpServiceBindAddress, StringUtils.EMPTY_STRING));
+ // if username is specified in the command line, it will overwrite what's set in the properties file
+ if(!StringUtils.isBlank(userName)){
+ gemfireProperties.setProperty(ResourceConstants.USER_NAME, userName);
+ gemfireProperties.setProperty(ResourceConstants.PASSWORD, passwordToUse);
+ }
// read the OSProcess enable redirect system property here -- TODO: replace with new GFSH argument
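In gfsh terms, the wiring above means --user without --password triggers an interactive masked prompt, and a blank response fails fast with the new "password must be specified." message. A hypothetical session (server name illustrative):

``` pre
gfsh> start server --name=server1 --user=admin
password: ********
```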
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
index 792a8ab..ee09167 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ShellCommands.java
@@ -88,14 +88,6 @@ import org.apache.geode.management.internal.web.shell.HttpOperationInvoker;
import org.apache.geode.management.internal.web.shell.RestHttpOperationInvoker;
import org.apache.geode.security.AuthenticationFailedException;
-import org.springframework.shell.core.CommandMarker;
-import org.springframework.shell.core.ExitShellRequest;
-import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
/**
*
* @since GemFire 7.0
@@ -200,7 +192,7 @@ public class ShellCommands implements CommandMarker {
try {
if (userName != null && userName.length() > 0) {
if (passwordToUse == null || passwordToUse.length() == 0) {
- passwordToUse = this.readPassword(gfsh, "password: ");
+ passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
}
if (passwordToUse == null || passwordToUse.length() == 0) {
return ResultBuilder.createConnectionErrorResult(CliStrings.CONNECT__MSG__JMX_PASSWORD_MUST_BE_SPECIFIED);
@@ -282,8 +274,8 @@ public class ShellCommands implements CommandMarker {
// otherwise, prompt for username and password and retry the connection
try {
- userName = this.readText(gfsh, "username: ");
- passwordToUse = this.readPassword(gfsh, "password: ");
+ userName = gfsh.readText(CliStrings.CONNECT__USERNAME + ": ");
+ passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
return httpConnect(sslConfigProps, useSsl, url, userName, passwordToUse);
}
catch (IOException ioe) {
@@ -370,8 +362,8 @@ public class ShellCommands implements CommandMarker {
// otherwise, prompt for username and password and retry the connection
try {
- userName = this.readText(gfsh, "username: ");
- passwordToUse = this.readPassword(gfsh, "password: ");
+ userName = gfsh.readText(CliStrings.CONNECT__USERNAME + ": ");
+ passwordToUse = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
return jmxConnect(sslConfigProps, hostPortToConnect, null, useSsl, userName, passwordToUse, gfSecurityPropertiesPath, true);
}
catch (IOException ioe) {
@@ -522,7 +514,7 @@ public class ShellCommands implements CommandMarker {
if (numTimesPrompted > 0) {
//NOTE: sslConfigProps map was empty
- keystoreToUse = readText(gfshInstance, CliStrings.CONNECT__KEY_STORE + ": ");
+ keystoreToUse = gfshInstance.readText(CliStrings.CONNECT__KEY_STORE + ": ");
}
if (keystoreToUse != null && keystoreToUse.length() > 0) {
if (keystorePasswordToUse == null || keystorePasswordToUse.length() == 0) {
@@ -530,7 +522,7 @@ public class ShellCommands implements CommandMarker {
keystorePasswordToUse = sslConfigProps.get(Gfsh.SSL_KEYSTORE_PASSWORD);
if (keystorePasswordToUse == null || keystorePasswordToUse.length() == 0) {
// not even in properties file, prompt user for it
- keystorePasswordToUse = readPassword(gfshInstance, CliStrings.CONNECT__KEY_STORE_PASSWORD + ": ");
+ keystorePasswordToUse = gfshInstance.readPassword(CliStrings.CONNECT__KEY_STORE_PASSWORD + ": ");
sslConfigProps.put(Gfsh.SSL_KEYSTORE_PASSWORD, keystorePasswordToUse);
}
}
@@ -541,7 +533,7 @@ public class ShellCommands implements CommandMarker {
}
if (numTimesPrompted > 0) {
- truststoreToUse = readText(gfshInstance, CliStrings.CONNECT__TRUST_STORE + ": ");
+ truststoreToUse = gfshInstance.readText(CliStrings.CONNECT__TRUST_STORE + ": ");
}
if (truststoreToUse != null && truststoreToUse.length() > 0) {
if (truststorePasswordToUse == null || truststorePasswordToUse.length() == 0) {
@@ -549,7 +541,7 @@ public class ShellCommands implements CommandMarker {
truststorePasswordToUse = sslConfigProps.get(Gfsh.SSL_TRUSTSTORE_PASSWORD);
if (truststorePasswordToUse == null || truststorePasswordToUse.length() == 0) {
// not even in properties file, prompt user for it
- truststorePasswordToUse = readPassword(gfshInstance, CliStrings.CONNECT__TRUST_STORE_PASSWORD + ": ");
+ truststorePasswordToUse = gfshInstance.readPassword(CliStrings.CONNECT__TRUST_STORE_PASSWORD + ": ");
sslConfigProps.put(Gfsh.SSL_TRUSTSTORE_PASSWORD, truststorePasswordToUse);
}
}
@@ -560,7 +552,7 @@ public class ShellCommands implements CommandMarker {
}
if (numTimesPrompted > 0) {
- sslCiphersToUse = readText(gfshInstance, CliStrings.CONNECT__SSL_CIPHERS + ": ");
+ sslCiphersToUse = gfshInstance.readText(CliStrings.CONNECT__SSL_CIPHERS + ": ");
}
if (sslCiphersToUse != null && sslCiphersToUse.length() > 0) {
//sslConfigProps.put(DistributionConfig.CLUSTER_SSL_CIPHERS_NAME, sslCiphersToUse);
@@ -568,7 +560,7 @@ public class ShellCommands implements CommandMarker {
}
if (numTimesPrompted > 0) {
- sslProtocolsToUse = readText(gfshInstance, CliStrings.CONNECT__SSL_PROTOCOLS + ": ");
+ sslProtocolsToUse = gfshInstance.readText(CliStrings.CONNECT__SSL_PROTOCOLS + ": ");
}
if (sslProtocolsToUse != null && sslProtocolsToUse.length() > 0) {
//sslConfigProps.put(DistributionConfig.CLUSTER_SSL_PROTOCOLS_NAME, sslProtocolsToUse);
@@ -585,24 +577,6 @@ public class ShellCommands implements CommandMarker {
return CliStrings.format(CliStrings.GFSH__PLEASE_CHECK_LOGS_AT_0, logFilePath);
}
- private String readText(Gfsh gfsh, String textToPrompt) throws IOException {
- if (!gfsh.isHeadlessMode() || !gfsh.isQuietMode()) {
- return gfsh.interact(textToPrompt);
- }
- else {
- return null;
- }
- }
-
- private String readPassword(Gfsh gfsh, String textToPrompt) throws IOException {
- if (!gfsh.isHeadlessMode() || !gfsh.isQuietMode()) {
- return gfsh.readWithMask(textToPrompt, '*');
- }
- else {
- return null;
- }
- }
-
/* package-private */
static Map<String, String> loadPropertiesFromURL(URL gfSecurityPropertiesUrl) {
Map<String, String> propsMap = Collections.emptyMap();
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
index 51887cf..0a6330a 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
@@ -16,6 +16,10 @@
*/
package org.apache.geode.management.internal.cli.i18n;
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.text.MessageFormat;
+
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.distributed.ConfigurationProperties;
@@ -24,10 +28,6 @@ import org.apache.geode.distributed.internal.SharedConfiguration;
import org.apache.geode.internal.cache.xmlcache.CacheXml;
import org.apache.geode.management.internal.cli.shell.Gfsh;
-import java.text.MessageFormat;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
/**
 * Contains 'String' constants used as key to the Localized strings to be used
 * in classes under <code>org.apache.geode.management.internal.cli</code>
@@ -2203,6 +2203,11 @@ public class CliStrings {
public static final String START_SERVER__HTTP_SERVICE_PORT__HELP = "Port on which the HTTP Service will listen";
public static final String START_SERVER__HTTP_SERVICE_BIND_ADDRESS = "http-service-bind-address";
public static final String START_SERVER__HTTP_SERVICE_BIND_ADDRESS__HELP = "The IP address on which the HTTP Service will be bound. By default, the Server is bound to all local addresses.";
+ public static final String START_SERVER__USERNAME = "user";
+ public static final String START_SERVER__USERNAME__HELP = "User name to securely connect to the cluster. If the --password parameter is not specified then it will be prompted for.";
+ public static final String START_SERVER__PASSWORD = "password";
+ public static final String START_SERVER__PASSWORD__HELP = "Password to securely connect to the cluster.";
+ public static final String START_SERVER__MSG__PASSWORD_MUST_BE_SPECIFIED = "password must be specified.";
/**
* Creates a MessageFormat with the given pattern and uses it to format the given argument.
*
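The new constants add a --user/--password pair to start-server; per the help text above, a password not supplied on the command line is prompted for. A hedged sketch of that flow, not the commit's actual command code (the helper name is illustrative; the CliStrings constants and Gfsh#readPassword come from the diffs in this mail):

    // Hedged sketch only; resolveServerPassword is illustrative, not from the commit.
    static String resolveServerPassword(Gfsh gfsh, String userName, String password)
        throws IOException {
      if (userName != null && !userName.isEmpty()
          && (password == null || password.isEmpty())) {
        // Gfsh#readPassword (added below) returns null in headless quiet mode
        password = gfsh.readPassword(CliStrings.START_SERVER__PASSWORD + ": ");
      }
      // caller reports START_SERVER__MSG__PASSWORD_MUST_BE_SPECIFIED if still empty
      return password;
    }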
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
index 467682d..e729f20 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
@@ -20,7 +20,6 @@ import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
-import java.io.InputStream;
import java.io.PrintStream;
import java.net.URL;
import java.text.MessageFormat;
@@ -37,6 +36,18 @@ import java.util.logging.Level;
import java.util.logging.LogManager;
import java.util.logging.Logger;
+import jline.Terminal;
+import jline.console.ConsoleReader;
+import org.springframework.shell.core.AbstractShell;
+import org.springframework.shell.core.CommandMarker;
+import org.springframework.shell.core.Converter;
+import org.springframework.shell.core.ExecutionStrategy;
+import org.springframework.shell.core.ExitShellRequest;
+import org.springframework.shell.core.JLineLogHandler;
+import org.springframework.shell.core.JLineShell;
+import org.springframework.shell.core.Parser;
+import org.springframework.shell.event.ShellStatus.Status;
+
import org.apache.geode.internal.Banner;
import org.apache.geode.internal.GemFireVersion;
import org.apache.geode.internal.lang.ClassUtils;
@@ -62,19 +73,6 @@ import org.apache.geode.management.internal.cli.shell.jline.GfshUnsupportedTermi
import org.apache.geode.management.internal.cli.shell.unsafe.GfshSignalHandler;
import org.apache.geode.management.internal.cli.util.CommentSkipHelper;
-import org.springframework.shell.core.AbstractShell;
-import org.springframework.shell.core.CommandMarker;
-import org.springframework.shell.core.Converter;
-import org.springframework.shell.core.ExecutionStrategy;
-import org.springframework.shell.core.ExitShellRequest;
-import org.springframework.shell.core.JLineLogHandler;
-import org.springframework.shell.core.JLineShell;
-import org.springframework.shell.core.Parser;
-import org.springframework.shell.event.ShellStatus.Status;
-
-import jline.Terminal;
-import jline.console.ConsoleReader;
-
/**
* Extends an interactive shell provided by <a
* href="https://github.com/SpringSource/spring-shell">Spring Shell</a> library.
@@ -324,6 +322,21 @@ public class Gfsh extends JLineShell {
return signalHandler;
}
+ public String readPassword(String textToPrompt) throws IOException {
+ if (isHeadlessMode && isQuietMode())
+ return null;
+
+ return readWithMask(textToPrompt, '*');
+ }
+
+ public String readText(String textToPrompt) throws IOException {
+ if (isHeadlessMode && isQuietMode())
+ return null;
+
+ return interact(textToPrompt);
+ }
+
/**
* Starts this GemFire Shell with console.
*/
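The relocated helpers keep the contract of the ShellCommands originals: when gfsh runs headless and quiet they return null instead of prompting, so callers must tolerate a null answer. A hedged usage sketch (the fallback comment is illustrative; Gfsh.getCurrentInstance() is the usual accessor):

    Gfsh gfsh = Gfsh.getCurrentInstance();
    String user = gfsh.readText(CliStrings.CONNECT__USERNAME + ": ");
    String password = gfsh.readPassword(CliStrings.CONNECT__PASSWORD + ": ");
    if (user == null || password == null) {
      // headless + quiet: no console to prompt on, so fail fast or fall back
    }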
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/b2e77685/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
index 28083f3..40c28d2 100644
--- a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
+++ b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
@@ -2522,6 +2522,7 @@ SYNTAX\n\
\ \ \ \ [--server-port=value] [--socket-buffer-size=value] [--spring-xml-location=value]\n\
\ \ \ \ [--statistic-archive-file=value] [--use-cluster-configuration(=value)?]\n\
\ \ \ \ [--start-rest-api(=value)?] [--http-service-port=value] [--http-service-bind-address=value]\n\
+\ \ \ \ [--user=value] [--password=value]\n\
PARAMETERS\n\
\ \ \ \ assign-buckets\n\
\ \ \ \ \ \ \ \ Whether to assign buckets to the partitioned regions of the cache on server start.\n\
@@ -2735,6 +2736,14 @@ PARAMETERS\n\
\ \ \ \ \ \ \ \ The IP address on which the HTTP Service will be bound. By default, the Server is bound to\n\
\ \ \ \ \ \ \ \ all local addresses.\n\
\ \ \ \ \ \ \ \ Required: false\n\
+\ \ \ \ user\n\
+\ \ \ \ \ \ \ \ User name to securely connect to the cluster. If the --password parameter is not specified\n\
+\ \ \ \ \ \ \ \ then it will be prompted for.\n\
+\ \ \ \ \ \ \ \ Required: false\n\
+\ \ \ \ password\n\
+\ \ \ \ \ \ \ \ Password to securely connect to the cluster.\n\
+\ \ \ \ \ \ \ \ Required: false\n\
+
start-vsd.help=\
NAME\n\
[09/15] incubator-geode git commit: GEODE-2020: for rest api get request, use utf-8 as response encoding.
Posted by kl...@apache.org.
GEODE-2020: for rest api get request, use utf-8 as response encoding.
* add more test assertions.
* fix legacy tests
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/fadd92b0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/fadd92b0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/fadd92b0
Branch: refs/heads/feature/GEODE-2012
Commit: fadd92b0556ac6d3a48ffccbf64100fd94689e62
Parents: af55d92
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Oct 20 15:28:50 2016 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Fri Oct 21 10:37:54 2016 -0700
----------------------------------------------------------------------
.../rest/internal/web/GeodeRestClient.java | 148 +++++++
.../web/RestSecurityIntegrationTest.java | 410 ++++++-------------
.../web/controllers/CommonCrudController.java | 6 +-
.../controllers/FunctionAccessController.java | 2 +-
.../web/controllers/PdxBasedCrudController.java | 4 +-
.../web/controllers/QueryAccessController.java | 4 +-
6 files changed, 290 insertions(+), 284 deletions(-)
----------------------------------------------------------------------
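The controller changes below all follow one pattern: GET mappings that previously produced MediaType.APPLICATION_JSON_VALUE now declare MediaType.APPLICATION_JSON_UTF8_VALUE ("application/json;charset=UTF-8"), so response bodies are written with an explicit UTF-8 charset. A minimal Spring MVC sketch of the pattern (the endpoint and body are illustrative; only the produces attribute mirrors the commit):

    import org.springframework.http.HttpStatus;
    import org.springframework.http.MediaType;
    import org.springframework.http.ResponseEntity;
    import org.springframework.web.bind.annotation.RequestMapping;
    import org.springframework.web.bind.annotation.RequestMethod;

    // Illustrative endpoint; only the 'produces' attribute mirrors the commit.
    @RequestMapping(method = RequestMethod.GET, value = "/ping",
        produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
    public ResponseEntity<String> ping() {
      // the body is encoded with the UTF-8 charset declared above
      return new ResponseEntity<>("{\"status\":\"pong\"}", HttpStatus.OK);
    }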
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
new file mode 100644
index 0000000..c83cebb
--- /dev/null
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/GeodeRestClient.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.geode.rest.internal.web;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.AuthCache;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.auth.BasicScheme;
+import org.apache.http.impl.client.BasicAuthCache;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.json.JSONTokener;
+import org.junit.Assert;
+
+public class GeodeRestClient {
+
+ public final static String PROTOCOL = "http";
+ public final static String HOSTNAME = "localhost";
+ public final static String CONTEXT = "/geode/v1";
+
+ private int restPort = 0;
+ public GeodeRestClient(int restPort) {
+ this.restPort = restPort;
+ }
+
+ public HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
+ HttpHead httpHead = new HttpHead(CONTEXT + query);
+ return doRequest(httpHead, username, password);
+ }
+
+ public HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
+ HttpPost httpPost = new HttpPost(CONTEXT + query);
+ httpPost.addHeader("content-type", "application/json");
+ httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+ return doRequest(httpPost, username, password);
+ }
+
+ public HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
+ HttpPut httpPut = new HttpPut(CONTEXT + query);
+ httpPut.addHeader("content-type", "application/json");
+ httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
+ return doRequest(httpPut, username, password);
+ }
+
+ public HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
+ HttpGet getRequest = new HttpGet(CONTEXT + uri);
+ return doRequest(getRequest, username, password);
+ }
+ public HttpResponse doGet(String uri) throws MalformedURLException {
+ return doGet(uri, null, null);
+ }
+
+ public HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
+ HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
+ return doRequest(httpDelete, username, password);
+ }
+
+ public static String getContentType(HttpResponse response){
+ return response.getEntity().getContentType().getValue();
+ }
+
+ /**
+ * Retrieve the status code of the HttpResponse
+ *
+ * @param response The HttpResponse message received from the server
+ *
+ * @return the numeric HTTP status code of the response
+ */
+ public static int getCode(HttpResponse response) {
+ return response.getStatusLine().getStatusCode();
+ }
+
+ public static JSONTokener getResponseBody(HttpResponse response) throws IOException {
+ HttpEntity entity = response.getEntity();
+ InputStream content = entity.getContent();
+ BufferedReader reader = new BufferedReader(new InputStreamReader(content));
+ String line;
+ StringBuilder str = new StringBuilder();
+ while ((line = reader.readLine()) != null) {
+ str.append(line);
+ }
+ return new JSONTokener(str.toString());
+ }
+
+ private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
+ HttpHost targetHost = new HttpHost(HOSTNAME, restPort, PROTOCOL);
+ CloseableHttpClient httpclient = HttpClients.custom().build();
+ HttpClientContext clientContext = HttpClientContext.create();
+ // if username is null, do not put in authentication
+ if (username != null) {
+ CredentialsProvider credsProvider = new BasicCredentialsProvider();
+ credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
+ httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
+ AuthCache authCache = new BasicAuthCache();
+ BasicScheme basicAuth = new BasicScheme();
+ authCache.put(targetHost, basicAuth);
+ clientContext.setCredentialsProvider(credsProvider);
+ clientContext.setAuthCache(authCache);
+ }
+
+ try {
+ return httpclient.execute(targetHost, request, clientContext);
+ } catch (ClientProtocolException e) {
+ e.printStackTrace();
+ Assert.fail("Rest GET should not have thrown ClientProtocolException!");
+ } catch (IOException e) {
+ e.printStackTrace();
+ Assert.fail("Rest GET Request should not have thrown IOException!");
+ }
+ return null;
+ }
+}
\ No newline at end of file
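A short usage sketch for the new helper, mirroring the calls RestSecurityIntegrationTest makes below (credentials and endpoint are taken from that test; the snippet itself is illustrative):

    GeodeRestClient restClient = new GeodeRestClient(restPort);
    HttpResponse response = restClient.doGet("/servers", "super-user", "1234567");
    assertEquals(200, GeodeRestClient.getCode(response));
    assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, GeodeRestClient.getContentType(response));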
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
index 6e91894..5f66f3b 100644
--- a/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
+++ b/geode-assembly/src/test/java/org/apache/geode/rest/internal/web/RestSecurityIntegrationTest.java
@@ -19,42 +19,16 @@ package org.apache.geode.rest.internal.web;
import static org.apache.geode.distributed.ConfigurationProperties.*;
import static org.junit.Assert.*;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.MalformedURLException;
-import java.nio.charset.StandardCharsets;
import java.util.Properties;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
-import org.apache.http.auth.AuthScope;
-import org.apache.http.auth.UsernamePasswordCredentials;
-import org.apache.http.client.AuthCache;
-import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.CredentialsProvider;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.client.protocol.HttpClientContext;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.auth.BasicScheme;
-import org.apache.http.impl.client.BasicAuthCache;
-import org.apache.http.impl.client.BasicCredentialsProvider;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
import org.json.JSONArray;
import org.json.JSONObject;
-import org.json.JSONTokener;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.springframework.http.MediaType;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.internal.AvailablePortHelper;
@@ -69,10 +43,6 @@ public class RestSecurityIntegrationTest {
protected static final String REGION_NAME = "AuthRegion";
- public final static String PROTOCOL = "http";
- public final static String HOSTNAME = "localhost";
- public final static String CONTEXT = "/geode/v1";
-
private static int restPort = AvailablePortHelper.getRandomAvailableTCPPort();
static Properties properties = new Properties() {{
setProperty(SampleSecurityManager.SECURITY_JSON, "org/apache/geode/management/internal/security/clientServer.json");
@@ -84,6 +54,7 @@ public class RestSecurityIntegrationTest {
@ClassRule
public static ServerStarter serverStarter = new ServerStarter(properties);
+ private final GeodeRestClient restClient = new GeodeRestClient(restPort);
@BeforeClass
public static void before() throws Exception {
@@ -95,95 +66,99 @@ public class RestSecurityIntegrationTest {
public void testFunctions() throws Exception {
String json = "{\"@type\":\"double\",\"@value\":210}";
- HttpResponse response = doGet("/functions", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
- response = doGet("/functions", "stranger", "1234567");
- assertEquals(403, getCode(response));
- response = doGet("/functions", "dataReader", "1234567");
- assertTrue(isOK(response));
-
- response = doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
- assertEquals(401, getCode(response));
- response = doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
- assertEquals(403, getCode(response));
- response = doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
+ HttpResponse response = restClient.doGet("/functions", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doGet("/functions", "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doGet("/functions", "dataReader", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ response.getEntity();
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
+
+ response = restClient.doPost("/functions/AddFreeItemsToOrder", "unknown-user", "1234567", json);
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doPost("/functions/AddFreeItemsToOrder", "dataReader", "1234567", json);
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doPost("/functions/AddFreeItemsToOrder?onRegion=" + REGION_NAME, "dataWriter", "1234567", json);
// because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
- assertEquals(500, getCode(response));
+ assertEquals(500, restClient.getCode(response));
}
@Test
public void testQueries() throws Exception {
- HttpResponse response = doGet("/queries", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
- response = doGet("/queries", "stranger", "1234567");
- assertEquals(403, getCode(response));
- response = doGet("/queries", "dataReader", "1234567");
- assertEquals(200, getCode(response));
+ HttpResponse response = restClient.doGet("/queries", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doGet("/queries", "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doGet("/queries", "dataReader", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
}
@Test
public void testAdhocQuery() throws Exception {
- HttpResponse response = doGet("/queries/adhoc?q=", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
- response = doGet("/queries/adhoc?q=", "stranger", "1234567");
- assertEquals(403, getCode(response));
- response = doGet("/queries/adhoc?q=", "dataReader", "1234567");
+ HttpResponse response = restClient.doGet("/queries/adhoc?q=", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doGet("/queries/adhoc?q=", "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doGet("/queries/adhoc?q=", "dataReader", "1234567");
// because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
- assertEquals(500, getCode(response));
+ assertEquals(500, restClient.getCode(response));
}
@Test
public void testPostQuery() throws Exception {
- HttpResponse response = doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
- assertEquals(401, getCode(response));
- response = doPost("/queries?id=0&q=", "stranger", "1234567", "");
- assertEquals(403, getCode(response));
- response = doPost("/queries?id=0&q=", "dataReader", "1234567", "");
+ HttpResponse response = restClient.doPost("/queries?id=0&q=", "unknown-user", "1234567", "");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doPost("/queries?id=0&q=", "stranger", "1234567", "");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doPost("/queries?id=0&q=", "dataReader", "1234567", "");
// because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
- assertEquals(500, getCode(response));
+ assertEquals(500, restClient.getCode(response));
}
@Test
public void testPostQuery2() throws Exception {
- HttpResponse response = doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
- assertEquals(401, getCode(response));
- response = doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
- assertEquals(403, getCode(response));
- response = doPost("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
+ HttpResponse response = restClient.doPost("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doPost("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doPost("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
// because we're only testing the security of the endpoint, not the endpoint functionality, a 500 is acceptable
- assertEquals(500, getCode(response));
+ assertEquals(500, restClient.getCode(response));
}
@Test
public void testPutQuery() throws Exception {
- HttpResponse response = doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
- assertEquals(401, getCode(response));
- response = doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
- assertEquals(403, getCode(response));
- response = doPut("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
+ HttpResponse response = restClient.doPut("/queries/id", "unknown-user", "1234567", "{\"id\" : \"foo\"}");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doPut("/queries/id", "stranger", "1234567", "{\"id\" : \"foo\"}");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doPut("/queries/id", "dataReader", "1234567", "{\"id\" : \"foo\"}");
// We should get a 404 because we're trying to update a query that doesn't exist
- assertEquals(404, getCode(response));
+ assertEquals(404, restClient.getCode(response));
}
@Test
public void testDeleteQuery() throws Exception {
- HttpResponse response = doDelete("/queries/id", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
- response = doDelete("/queries/id", "stranger", "1234567");
- assertEquals(403, getCode(response));
- response = doDelete("/queries/id", "dataWriter", "1234567");
+ HttpResponse response = restClient.doDelete("/queries/id", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doDelete("/queries/id", "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doDelete("/queries/id", "dataWriter", "1234567");
// We should get a 404 because we're trying to delete a query that doesn't exist
- assertEquals(404, getCode(response));
+ assertEquals(404, restClient.getCode(response));
}
@Test
public void testServers() throws Exception {
- HttpResponse response = doGet("/servers", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
- response = doGet("/servers", "stranger", "1234567");
- assertEquals(403, getCode(response));
- response = doGet("/servers", "super-user", "1234567");
- assertTrue(isOK(response));
+ HttpResponse response = restClient.doGet("/servers", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doGet("/servers", "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
+ response = restClient.doGet("/servers", "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
}
/**
@@ -192,27 +167,15 @@ public class RestSecurityIntegrationTest {
*/
@Test
public void testPing() throws Exception {
- HttpResponse response = doHEAD("/ping", "stranger", "1234567");
- assertTrue(isOK(response));
- response = doGet("/ping", "stranger", "1234567");
- assertTrue(isOK(response));
-
- response = doHEAD("/ping", "super-user", "1234567");
- assertTrue(isOK(response));
- response = doGet("/ping", "super-user", "1234567");
- assertTrue(isOK(response));
-
- // TODO - invalid username/password should still respond, but doesn't
- // response = doHEAD("/ping", "unknown-user", "badpassword");
- // assertTrue(isOK(response));
- // response = doGet("/ping", "unknown-user", "badpassword");
- // assertTrue(isOK(response));
-
- // TODO - credentials are currently required and shouldn't be for this endpoint
- // response = doHEAD("/ping", null, null);
- // assertTrue(isOK(response));
- // response = doGet("/ping", null, null);
- // assertTrue(isOK(response));
+ HttpResponse response = restClient.doHEAD("/ping", "stranger", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ response = restClient.doGet("/ping", "stranger", "1234567");
+ assertEquals(200, restClient.getCode(response));
+
+ response = restClient.doHEAD("/ping", "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ response = restClient.doGet("/ping", "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
}
/**
@@ -220,11 +183,11 @@ public class RestSecurityIntegrationTest {
*/
@Test
public void getRegions() throws Exception {
- HttpResponse response = doGet("", "dataReader", "1234567");
- assertEquals("A '200 - OK' was expected", 200, getCode(response));
+ HttpResponse response = restClient.doGet("", "dataReader", "1234567");
+ assertEquals("A '200 - OK' was expected", 200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
- assertTrue(isOK(response));
- JSONObject jsonObject = new JSONObject(getResponseBody(response));
+ JSONObject jsonObject = new JSONObject(restClient.getResponseBody(response));
JSONArray regions = jsonObject.getJSONArray("regions");
assertNotNull(regions);
assertTrue(regions.length() > 0);
@@ -233,12 +196,12 @@ public class RestSecurityIntegrationTest {
assertEquals("REPLICATE", region.get("type"));
// List regions with an unknown user - 401
- response = doGet("", "unknown-user", "badpassword");
- assertEquals(401, getCode(response));
+ response = restClient.doGet("", "unknown-user", "badpassword");
+ assertEquals(401, restClient.getCode(response));
// list regions with insufficient rights - 403
- response = doGet("", "authRegionReader", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doGet("", "authRegionReader", "1234567");
+ assertEquals(403, restClient.getCode(response));
}
/**
@@ -247,16 +210,17 @@ public class RestSecurityIntegrationTest {
@Test
public void getRegion() throws Exception {
// Test an unknown user - 401 error
- HttpResponse response = doGet("/" + REGION_NAME, "unknown-user", "1234567");
- assertEquals(401, getCode(response));
+ HttpResponse response = restClient.doGet("/" + REGION_NAME, "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
// Test a user with insufficient rights - 403
- response = doGet("/" + REGION_NAME, "stranger", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doGet("/" + REGION_NAME, "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
// Test an authorized user - 200
- response = doGet("/" + REGION_NAME, "super-user", "1234567");
- assertTrue(isOK(response));
+ response = restClient.doGet("/" + REGION_NAME, "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
}
/**
@@ -265,16 +229,16 @@ public class RestSecurityIntegrationTest {
@Test
public void headRegion() throws Exception {
// Test an unknown user - 401 error
- HttpResponse response = doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
- assertEquals(401, getCode(response));
+ HttpResponse response = restClient.doHEAD("/" + REGION_NAME, "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
// Test a user with insufficient rights - 403
- response = doHEAD("/" + REGION_NAME, "stranger", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doHEAD("/" + REGION_NAME, "stranger", "1234567");
+ assertEquals(403, restClient.getCode(response));
// Test an authorized user - 200
- response = doHEAD("/" + REGION_NAME, "super-user", "1234567");
- assertTrue(isOK(response));
+ response = restClient.doHEAD("/" + REGION_NAME, "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
}
/**
@@ -283,12 +247,12 @@ public class RestSecurityIntegrationTest {
@Test
public void deleteRegion() throws Exception {
// Test an unknown user - 401 error
- HttpResponse response = doDelete("/" + REGION_NAME, "unknown-user", "1234567");
- assertEquals(401, getCode(response));
+ HttpResponse response = restClient.doDelete("/" + REGION_NAME, "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
// Test a user with insufficient rights - 403
- response = doDelete("/" + REGION_NAME, "dataReader", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doDelete("/" + REGION_NAME, "dataReader", "1234567");
+ assertEquals(403, restClient.getCode(response));
}
/**
@@ -297,11 +261,12 @@ public class RestSecurityIntegrationTest {
@Test
public void getRegionKeys() throws Exception {
// Test an authorized user
- HttpResponse response = doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
- assertTrue(isOK(response));
+ HttpResponse response = restClient.doGet("/" + REGION_NAME + "/keys", "super-user", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
// Test an unauthorized user
- response = doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doGet("/" + REGION_NAME + "/keys", "dataWriter", "1234567");
+ assertEquals(403, restClient.getCode(response));
}
/**
@@ -310,11 +275,13 @@ public class RestSecurityIntegrationTest {
@Test
public void getRegionKey() throws Exception {
// Test an authorized user
- HttpResponse response = doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
- assertTrue(isOK(response));
+ HttpResponse response = restClient.doGet("/" + REGION_NAME + "/key1", "key1User", "1234567");
+ assertEquals(200, restClient.getCode(response));
+ assertEquals(MediaType.APPLICATION_JSON_UTF8_VALUE, restClient.getContentType(response));
+
// Test an unauthorized user
- response = doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doGet("/" + REGION_NAME + "/key1", "dataWriter", "1234567");
+ assertEquals(403, restClient.getCode(response));
}
/**
@@ -323,16 +290,16 @@ public class RestSecurityIntegrationTest {
@Test
public void deleteRegionKey() throws Exception {
// Test an unknown user - 401 error
- HttpResponse response = doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
- assertEquals(401, getCode(response));
+ HttpResponse response = restClient.doDelete("/" + REGION_NAME + "/key1", "unknown-user", "1234567");
+ assertEquals(401, restClient.getCode(response));
// Test a user with insufficient rights - 403
- response = doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
- assertEquals(403, getCode(response));
+ response = restClient.doDelete("/" + REGION_NAME + "/key1", "dataReader", "1234567");
+ assertEquals(403, restClient.getCode(response));
// Test an authorized user - 200
- response = doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
- assertTrue(isOK(response));
+ response = restClient.doDelete("/" + REGION_NAME + "/key1", "key1User", "1234567");
+ assertEquals(200, restClient.getCode(response));
}
/**
@@ -341,17 +308,16 @@ public class RestSecurityIntegrationTest {
@Test
public void postRegionKey() throws Exception {
// Test an unknown user - 401 error
- HttpResponse response = doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
- assertEquals(401, getCode(response));
+ HttpResponse response = restClient.doPost("/" + REGION_NAME + "?key9", "unknown", "1234567", "{ \"key9\" : \"foo\" }");
+ assertEquals(401, restClient.getCode(response));
// Test a user with insufficient rights - 403
- response = doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
- assertEquals(403, getCode(response));
+ response = restClient.doPost("/" + REGION_NAME + "?key9", "dataReader", "1234567", "{ \"key9\" : \"foo\" }");
+ assertEquals(403, restClient.getCode(response));
// Test an authorized user - 200
- response = doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
- assertEquals(201, getCode(response));
- assertTrue(isOK(response));
+ response = restClient.doPost("/" + REGION_NAME + "?key9", "dataWriter", "1234567", "{ \"key9\" : \"foo\" }");
+ assertEquals(201, restClient.getCode(response));
}
/**
@@ -363,135 +329,27 @@ public class RestSecurityIntegrationTest {
String json = "{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225}";
String casJSON = "{\"@old\":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1012,\"description\":\"Order for XYZ Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/20/2014\",\"contact\":\"Jelly Bean\",\"email\":\"jelly.bean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":1,\"description\":\"Product-100\",\"quantity\":12,\"unitPrice\":5,\"totalPrice\":60}],\"totalPrice\":225},\"@new \":{\"@type\":\"com.gemstone.gemfire.web.rest.domain.Order\",\"purchaseOrderNo\":1121,\"customerId\":1013,\"description\":\"Order for New Corp\",\"orderDate\":\"02/10/2014\",\"deliveryDate\":\"02/25/2014\",\"contact\":\"Vanilla Bean\",\"email\":\"vanillabean@example.com\",\"phone\":\"01-2048096\",\"items\":[{\"itemNo\":12345,\"description\":\"part 123\",\"quantity\":12,\"unitPrice\":29.99,\"totalPrice\":149.95}],\"totalPrice\":149.95}}";
// Test an unknown user - 401 error
- HttpResponse response = doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
- assertEquals(401, getCode(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
- assertEquals(401, getCode(response));
- response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
- assertEquals(401, getCode(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
- assertEquals(403, getCode(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
- assertEquals(403, getCode(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
- assertEquals(403, getCode(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
- assertEquals(200, getCode(response));
- assertTrue(isOK(response));
-
- response = doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
- assertEquals(200, getCode(response));
- assertTrue(isOK(response));
- }
-
- protected HttpResponse doHEAD(String query, String username, String password) throws MalformedURLException {
- HttpHead httpHead = new HttpHead(CONTEXT + query);
- return doRequest(httpHead, username, password);
- }
-
-
- protected HttpResponse doPost(String query, String username, String password, String body) throws MalformedURLException {
- HttpPost httpPost = new HttpPost(CONTEXT + query);
- httpPost.addHeader("content-type", "application/json");
- httpPost.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
- return doRequest(httpPost, username, password);
- }
-
+ HttpResponse response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+ assertEquals(401, restClient.getCode(response));
- protected HttpResponse doPut(String query, String username, String password, String body) throws MalformedURLException {
- HttpPut httpPut = new HttpPut(CONTEXT + query);
- httpPut.addHeader("content-type", "application/json");
- httpPut.setEntity(new StringEntity(body, StandardCharsets.UTF_8));
- return doRequest(httpPut, username, password);
- }
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=CAS", "unknown-user", "1234567", "{ \"key9\" : \"foo\" }");
+ assertEquals(401, restClient.getCode(response));
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "unknown-user", "1234567", "{ \"@old\" : \"value1\", \"@new\" : \"CASvalue\" }");
+ assertEquals(401, restClient.getCode(response));
- protected HttpResponse doGet(String uri, String username, String password) throws MalformedURLException {
- HttpGet getRequest = new HttpGet(CONTEXT + uri);
- return doRequest(getRequest, username, password);
- }
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+ assertEquals(403, restClient.getCode(response));
- protected HttpResponse doDelete(String uri, String username, String password) throws MalformedURLException {
- HttpDelete httpDelete = new HttpDelete(CONTEXT + uri);
- return doRequest(httpDelete, username, password);
- }
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "dataReader", "1234567", "{ \"key1\" : \"foo\" }");
+ assertEquals(403, restClient.getCode(response));
- /**
- * Check the HTTP status of the response and return if it's within the OK range
- *
- * @param response The HttpResponse message received from the server
- *
- * @return true if the status code is a 2XX-type code (200-299), otherwise false
- */
- protected boolean isOK(HttpResponse response) {
- int returnCode = response.getStatusLine().getStatusCode();
- return (returnCode < 300 && returnCode >= 200);
- }
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=CAS", "dataReader", "1234567", casJSON);
+ assertEquals(403, restClient.getCode(response));
- /**
- * Check the HTTP status of the response and return true if a 401
- *
- * @param response The HttpResponse message received from the server
- *
- * @return true if the status code is 401, otherwise false
- */
- protected boolean isUnauthorized(HttpResponse response) {
- int returnCode = response.getStatusLine().getStatusCode();
- return returnCode == 401;
- }
-
- /**
- * Retrieve the status code of the HttpResponse
- *
- * @param response The HttpResponse message received from the server
- *
- * @return a numeric value
- */
- protected int getCode(HttpResponse response) {
- return response.getStatusLine().getStatusCode();
- }
-
- protected JSONTokener getResponseBody(HttpResponse response) throws IOException {
- HttpEntity entity = response.getEntity();
- InputStream content = entity.getContent();
- BufferedReader reader = new BufferedReader(new InputStreamReader(content));
- String line;
- StringBuilder str = new StringBuilder();
- while ((line = reader.readLine()) != null) {
- str.append(line);
- }
- return new JSONTokener(str.toString());
- }
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=PUT", "key1User", "1234567", "{ \"key1\" : \"foo\" }");
+ assertEquals(200, restClient.getCode(response));
- private HttpResponse doRequest(HttpRequestBase request, String username, String password) throws MalformedURLException {
- HttpHost targetHost = new HttpHost(HOSTNAME, this.restPort, PROTOCOL);
- CloseableHttpClient httpclient = HttpClients.custom().build();
- HttpClientContext clientContext = HttpClientContext.create();
- // if username is null, do not put in authentication
- if (username != null) {
- CredentialsProvider credsProvider = new BasicCredentialsProvider();
- credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()), new UsernamePasswordCredentials(username, password));
- httpclient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
- AuthCache authCache = new BasicAuthCache();
- BasicScheme basicAuth = new BasicScheme();
- authCache.put(targetHost, basicAuth);
- clientContext.setCredentialsProvider(credsProvider);
- clientContext.setAuthCache(authCache);
- }
-
- try {
- return httpclient.execute(targetHost, request, clientContext);
- } catch (ClientProtocolException e) {
- e.printStackTrace();
- fail("Rest GET should not have thrown ClientProtocolException!");
- } catch (IOException e) {
- e.printStackTrace();
- fail("Rest GET Request should not have thrown IOException!");
- }
- return null;
+ response = restClient.doPut("/" + REGION_NAME + "/key1?op=REPLACE", "key1User", "1234567", json);
+ assertEquals(200, restClient.getCode(response));
}
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
index 30c8b3a..935b3ad 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/CommonCrudController.java
@@ -63,7 +63,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
*
* @return JSON document containing result
*/
- @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE, MediaType.APPLICATION_JSON_VALUE })
+ @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_UTF8_VALUE})
@ApiOperation(
value = "list all resources (Regions)",
notes = "List all available resources (Regions) in the GemFire cluster",
@@ -92,7 +92,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
* @return JSON document containing result
*/
@RequestMapping(method = RequestMethod.GET, value = "/{region}/keys",
- produces = { MediaType.APPLICATION_JSON_VALUE } )
+ produces = { MediaType.APPLICATION_JSON_UTF8_VALUE } )
@ApiOperation(
value = "list all keys",
notes = "List all keys in region",
@@ -198,7 +198,7 @@ public abstract class CommonCrudController extends AbstractBaseController {
return new ResponseEntity<>(HttpStatus.OK);
}
- @RequestMapping(method = { RequestMethod.GET }, value = "/servers")
+ @RequestMapping(method = { RequestMethod.GET }, value = "/servers", produces = { MediaType.APPLICATION_JSON_UTF8_VALUE } )
@ApiOperation(
value = "fetch all REST enabled servers in the DS",
notes = "Find all gemfire node where developer REST service is up and running!",
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
index e1ea1ad..831083e 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/FunctionAccessController.java
@@ -86,7 +86,7 @@ public class FunctionAccessController extends AbstractBaseController {
*
* @return result as a JSON document.
*/
- @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE })
+ @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
@ApiOperation(
value = "list all functions",
notes = "list all functions available in the GemFire cluster",
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
index ebb8ccc..32de04e 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/PdxBasedCrudController.java
@@ -134,7 +134,7 @@ public class PdxBasedCrudController extends CommonCrudController {
* @param limit total number of entries requested
* @return JSON document
*/
- @RequestMapping(method = RequestMethod.GET, value = "/{region}", produces = MediaType.APPLICATION_JSON_VALUE)
+ @RequestMapping(method = RequestMethod.GET, value = "/{region}", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
@ApiOperation(
value = "read all data for region",
notes = "Read all data for region. Use limit param to get fixed or limited number of entries.",
@@ -213,7 +213,7 @@ public class PdxBasedCrudController extends CommonCrudController {
* @return JSON document
*/
@RequestMapping(method = RequestMethod.GET, value = "/{region}/{keys}",
- produces = MediaType.APPLICATION_JSON_VALUE)
+ produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
@ApiOperation(
value = "read data for specific keys",
notes = "Read data for specific set of keys in region.",
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/fadd92b0/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
----------------------------------------------------------------------
diff --git a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
index d13c99c..e5287b9 100644
--- a/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
+++ b/geode-web-api/src/main/java/org/apache/geode/rest/internal/web/controllers/QueryAccessController.java
@@ -91,7 +91,7 @@ public class QueryAccessController extends AbstractBaseController {
* list all parametrized Queries created in a Gemfire data node
* @return result as a JSON document.
*/
- @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_VALUE })
+ @RequestMapping(method = RequestMethod.GET, produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
@ApiOperation(
value = "list all parametrized queries",
notes = "List all parametrized queries by id/name",
@@ -165,7 +165,7 @@ public class QueryAccessController extends AbstractBaseController {
* @param oql OQL query string to be executed
* @return query result as a JSON document
*/
- @RequestMapping(method = RequestMethod.GET, value = "/adhoc", produces = { MediaType.APPLICATION_JSON_VALUE })
+ @RequestMapping(method = RequestMethod.GET, value = "/adhoc", produces = { MediaType.APPLICATION_JSON_UTF8_VALUE })
@ApiOperation(
value = "run an adhoc query",
notes = "Run an unnamed (unidentified), ad-hoc query passed as a URL parameter",
[12/15] incubator-geode git commit: GEODE-2012: always write stat types to archive
Posted by kl...@apache.org.
GEODE-2012: always write stat types to archive
* write additional tests for stat archive rolling
* expose bug GEODE-2012 in StatTypesAreRolledOverRegressionTest
* fix failure in StatTypesAreRolledOverRegressionTest
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5ab02de6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5ab02de6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5ab02de6
Branch: refs/heads/feature/GEODE-2012
Commit: 5ab02de6c63eb70245fa8b2cff174e66a5fe73b5
Parents: 34834ce
Author: Kirk Lund <kl...@apache.org>
Authored: Fri Oct 21 16:11:00 2016 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Oct 26 10:19:14 2016 -0700
----------------------------------------------------------------------
.../org/apache/geode/internal/NanoTimer.java | 6 +-
.../internal/statistics/HostStatSampler.java | 22 +-
.../internal/statistics/SampleCollector.java | 10 +-
.../internal/statistics/SimpleStatSampler.java | 7 +-
.../DiskSpaceLimitIntegrationTest.java | 156 +++++++++++++
.../FileSizeLimitIntegrationTest.java | 140 +++++++++++
.../StatTypesAreRolledOverRegressionTest.java | 231 +++++++++++++++++++
7 files changed, 557 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
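The diffs below make the sampler clock injectable: NanoTimer loses its final modifier and gains a protected NanoTimer(TimeService) constructor, and HostStatSampler keeps the timer in a field instead of creating one inside run(). A test can therefore drive sampling and archive rolling against a deterministic clock. A hedged sketch, assuming the test class sits in org.apache.geode.internal so the protected constructor is visible (everything except NanoTimer, TimeService, getTime() and reset() is illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    final AtomicLong fakeNanos = new AtomicLong(0);
    // TimeService exposes getTime(), so an AtomicLong method reference can back it
    NanoTimer timer = new NanoTimer(fakeNanos::get) {};
    fakeNanos.addAndGet(1_000_000L); // advance the clock exactly one millisecond
    timer.reset();                   // the timer now observes the advanced time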
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/main/java/org/apache/geode/internal/NanoTimer.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/NanoTimer.java b/geode-core/src/main/java/org/apache/geode/internal/NanoTimer.java
index e8c1145..67e0bb1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/NanoTimer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/NanoTimer.java
@@ -41,7 +41,7 @@ package org.apache.geode.internal;
* </pre>
*
*/
-public final class NanoTimer {
+public class NanoTimer {
public static final long NANOS_PER_MILLISECOND = 1000000;
@@ -76,7 +76,7 @@ public final class NanoTimer {
/**
* For unit testing
*/
- NanoTimer(TimeService ts) {
+ protected NanoTimer(TimeService ts) {
this.timeService = ts;
this.lastResetTime = ts.getTime();
this.constructionTime = this.lastResetTime;
@@ -174,7 +174,7 @@ public final class NanoTimer {
/**
* Allows unit tests to insert a deterministic clock for testing.
*/
- interface TimeService {
+ public interface TimeService {
/**
* Returns the current time.
*/
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
index 2ec32e7..a3cb046 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/HostStatSampler.java
@@ -85,16 +85,25 @@ public abstract class HostStatSampler
private final CancelCriterion stopper;
private final CallbackSampler callbackSampler;
+
+ private final NanoTimer timer;
protected HostStatSampler(CancelCriterion stopper,
StatSamplerStats samplerStats) {
+ this(stopper, samplerStats, new NanoTimer());
+ }
+
+ protected HostStatSampler(CancelCriterion stopper,
+ StatSamplerStats samplerStats,
+ NanoTimer timer) {
this.stopper = stopper;
this.statSamplerInitializedLatch = new StoppableCountDownLatch(this.stopper, 1);
this.samplerStats = samplerStats;
this.fileSizeLimitInKB = Boolean.getBoolean(TEST_FILE_SIZE_LIMIT_IN_KB_PROPERTY);
this.callbackSampler = new CallbackSampler(stopper, samplerStats);
+ this.timer = timer;
}
-
+
public final StatSamplerStats getStatSamplerStats() {
return this.samplerStats;
}
@@ -169,8 +178,6 @@ public abstract class HostStatSampler
*/
@Override
public final void run() {
- NanoTimer timer = new NanoTimer();
-
final boolean isDebugEnabled_STATISTICS = logger.isTraceEnabled(LogMarker.STATISTICS);
if (isDebugEnabled_STATISTICS) {
logger.trace(LogMarker.STATISTICS, "HostStatSampler started");
@@ -180,7 +187,7 @@ public abstract class HostStatSampler
initSpecialStats();
this.sampleCollector = new SampleCollector(this);
- this.sampleCollector.initialize(this, NanoTimer.getTime());
+ this.sampleCollector.initialize(this, timer.getTime());
this.statSamplerInitializedLatch.countDown();
latchCountedDown = true;
@@ -195,7 +202,7 @@ public abstract class HostStatSampler
}
final long nanosBeforeSleep = timer.getLastResetTime();
final long nanosToDelay = nanosLastTimeStamp + getNanoRate();
- delay(timer, nanosToDelay);
+ delay(nanosToDelay);
nanosLastTimeStamp = timer.getLastResetTime();
if (!stopRequested() && isSamplingEnabled()) {
final long nanosTimeStamp = timer.getLastResetTime();
@@ -381,7 +388,7 @@ public abstract class HostStatSampler
}
public final void changeArchive(File newFile) {
- this.sampleCollector.changeArchive(newFile, NanoTimer.getTime());
+ this.sampleCollector.changeArchive(newFile, timer.getTime());
}
/**
@@ -484,10 +491,9 @@ public abstract class HostStatSampler
}
/**
- * @param timer a NanoTimer used to compute the elapsed delay
* @param nanosToDelay the timestamp to delay until it is the current time
*/
- private void delay(NanoTimer timer, final long nanosToDelay) throws InterruptedException {
+ private void delay(final long nanosToDelay) throws InterruptedException {
timer.reset();
long now = timer.getLastResetTime();
long remainingNanos = nanosToDelay - now;
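With the timer injected through the constructor, run(), delay(), and changeArchive() all read the same clock, so a test can step time by hand instead of sleeping. A rough illustration of the delay arithmetic under the hypothetical FakeNanoTimer sketched above:

    // Illustrative only: reproduce the computation delay() performs, with a fake clock.
    FakeNanoTimer timer = FakeNanoTimer.startingAt(0L);
    long nanosToDelay = timer.getLastResetTime() + NanoTimer.millisToNanos(1000);
    timer.advance(NanoTimer.millisToNanos(1000)); // jump the clock past the target
    timer.reset();
    long remainingNanos = nanosToDelay - timer.getLastResetTime(); // now <= 0, so no sleep is needed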
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/main/java/org/apache/geode/internal/statistics/SampleCollector.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/SampleCollector.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/SampleCollector.java
index 68430a1..428e83c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/SampleCollector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/SampleCollector.java
@@ -366,7 +366,7 @@ public class SampleCollector {
}
// notify unmarked/new handlers but not marked/old handlers
- notifyNewHandlersOfResources(handlers, this.resourceInstMap.values());
+ notifyNewHandlersOfResources(handlers, this.resourceTypeMap.values(), this.resourceInstMap.values());
}
private ResourceType getResourceType(List<MarkableSampleHandler> handlers,
@@ -478,8 +478,7 @@ public class SampleCollector {
}
}
- private void notifyNewHandlersOfResources(List<MarkableSampleHandler> handlers,
- Collection<ResourceInstance> resources) {
+ private void notifyNewHandlersOfResources(List<MarkableSampleHandler> handlers, Collection<ResourceType> types, Collection<ResourceInstance> resources) {
final boolean isDebugEnabled_STATISTICS = logger.isTraceEnabled(LogMarker.STATISTICS);
if (isDebugEnabled_STATISTICS) {
logger.trace(LogMarker.STATISTICS, "SampleCollector#notifyNewHandlersOfResources ri.size()={}", resources.size());
@@ -498,6 +497,11 @@ public class SampleCollector {
// allocatedResourceInstance...
handler.allocatedResourceInstance(resourceInstance);
}
+ for (ResourceType resourceType : types) {
+ if (!allocatedResourceTypes.contains(resourceType)) {
+ handler.allocatedResourceType(resourceType);
+ }
+ }
handler.mark();
count++;
}
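This hunk carries the substance of the rolled-archive fix: a new or unmarked handler was previously told only about ResourceInstances, so a ResourceType whose every instance had been closed before an archive roll never reached the rolled file, and readers could not resolve samples of that type. Passing the known types alongside the instances lets the handler announce a type even when no live instance references it. The closedInstanceShouldHaveTypeInRolledArchives test in StatTypesAreRolledOverRegressionTest below exercises exactly this scenario.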
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/main/java/org/apache/geode/internal/statistics/SimpleStatSampler.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/statistics/SimpleStatSampler.java b/geode-core/src/main/java/org/apache/geode/internal/statistics/SimpleStatSampler.java
index 50f49a7..ebf15cf 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/statistics/SimpleStatSampler.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/statistics/SimpleStatSampler.java
@@ -21,6 +21,7 @@ import java.io.File;
import org.apache.logging.log4j.Logger;
import org.apache.geode.CancelCriterion;
+import org.apache.geode.internal.NanoTimer;
import org.apache.geode.internal.i18n.LocalizedStrings;
import org.apache.geode.internal.logging.LogService;
import org.apache.geode.internal.logging.log4j.LocalizedMessage;
@@ -57,7 +58,11 @@ public class SimpleStatSampler extends HostStatSampler {
private final StatisticsManager sm;
public SimpleStatSampler(CancelCriterion stopper, StatisticsManager sm) {
- super(stopper, new StatSamplerStats(sm, sm.getId()));
+ this(stopper, sm, new NanoTimer());
+ }
+
+ public SimpleStatSampler(CancelCriterion stopper, StatisticsManager sm, NanoTimer timer) {
+ super(stopper, new StatSamplerStats(sm, sm.getId()), timer);
this.sm = sm;
logger.info(LogMarker.STATISTICS, LocalizedMessage.create(LocalizedStrings.SimpleStatSampler_STATSSAMPLERATE_0, getSampleRate()));
}
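A minimal sketch of wiring the new three-argument constructor in a test, assuming the hypothetical FakeNanoTimer above; mock() is Mockito, as in the tests below, and treating LocalStatisticsFactory as a StatisticsManager is an assumption of this sketch, not something this diff shows:

    // Illustrative only: a sampler whose clock the test controls.
    CancelCriterion stopper = mock(CancelCriterion.class);
    StatisticsManager sm = new LocalStatisticsFactory(null);
    SimpleStatSampler sampler = new SimpleStatSampler(stopper, sm, FakeNanoTimer.startingAt(0L));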
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/test/java/org/apache/geode/internal/statistics/DiskSpaceLimitIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/statistics/DiskSpaceLimitIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/internal/statistics/DiskSpaceLimitIntegrationTest.java
new file mode 100644
index 0000000..9ab7953
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/statistics/DiskSpaceLimitIntegrationTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.statistics;
+
+import static java.util.concurrent.TimeUnit.*;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+import java.util.concurrent.TimeoutException;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.StatisticDescriptor;
+import org.apache.geode.Statistics;
+import org.apache.geode.StatisticsType;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class DiskSpaceLimitIntegrationTest {
+
+ private static final Logger logger = LogService.getLogger();
+
+ private static final long FILE_SIZE_LIMIT = 1024*1;
+ private static final long DISK_SPACE_LIMIT = Long.MAX_VALUE;
+
+ private File dir;
+ private String archiveFileName;
+
+ private LocalStatisticsFactory factory;
+ private StatisticDescriptor[] statisticDescriptors;
+ private StatisticsType statisticsType;
+ private Statistics statistics;
+
+ private SampleCollector sampleCollector;
+ private StatArchiveHandlerConfig config;
+
+ @Rule
+ public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+ @Rule
+ public TestName testName = new TestName();
+
+ @Before
+ public void setUp() throws Exception {
+ this.dir = this.temporaryFolder.getRoot();
+ this.archiveFileName = new File(this.dir, this.testName.getMethodName() + ".gfs").getAbsolutePath();
+
+ this.factory = new LocalStatisticsFactory(null);
+ this.statisticDescriptors = new StatisticDescriptor[] {
+ this.factory.createIntCounter("stat1", "description of stat1", "units", true),
+ };
+ this.statisticsType = factory.createType("statisticsType1", "statisticsType1", this.statisticDescriptors);
+ this.statistics = factory.createAtomicStatistics(this.statisticsType, "statistics1", 1);
+
+ StatisticsSampler sampler = mock(StatisticsSampler.class);
+ when(sampler.getStatistics()).thenReturn(this.factory.getStatistics());
+
+ this.config = mock(StatArchiveHandlerConfig.class);
+ when(this.config.getArchiveFileName()).thenReturn(new File(this.archiveFileName));
+ when(this.config.getArchiveFileSizeLimit()).thenReturn(FILE_SIZE_LIMIT);
+ when(this.config.getSystemId()).thenReturn(1L);
+ when(this.config.getSystemStartTime()).thenReturn(System.currentTimeMillis());
+ when(this.config.getSystemDirectoryPath()).thenReturn(this.temporaryFolder.getRoot().getAbsolutePath());
+ when(this.config.getProductDescription()).thenReturn(this.testName.getMethodName());
+ when(this.config.getArchiveDiskSpaceLimit()).thenReturn(DISK_SPACE_LIMIT);
+
+ this.sampleCollector = new SampleCollector(sampler);
+ this.sampleCollector.initialize(this.config, NanoTimer.getTime());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ StatisticsTypeFactoryImpl.clear();
+ }
+
+ @Test
+ public void zeroKeepsAllFiles() throws Exception {
+ when(this.config.getArchiveDiskSpaceLimit()).thenReturn(0L);
+ sampleUntilFileExists(archiveFile(1));
+ sampleUntilFileExists(archiveFile(2));
+ assertThat(archiveFile(1)).exists();
+ assertThat(archiveFile(2)).exists();
+ }
+
+ @Test
+ public void sameKeepsOneFile() throws Exception {
+ when(this.config.getArchiveDiskSpaceLimit()).thenReturn(FILE_SIZE_LIMIT*2);
+ sampleUntilFileExists(archiveFile(1));
+ sampleUntilFileExists(archiveFile(2));
+ assertThat(archiveFile(1)).doesNotExist();
+ assertThat(archiveFile(2)).exists();
+ }
+
+ private File archiveFile(final int child) {
+ return new File(this.dir, this.testName.getMethodName() + "-01-" + String.format("%02d", child) + ".gfs");
+ }
+
+ private void sampleUntilFileExists(final File file) throws InterruptedException, TimeoutException {
+ logger.info("Sampling until {} exists", file);
+ long end = System.nanoTime() + MINUTES.toNanos(1);
+ while (!file.exists() && System.nanoTime() < end) {
+ sample();
+ }
+ if (!file.exists()) {
+ throw new TimeoutException("File " + file + " still does not exist after 1 minute");
+ }
+ }
+
+ private void sample() {
+ getSampleCollector().sample(System.nanoTime());
+ }
+
+ private SampleCollector getSampleCollector() {
+ return this.sampleCollector;
+ }
+
+ private void await(final Runnable supplier) {
+ await().ignoreExceptions().until(() -> supplier.run());
+ }
+
+ private ConditionFactory await() {
+ return await((String)null);
+ }
+
+ private ConditionFactory await(final String alias) {
+ return Awaitility.await(alias).atMost(10, SECONDS);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/test/java/org/apache/geode/internal/statistics/FileSizeLimitIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/statistics/FileSizeLimitIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/internal/statistics/FileSizeLimitIntegrationTest.java
new file mode 100644
index 0000000..1b6c63b
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/statistics/FileSizeLimitIntegrationTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.statistics;
+
+import static java.util.concurrent.TimeUnit.*;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+import java.util.concurrent.TimeoutException;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.StatisticDescriptor;
+import org.apache.geode.Statistics;
+import org.apache.geode.StatisticsType;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class FileSizeLimitIntegrationTest {
+
+ private static final Logger logger = LogService.getLogger();
+
+ private static final long FILE_SIZE_LIMIT = 1024*1;
+
+ private File dir;
+ private String archiveFileName;
+
+ private LocalStatisticsFactory factory;
+ private StatisticDescriptor[] statisticDescriptors;
+ private StatisticsType statisticsType;
+ private Statistics statistics;
+
+ private SampleCollector sampleCollector;
+
+ @Rule
+ public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+ @Rule
+ public TestName testName = new TestName();
+
+ @Before
+ public void setUp() throws Exception {
+ this.dir = this.temporaryFolder.getRoot();
+ this.archiveFileName = new File(this.dir, this.testName.getMethodName() + ".gfs").getAbsolutePath();
+
+ this.factory = new LocalStatisticsFactory(null);
+ this.statisticDescriptors = new StatisticDescriptor[] {
+ this.factory.createIntCounter("stat1", "description of stat1", "units", true),
+ };
+ this.statisticsType = factory.createType("statisticsType1", "statisticsType1", this.statisticDescriptors);
+ this.statistics = factory.createAtomicStatistics(this.statisticsType, "statistics1", 1);
+
+ StatisticsSampler sampler = mock(StatisticsSampler.class);
+ when(sampler.getStatistics()).thenReturn(this.factory.getStatistics());
+
+ StatArchiveHandlerConfig config = mock(StatArchiveHandlerConfig.class);
+ when(config.getArchiveFileName()).thenReturn(new File(this.archiveFileName));
+ when(config.getArchiveFileSizeLimit()).thenReturn(FILE_SIZE_LIMIT);
+ when(config.getSystemId()).thenReturn(1L);
+ when(config.getSystemStartTime()).thenReturn(System.currentTimeMillis());
+ when(config.getSystemDirectoryPath()).thenReturn(this.temporaryFolder.getRoot().getAbsolutePath());
+ when(config.getProductDescription()).thenReturn(this.testName.getMethodName());
+ when(config.getArchiveDiskSpaceLimit()).thenReturn(0L);
+
+ this.sampleCollector = new SampleCollector(sampler);
+ this.sampleCollector.initialize(config, NanoTimer.getTime());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ StatisticsTypeFactoryImpl.clear();
+ }
+
+ private File archiveFile(final int child) {
+ return new File(this.dir, this.testName.getMethodName() + "-01-" + String.format("%02d", child) + ".gfs");
+ }
+
+ @Test
+ public void rollsWhenLimitIsReached() throws Exception { // TODO: add test to assert size is correct
+ sampleUntilFileExists(archiveFile(1));
+ sampleUntilFileExists(archiveFile(2));
+ assertThat(archiveFile(1)).exists();
+ assertThat(archiveFile(2)).exists();
+ }
+
+ private void sampleUntilFileExists(final File file) throws InterruptedException, TimeoutException {
+ logger.info("Sampling until {} exists", file);
+ long end = System.nanoTime() + MINUTES.toNanos(1);
+ while (!file.exists() && System.nanoTime() < end) {
+ sample();
+ }
+ if (!file.exists()) {
+ throw new TimeoutException("File " + file + " still does not exist after 1 minute");
+ }
+ }
+
+ private void sample() {
+ getSampleCollector().sample(System.nanoTime());
+ }
+
+ private SampleCollector getSampleCollector() {
+ return this.sampleCollector;
+ }
+
+ private ConditionFactory await() {
+ return await((String)null);
+ }
+
+ private ConditionFactory await(final String alias) {
+ return Awaitility.await(alias).atMost(1, MINUTES);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ab02de6/geode-core/src/test/java/org/apache/geode/internal/statistics/StatTypesAreRolledOverRegressionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/statistics/StatTypesAreRolledOverRegressionTest.java b/geode-core/src/test/java/org/apache/geode/internal/statistics/StatTypesAreRolledOverRegressionTest.java
new file mode 100644
index 0000000..2c40039
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/statistics/StatTypesAreRolledOverRegressionTest.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.statistics;
+
+import static java.util.concurrent.TimeUnit.*;
+import static org.apache.geode.internal.statistics.TestStatArchiveWriter.WRITER_INITIAL_DATE_MILLIS;
+import static org.assertj.core.api.Assertions.*;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.jayway.awaitility.Awaitility;
+import com.jayway.awaitility.core.ConditionFactory;
+import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import org.apache.geode.StatisticDescriptor;
+import org.apache.geode.Statistics;
+import org.apache.geode.StatisticsType;
+import org.apache.geode.internal.NanoTimer;
+import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.internal.statistics.StatArchiveReader.ResourceInst;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class StatTypesAreRolledOverRegressionTest {
+
+ private static final Logger logger = LogService.getLogger();
+
+ private static final long FILE_SIZE_LIMIT = 1024*1;
+
+ private File dir;
+ private String archiveFileName;
+
+ private LocalStatisticsFactory factory;
+ private StatisticDescriptor[] statisticDescriptors;
+ private StatisticsType statisticsType;
+ private Statistics statistics;
+
+ private SampleCollector sampleCollector;
+
+ private NanoTimer timer = new NanoTimer();
+ private long nanosTimeStamp;
+ private long time;
+
+ @Rule
+ public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+ @Rule
+ public TestName testName = new TestName();
+
+ @Before
+ public void setUp() throws Exception {
+ this.dir = this.temporaryFolder.getRoot();
+ this.archiveFileName = new File(this.dir, this.testName.getMethodName() + ".gfs").getAbsolutePath();
+
+ this.factory = new LocalStatisticsFactory(null);
+ this.statisticDescriptors = new StatisticDescriptor[] {
+ this.factory.createIntCounter("stat1", "description of stat1", "units", true),
+ };
+ this.statisticsType = factory.createType("statisticsType1", "statisticsType1", this.statisticDescriptors);
+ this.statistics = factory.createAtomicStatistics(this.statisticsType, "statistics1", 1);
+
+ Answer<Statistics[]> statisticsAnswer = new Answer<Statistics[]>() {
+ public Statistics[] answer(InvocationOnMock invocation) throws Throwable {
+ return factory.getStatistics();
+ }
+ };
+
+ Answer<Integer> modCountAnswer = new Answer<Integer>() {
+ public Integer answer(InvocationOnMock invocation) throws Throwable {
+ return factory.getStatListModCount();
+ }
+ };
+
+ StatisticsSampler sampler = mock(StatisticsSampler.class);
+ when(sampler.getStatistics()).thenAnswer(statisticsAnswer);
+ when(sampler.getStatisticsModCount()).thenAnswer(modCountAnswer);
+
+ StatArchiveHandlerConfig config = mock(StatArchiveHandlerConfig.class);
+ when(config.getArchiveFileName()).thenReturn(new File(this.archiveFileName));
+ when(config.getArchiveFileSizeLimit()).thenReturn(FILE_SIZE_LIMIT);
+ when(config.getSystemId()).thenReturn(1L);
+ when(config.getSystemStartTime()).thenReturn(System.currentTimeMillis());
+ when(config.getSystemDirectoryPath()).thenReturn(this.temporaryFolder.getRoot().getAbsolutePath());
+ when(config.getProductDescription()).thenReturn(this.testName.getMethodName());
+ when(config.getArchiveDiskSpaceLimit()).thenReturn(0L);
+
+ this.sampleCollector = new SampleCollector(sampler);
+ this.sampleCollector.initialize(config, this.timer.getTime());
+
+ this.timer.reset();
+ this.nanosTimeStamp = this.timer.getLastResetTime() - getNanoRate();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ StatisticsTypeFactoryImpl.clear();
+ }
+
+ @Test
+ public void closedInstanceShouldHaveTypeInRolledArchives() throws Exception {
+ // initial state
+ verifyStatisticsTypeIsInArchiveFile(archiveFile(), 0);
+
+ // one sample
+ sample(advanceNanosTimeStamp());
+ verifyStatisticsTypeIsInArchiveFile(archiveFile(), 1);
+
+ // close stats
+ this.statistics.close();
+
+ assertThat(archiveFile(1)).doesNotExist();
+
+ // cause roll
+ sampleUntilFileExists(archiveFile(1));
+
+ sample(advanceNanosTimeStamp());
+ verifyStatisticsTypeIsInArchiveFile(archiveFile(), 0);
+
+ this.statistics = factory.createAtomicStatistics(this.statisticsType, "statistics1", 2);
+
+ sample(advanceNanosTimeStamp());
+ verifyStatisticsTypeIsInArchiveFile(archiveFile(), 1); // should be corrupt?
+
+ }
+
+ private void verifyStatisticsTypeIsInArchiveFile(final File archiveFile, final int expectedResources) throws IOException {
+ StatArchiveReader reader = new StatArchiveReader(new File[]{ archiveFile }, null, false);
+
+ // verify that every resource instance in the archive resolves to the expected statistics type
+
+ List<ResourceInst> resources = reader.getResourceInstList();
+// assertThat(resources).hasSize(expectedResources);
+ if (expectedResources > 0) {
+ assertThat(resources).hasAtLeastOneElementOfType(ResourceInst.class);
+ }
+
+ for (ResourceInst resourceInstance : resources) {
+ if (resourceInstance == null) continue;
+ assertThat(resourceInstance.getName()).isNotNull();
+ assertThat(resourceInstance.getType()).isNotNull();
+ assertThat(resourceInstance.getType().getName()).isEqualTo(this.statisticsType.getName());
+ }
+ }
+
+ //private void rollArchiveFile() {
+ //getSampleCollector() .changeArchive(archiveFile(), ++this.time);
+ //getSampleCollector().getStatArchiveHandler().changeArchiveFile(true, ++this.time);
+ //}
+
+ private void sampleUntilFileExists(final File file) throws InterruptedException, TimeoutException {
+ logger.info("Sampling until {} exists", file);
+ long timeout = System.nanoTime() + MINUTES.toNanos(1);
+ int count = 0;
+ do {
+ sample(advanceNanosTimeStamp());
+ count++;
+ } while (!file.exists() && System.nanoTime() < timeout);
+ logger.info("Performed " + count + " samples");
+ if (!file.exists()) {
+ throw new TimeoutException("File " + file + " still does not exist after 1 minute");
+ }
+ }
+
+ private void sample(final long time) {
+ getSampleCollector().sample(time);
+ }
+
+ private SampleCollector getSampleCollector() {
+ return this.sampleCollector;
+ }
+
+ private File archiveFile(final int child) {
+ return new File(this.dir, this.testName.getMethodName() + "-01-" + String.format("%02d", child) + ".gfs");
+ }
+
+ private File archiveFile() {
+ return new File(this.archiveFileName);
+ }
+
+ private long advanceNanosTimeStamp() {
+ this.nanosTimeStamp += getNanoRate();
+ return this.nanosTimeStamp;
+ }
+
+ private long getNanoRate() {
+ return NanoTimer.millisToNanos(getSampleRate());
+ }
+
+ private long getSampleRate() {
+ return 1000; // 1 second
+ }
+
+ private ConditionFactory await() {
+ return await((String)null);
+ }
+
+ private ConditionFactory await(final String alias) {
+ return Awaitility.await(alias).atMost(1, MINUTES);
+ }
+}