Posted to commits@geode.apache.org by mm...@apache.org on 2018/11/29 23:30:34 UTC

[geode] branch geode-5971-createregion updated (af3e912 -> 315c5d4)

This is an automated email from the ASF dual-hosted git repository.

mmartell pushed a change to branch geode-5971-createregion
in repository https://gitbox.apache.org/repos/asf/geode.git.


 discard af3e912  WIP - add partition attributes and disk store tests
     new 315c5d4  WIP - add partition attributes and disk store tests

This update added new revisions after undoing existing ones; that is,
some revisions that were in the old version of the branch are not in
the new version.  This situation occurs when a user --force pushes a
change, leaving the repository with a history like this:

 * -- * -- B -- O -- O -- O   (af3e912)
            \
             N -- N -- N   refs/heads/geode-5971-createregion (315c5d4)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../internal/cache/configuration/RegionConfigFactory.java   | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)


[geode] 01/01: WIP - add partition attributes and disk store tests

Posted by mm...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mmartell pushed a commit to branch geode-5971-createregion
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 315c5d4d74da712813d2a50102a7402a5998185f
Author: Peter Tran <pt...@pivotal.io>
AuthorDate: Thu Nov 29 15:29:52 2018 -0800

    WIP - add partition attributes and disk store tests
    
    Signed-off-by: Aditya Anchuri <aa...@pivotal.io>
    Signed-off-by: Peter Tran <pt...@pivotal.io>
---
 .../cli/commands/CreateRegionCommandDUnitTest.java |  95 ++++++++++
 ...egionCommandPersistsConfigurationDUnitTest.java | 203 +++++++++++++++++++--
 .../apache/geode/cache/PartitionAttributes.java    |  19 ++
 .../cache/configuration/RegionConfigFactory.java   |  13 +-
 4 files changed, 316 insertions(+), 14 deletions(-)

diff --git a/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandDUnitTest.java
index 3dbecfc..7a6568f 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandDUnitTest.java
@@ -20,10 +20,12 @@ import static org.assertj.core.api.Assertions.assertThat;
 import java.io.File;
 import java.io.Serializable;
 import java.util.Arrays;
+import java.util.List;
 import java.util.stream.Collectors;
 
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -31,13 +33,18 @@ import org.junit.rules.TemporaryFolder;
 import org.junit.rules.TestName;
 
 import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.Declarable;
+import org.apache.geode.cache.EntryOperation;
 import org.apache.geode.cache.PartitionResolver;
 import org.apache.geode.cache.Region;
+import org.apache.geode.cache.asyncqueue.AsyncEvent;
+import org.apache.geode.cache.asyncqueue.AsyncEventListener;
 import org.apache.geode.cache.configuration.CacheConfig;
 import org.apache.geode.cache.configuration.CacheElement;
 import org.apache.geode.cache.configuration.RegionConfig;
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.compression.SnappyCompressor;
+import org.apache.geode.internal.cache.InternalRegion;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import org.apache.geode.internal.cache.RegionEntryContext;
 import org.apache.geode.test.compiler.JarBuilder;
@@ -59,6 +66,26 @@ public class CreateRegionCommandDUnitTest {
       implements Serializable {
   }
 
+  public static class DummyAEQListener implements AsyncEventListener, Declarable {
+    @Override
+    public boolean processEvents(List<AsyncEvent> events) {
+      return false;
+    }
+  }
+
+  public static class DummyPartitionResolver implements PartitionResolver, Declarable {
+    @Override
+    public Object getRoutingObject(EntryOperation opDetails) {
+      return null;
+    }
+
+    @Override
+    public String getName() {
+      return "dummy";
+    }
+  }
+
+
   @ClassRule
   public static ClusterStartupRule lsRule = new ClusterStartupRule();
 
@@ -230,6 +257,7 @@ public class CreateRegionCommandDUnitTest {
     gfsh.executeAndAssertThat("destroy region --name=/TEMPLATE").statusIsSuccess();
   }
 
+
   @Test
   public void cannotSetRegionExpirationForPartitionedTemplate() {
     gfsh.executeAndAssertThat("create region --name=/TEMPLATE --type=PARTITION")
@@ -561,6 +589,73 @@ public class CreateRegionCommandDUnitTest {
         .containsOutput("Region /startWithLocalRegion already exists on the cluster");
   }
 
+  /**
+   * Ignored this test until we refactor the FetchRegionAttributesFunction to not use
+   * AttributesFactory, and instead use RegionConfig, which we will do as part of implementing
+   * GEODE-6103
+   */
+  @Ignore
+  @Test
+  public void testCreateRegionFromTemplateWithAsyncEventListeners() {
+    String queueId = "queue1";
+    gfsh.executeAndAssertThat(
+        "create async-event-queue --id=" + queueId
+            + " --listener=" + CreateRegionCommandDUnitTest.DummyAEQListener.class.getName())
+        .statusIsSuccess();
+
+    String regionName = testName.getMethodName();
+    gfsh.executeAndAssertThat(
+        "create region --name=" + regionName
+            + " --type=REPLICATE"
+            + " --async-event-queue-id=" + queueId)
+        .statusIsSuccess();
+
+    gfsh.executeAndAssertThat(
+        "create region --name=" + regionName + "-from-template"
+            + " --template-region=" + regionName)
+        .statusIsSuccess();
+
+    server1.invoke(() -> {
+      Region regionFromTemplate = ClusterStartupRule.getCache()
+          .getRegion(regionName + "-from-template");
+      assertThat(regionFromTemplate).isNotNull();
+      assertThat(((InternalRegion) regionFromTemplate).getAsyncEventQueueIds())
+          .contains(queueId);
+    });
+  }
+
+  /**
+   * Ignored this test until we refactor the FetchRegionAttributesFunction to not use
+   * AttributesFactory, and instead use RegionConfig, which we will do as part of implementing
+   * GEODE-6103
+   */
+  @Ignore
+  @Test
+  public void testCreateRegionFromTemplateWithPartitionResolver() {
+    String regionName = testName.getMethodName();
+    String regionFromTemplateName = regionName + "-from-template";
+
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionName
+        + " --type=PARTITION"
+        + " --partition-resolver=" + DummyPartitionResolver.class.getName()).statusIsSuccess();
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionFromTemplateName
+        + " --template-region=" + regionName).statusIsSuccess();
+
+    server1.invoke(() -> {
+      Region regionFromTemplate = ClusterStartupRule.getCache()
+          .getRegion(regionName + "-from-template");
+      assertThat(regionFromTemplate).isNotNull();
+      assertThat(((InternalRegion) regionFromTemplate).getPartitionAttributes()
+          .getPartitionResolver())
+          .isNotNull();
+      assertThat(((InternalRegion) regionFromTemplate).getPartitionAttributes()
+          .getPartitionResolver().getName())
+          .isEqualTo(DummyPartitionResolver.class.getName());
+    });
+  }
+
   private String getUniversalClassCode(String classname) {
     String code = "package io.pivotal;" + "import org.apache.geode.cache.CacheLoader;"
         + "import org.apache.geode.cache.CacheLoaderException;"
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandPersistsConfigurationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandPersistsConfigurationDUnitTest.java
index 0a0dbcc..4c414f9 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandPersistsConfigurationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/CreateRegionCommandPersistsConfigurationDUnitTest.java
@@ -412,35 +412,215 @@ public class CreateRegionCommandPersistsConfigurationDUnitTest {
   }
 
   @Test
-  public void placeholderAEQ() {}
+  public void createRegionPersistsAEQConfig() {
+    String queueId = "queue1";
+    gfsh.executeAndAssertThat(
+        "create async-event-queue --id=" + queueId
+            + " --listener=" + CreateRegionCommandDUnitTest.DummyAEQListener.class.getName())
+        .statusIsSuccess();
+
+    String regionName = testName.getMethodName();
+    gfsh.executeAndAssertThat(
+        "create region --name=" + regionName
+            + " --type=REPLICATE"
+            + " --async-event-queue-id=" + queueId)
+        .statusIsSuccess();
+
+    locator.invoke(() -> {
+      InternalConfigurationPersistenceService cc =
+          ClusterStartupRule.getLocator().getConfigurationPersistenceService();
+      CacheConfig config = cc.getCacheConfig("cluster");
+
+      List<RegionConfig> regions = config.getRegions();
+      assertThat(regions).isNotEmpty();
+      assertThat(regions).hasSize(1);
+      RegionConfig regionConfig = CacheElement.findElement(regions, regionName);
+      assertThat(regionConfig.getRegionAttributes().get(0).getAsyncEventQueueIds())
+          .contains(queueId);
+    });
+  }
 
   @Test
-  public void placeholderColocation() {}
+  public void createRegionWithColocation() {
+    String regionName = testName.getMethodName();
+    String colocatedRegionName = regionName + "-colocated";
+    String colocatedRegionFromTemplateName = colocatedRegionName + "-from-template";
+
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionName
+        + " --type=PARTITION");
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + colocatedRegionName
+        + " --colocated-with=" + regionName
+        + " --type=PARTITION");
+
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + colocatedRegionFromTemplateName
+        + " --template-region=" + colocatedRegionName);
+
+    locator.invoke(() -> {
+      InternalConfigurationPersistenceService cc =
+          ClusterStartupRule.getLocator().getConfigurationPersistenceService();
+      CacheConfig config = cc.getCacheConfig("cluster");
+
+      List<RegionConfig> regions = config.getRegions();
+      assertThat(regions).isNotEmpty();
+      assertThat(regions).hasSize(3);
+
+      RegionConfig colocatedConfig = CacheElement.findElement(regions, colocatedRegionName);
+      assertThat(
+          colocatedConfig.getRegionAttributes().get(0).getPartitionAttributes().getColocatedWith())
+              .isEqualTo("/" + regionName);
+
+      RegionConfig colocatedConfigFromTemplate = CacheElement.findElement(regions,
+          colocatedRegionFromTemplateName);
+      assertThat(
+          colocatedConfigFromTemplate.getRegionAttributes().get(0).getPartitionAttributes()
+              .getColocatedWith())
+                  .isEqualTo("/" + regionName);
+    });
+  }
 
   @Test
-  public void placeholderDiskstores() {
-    // test disk-synchronous
+  public void createRegionPersistsDiskstores() throws Exception {
+    String regionName = testName.getMethodName();
+    String store = "Store1";
+    gfsh.executeAndAssertThat("create disk-store"
+        + " --name=" + store
+        + " --dir=/tmp/foo").statusIsSuccess();
+
+    // Give disk store time to get created
+    Thread.sleep(2000);
+
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionName
+        + " --type=REPLICATE_PERSISTENT"
+        + " --disk-store=" + store
+    ).statusIsSuccess();
+
+    String regionNameFromTemplate = regionName + "-from-template";
+    gfsh.executeAndAssertThat("create region --name=" + regionNameFromTemplate
+        + " --template-region=" + regionName)
+        .statusIsSuccess();
+
+    locator.invoke(() -> {
+      InternalConfigurationPersistenceService cc =
+          ClusterStartupRule.getLocator().getConfigurationPersistenceService();
+      CacheConfig config = cc.getCacheConfig("cluster");
+
+      List<RegionConfig> regions = config.getRegions();
+      assertThat(regions).isNotEmpty();
+      assertThat(regions).hasSize(2);
+
+      List<String> regionNames = Arrays.asList(regionName, regionNameFromTemplate);
+      regionNames.forEach(name -> {
+        RegionConfig regionConfig = CacheElement.findElement(config.getRegions(), name);
+        assertThat(regionConfig).isNotNull();
+        assertThat(regionConfig.getName()).isEqualTo(name);
+
+        RegionAttributesType regionAttributes = regionConfig.getRegionAttributes().get(0);
+        assertThat(regionAttributes.getDiskStoreName())
+            .isEqualTo(store);
+      });
+    });
   }
 
   @Test
-  public void placeholderPartitionedRegion() {
-    // test disk-synchronous String regionName = testName.getMethodName();
+  public void createRegionPersistsPartitionAttributes() {
     String regionName = testName.getMethodName();
+    String regionFromTemplateName = regionName + "-from-template";
+
     gfsh.executeAndAssertThat("create region"
         + " --name=" + regionName
         + " --type=PARTITION"
-        + " --partition-resolver=" + DummyPartitionResolver.class.getName()
         + " --recovery-delay=1"
+        + " --local-max-memory=1000"
         + " --redundant-copies=1"
         + " --startup-recovery-delay=1"
         + " --total-max-memory=100"
-        + " --total-num-buckets=1"
-        + " --eviction-max-memory=700"
-        + " --eviction-entry-count=7"
-        + " --eviction-object-sizer=" + DummyObjectSizer.class.getName()).statusIsSuccess();
+        + " --total-num-buckets=1").statusIsSuccess();
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionFromTemplateName
+        + " --template-region=" + regionName);
+
+    locator.invoke(() -> {
+      InternalConfigurationPersistenceService cc =
+          ClusterStartupRule.getLocator().getConfigurationPersistenceService();
+      CacheConfig config = cc.getCacheConfig("cluster");
+
+      List<RegionConfig> regions = config.getRegions();
+      assertThat(regions).isNotEmpty();
+      assertThat(regions).hasSize(2);
+
+      List<String> regionNames = Arrays.asList(regionName, regionFromTemplateName);
+      regionNames.forEach(name -> {
+        RegionConfig regionConfig = CacheElement.findElement(config.getRegions(), name);
+        assertThat(regionConfig).isNotNull();
+        assertThat(regionConfig.getName()).isEqualTo(name);
+
+        RegionAttributesType regionAttributes = regionConfig.getRegionAttributes().get(0);
+        RegionAttributesType.PartitionAttributes partitionAttributes =
+            regionAttributes.getPartitionAttributes();
+
+        assertThat(partitionAttributes.getRecoveryDelay())
+            .describedAs("Recovery delay should be 1 for region " + name)
+            .isEqualTo("1");
+        assertThat(partitionAttributes.getLocalMaxMemory())
+            .describedAs("Local max memory should be 1000 for region " + name)
+            .isEqualTo("1000");
+        assertThat(partitionAttributes.getRedundantCopies())
+            .describedAs("Redundant copies should be 1 for region " + name)
+            .isEqualTo("1");
+        assertThat(partitionAttributes.getStartupRecoveryDelay())
+            .describedAs("Startup recovery delay should be 1 for region " + name)
+            .isEqualTo("1");
+        assertThat(partitionAttributes.getTotalMaxMemory())
+            .describedAs("Total max memory should be 100 for region " + name)
+            .isEqualTo("100");
+        assertThat(partitionAttributes.getTotalNumBuckets())
+            .describedAs("Total num buckets should be 1 for region " + name)
+            .isEqualTo("1");
+      });
+    });
   }
 
   @Test
+  public void createRegionPersistsPartitionResolver() {
+    String regionName = testName.getMethodName();
+
+    gfsh.executeAndAssertThat("create region"
+        + " --name=" + regionName
+        + " --type=PARTITION"
+        + " --partition-resolver=" + DummyPartitionResolver.class.getName()).statusIsSuccess();
+
+    locator.invoke(() -> {
+      InternalConfigurationPersistenceService cc =
+          ClusterStartupRule.getLocator().getConfigurationPersistenceService();
+      CacheConfig config = cc.getCacheConfig("cluster");
+
+      List<RegionConfig> regions = config.getRegions();
+      assertThat(regions).isNotEmpty();
+      assertThat(regions).hasSize(1);
+
+      List<String> regionNames = Arrays.asList(regionName);
+      regionNames.forEach(name -> {
+        RegionConfig regionConfig = CacheElement.findElement(config.getRegions(), name);
+        assertThat(regionConfig).isNotNull();
+        assertThat(regionConfig.getName()).isEqualTo(name);
+
+        RegionAttributesType regionAttributes = regionConfig.getRegionAttributes().get(0);
+        RegionAttributesType.PartitionAttributes partitionAttributes =
+            regionAttributes.getPartitionAttributes();
+
+        assertThat(partitionAttributes.getPartitionResolver().getClassName())
+            .isEqualTo(DummyPartitionResolver.class.getName());
+      });
+    });
+  }
+
+  // TODO test empty partition attributes
+
+  @Test
   public void placeholderCustomExpiryClass() {
     // + " --entry-idle-time-custom-expiry=" + DummyCustomExpiry.class.getName()
     // assertThat(attr.getEntryIdleTime().getExpirationAttributes().getCustomExpiry().toString())
@@ -449,6 +629,7 @@ public class CreateRegionCommandPersistsConfigurationDUnitTest {
 
   }
 
+  // TODO + " --eviction-entry-count=7"
   @Test
   public void placeHolderDisableCloning() {
     // " --enable-cloning=false"
diff --git a/geode-core/src/main/java/org/apache/geode/cache/PartitionAttributes.java b/geode-core/src/main/java/org/apache/geode/cache/PartitionAttributes.java
index d137fa3..2cab775 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/PartitionAttributes.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/PartitionAttributes.java
@@ -18,6 +18,8 @@ package org.apache.geode.cache;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.geode.cache.configuration.DeclarableType;
+import org.apache.geode.cache.configuration.RegionAttributesType;
 import org.apache.geode.cache.partition.PartitionListener;
 
 /**
@@ -153,4 +155,21 @@ public interface PartitionAttributes<K, V> {
    */
   List<FixedPartitionAttributes> getFixedPartitionAttributes();
 
+  default RegionAttributesType.PartitionAttributes convertToConfigPartitionAttributes() {
+    RegionAttributesType.PartitionAttributes configAttributes =
+        new RegionAttributesType.PartitionAttributes();
+    configAttributes.setColocatedWith(getColocatedWith());
+    configAttributes.setLocalMaxMemory(Integer.toString(getLocalMaxMemory()));
+    if (getPartitionResolver() != null) {
+      configAttributes.setPartitionResolver(new DeclarableType(getPartitionResolver().getName()));
+    }
+    configAttributes.setRecoveryDelay(Long.toString(getRecoveryDelay()));
+    configAttributes.setStartupRecoveryDelay(Long.toString(getStartupRecoveryDelay()));
+    configAttributes.setRedundantCopies(Integer.toString(getRedundantCopies()));
+    configAttributes.setTotalMaxMemory(Long.toString(getTotalMaxMemory()));
+    configAttributes.setTotalNumBuckets(Long.toString(getTotalNumBuckets()));
+
+    return configAttributes;
+  }
+
 }
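
For reference, a minimal sketch of how the new convertToConfigPartitionAttributes()
default method maps runtime partition attributes onto the cluster-configuration
representation.  This sketch is not part of the commit; the class name is
illustrative only, and the attributes are assumed to be built with the standard
PartitionAttributesFactory:

import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.configuration.RegionAttributesType;

public class ConvertPartitionAttributesSketch {
  public static void main(String[] args) {
    // Build runtime partition attributes the usual way.
    PartitionAttributes<Object, Object> runtimeAttributes =
        new PartitionAttributesFactory<Object, Object>()
            .setRedundantCopies(1)
            .setTotalNumBuckets(1)
            .create();

    // The new default method converts them to the cache-configuration form.
    RegionAttributesType.PartitionAttributes configAttributes =
        runtimeAttributes.convertToConfigPartitionAttributes();

    System.out.println(configAttributes.getRedundantCopies());  // "1"
    System.out.println(configAttributes.getTotalNumBuckets());  // "1"
  }
}

Numeric fields come back as strings, which matches the string-based assertions in
CreateRegionCommandPersistsConfigurationDUnitTest above.
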
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/configuration/RegionConfigFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/configuration/RegionConfigFactory.java
index c78f68b..0dd0bce 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/configuration/RegionConfigFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/configuration/RegionConfigFactory.java
@@ -149,6 +149,8 @@ public class RegionConfigFactory {
 
     if (args.getDiskStore() != null) {
       addAttribute(regionConfig, a -> a.setDiskStoreName(args.getDiskStore()));
+    } else if (regionAttributes != null) {
+      addAttribute(regionConfig, a -> a.setDiskStoreName(regionAttributes.getDiskStoreName()));
     }
 
     if (args.getDiskSynchronous() != null) {
@@ -211,11 +213,16 @@ public class RegionConfigFactory {
       partitionAttributes.setTotalMaxMemory(long2string(partitionArgs.getPrTotalMaxMemory()));
       partitionAttributes.setTotalNumBuckets(int2string(partitionArgs.getPrTotalNumBuckets()));
 
-      DeclarableType partitionResolverType = new DeclarableType();
-      partitionResolverType.setClassName(partitionArgs.getPartitionResolver());
-      partitionAttributes.setPartitionResolver(partitionResolverType);
+      if (partitionArgs.getPartitionResolver() != null) {
+        DeclarableType partitionResolverType = new DeclarableType();
+        partitionResolverType.setClassName(partitionArgs.getPartitionResolver());
+        partitionAttributes.setPartitionResolver(partitionResolverType);
+      }
 
       addAttribute(regionConfig, a -> a.setPartitionAttributes(partitionAttributes));
+    } else if (regionAttributes != null && regionAttributes.getPartitionAttributes() != null) {
+      addAttribute(regionConfig, a -> a.setPartitionAttributes(
+          regionAttributes.getPartitionAttributes().convertToConfigPartitionAttributes()));
     }
 
     if (args.getGatewaySenderIds() != null && !args.getGatewaySenderIds().isEmpty()) {