Posted to commits@geode.apache.org by nn...@apache.org on 2021/06/04 17:37:29 UTC

[geode] branch support/1.13 updated: GEODE-9289: Configuration compatible with pre-1.12.0 versions.

This is an automated email from the ASF dual-hosted git repository.

nnag pushed a commit to branch support/1.13
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/support/1.13 by this push:
     new 1a39ccc  GEODE-9289: Configuration compatible with pre-1.12.0 versions.
1a39ccc is described below

commit 1a39ccca027d2783b3f746ab505d5c1b5e81be45
Author: Nabarun Nag <na...@cs.wisc.edu>
AuthorDate: Mon May 17 18:33:19 2021 -0700

    GEODE-9289: Configuration compatible with pre-1.12.0 versions.
    
    	* Configuration sent by locators running 1.12.0 or later can be interpreted by pre-1.12.0 locators
    
    (cherry picked from commit c1e59b23a89d4eac89334f525cd4a8bfaebefe1d)
---
 .../codeAnalysis/sanctionedDataSerializables.txt   |   4 +-
 .../internal/cache/ClusterConfigurationLoader.java |   1 +
 .../configuration/domain/Configuration.java        |  25 +++--
 .../management/ConfigurationCompatibilityTest.java | 110 +++++++++++++++++++++
 4 files changed, 128 insertions(+), 12 deletions(-)
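Context for the diffs below: in 1.12.0, Configuration.toData() began writing a null HashSet where the jar names used to be, followed by a version ordinal and a deployment map (see the removed comment in Configuration.java). A pre-1.12.0 member reading that stream therefore saw a null jar-name set. This commit writes a real HashSet built from deployments.keySet() again, so older locators keep getting jar names, and the new reader falls back gracefully when the trailing 1.12.0+ fields are absent.

For illustration, here is roughly how a pre-1.12.0 Configuration.fromData consumes the stream. This is a reconstruction from the fields written by toData below, not the actual pre-1.12.0 source, and the elided earlier reads are assumed:

    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
      // ... the name, cache XML, and properties-file fields are read first,
      // in the same order toData writes them ...
      gemfireProperties = DataSerializer.readProperties(in);
      HashSet<String> jarNames = DataSerializer.readHashSet(in);
      // A pre-1.12.0 reader stops here, so it only learns about deployed jars if the
      // sender wrote a non-null HashSet -- which is exactly what this commit restores.
      // The trailing version ordinal and deployment map added in 1.12.0 are left unread.
    }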

diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index 3a1288a..16d0b42 100644
--- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -1963,8 +1963,8 @@ fromData,17
 toData,17
 
 org/apache/geode/management/internal/configuration/domain/Configuration,2
-fromData,111
-toData,62
+fromData,112
+toData,79
 
 org/apache/geode/management/internal/configuration/domain/ConfigurationChangeResult,2
 fromData,31
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ClusterConfigurationLoader.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ClusterConfigurationLoader.java
index 89b04bb..9197562 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ClusterConfigurationLoader.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ClusterConfigurationLoader.java
@@ -91,6 +91,7 @@ public class ClusterConfigurationLoader {
     logger.info("deploying jars received from cluster configuration");
     List<String> jarFileNames =
         response.getJarNames().values().stream()
+            .filter(Objects::nonNull)
             .flatMap(Set::stream)
             .collect(Collectors.toList());
 
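The one-line change above matters because the jar-name map in the configuration response can now contain a null set for a group, and flatMap(Set::stream) throws a NullPointerException on a null element. A minimal, self-contained illustration of the pattern, using made-up data rather than Geode's ConfigurationResponse:

    import java.util.*;
    import java.util.stream.*;

    public class NullSafeFlatten {
      public static void main(String[] args) {
        // Hypothetical data standing in for response.getJarNames()
        Map<String, Set<String>> jarNamesByGroup = new HashMap<>();
        jarNamesByGroup.put("cluster", new HashSet<>(Arrays.asList("lib-1.0.jar")));
        jarNamesByGroup.put("groupA", null); // a group whose configuration carried no jar names

        List<String> jarFileNames = jarNamesByGroup.values().stream()
            .filter(Objects::nonNull) // without this, Set::stream hits the null value and NPEs
            .flatMap(Set::stream)
            .collect(Collectors.toList());

        System.out.println(jarFileNames); // prints [lib-1.0.jar]
      }
    }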
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/configuration/domain/Configuration.java b/geode-core/src/main/java/org/apache/geode/management/internal/configuration/domain/Configuration.java
index ed6de32..a06bd4c 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/configuration/domain/Configuration.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/configuration/domain/Configuration.java
@@ -19,6 +19,7 @@ import static org.apache.geode.internal.JarDeployer.getArtifactId;
 
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -164,10 +165,10 @@ public class Configuration implements DataSerializable {
     DataSerializer.writeString(cacheXmlContent, out);
     DataSerializer.writeString(propertiesFileName, out);
     DataSerializer.writeProperties(gemfireProperties, out);
-    // Before 1.12, this code wrote a non-null HashSet of jarnames to the output stream.
-    // As of 1.12, it writes a null HashSet to the stream, so that when we can still read the old
-    // configuration, and will now also write the deployment map.
-    DataSerializer.writeHashSet(null, out);
+    // As of this change, a non-null HashSet of jar names is written so that pre-1.12.0 members
+    // can still read the jar names; the deployment map is then written for 1.12.0 and later members.
+    HashSet<String> jarNames = new HashSet<>(deployments.keySet());
+    DataSerializer.writeHashSet(jarNames, out);
     // As of 1.12, this class started writing the current version
     Version.getCurrentVersion().writeOrdinal(out, true);
     DataSerializer.writeHashMap(deployments, out);
@@ -181,17 +182,20 @@ public class Configuration implements DataSerializable {
     propertiesFileName = DataSerializer.readString(in);
     gemfireProperties = DataSerializer.readProperties(in);
     HashSet<String> jarNames = DataSerializer.readHashSet(in);
-    if (jarNames != null) {
-      // we are reading pre 1.12 data. So add each jar name to deployments
-      jarNames.stream()
-          .map(x -> new Deployment(x, null, null))
-          .forEach(deployment -> deployments.put(deployment.getFileName(), deployment));
-    } else {
+    try {
       // version of the data we are reading (1.12 or later)
       final VersionOrdinal version = Versioning.getVersionOrdinal(Version.readOrdinal(in));
       if (version.isNotOlderThan(Version.GEODE_1_12_0)) {
         deployments.putAll(DataSerializer.readHashMap(in));
       }
+    } catch (EOFException ex) {
+      if (jarNames != null) {
+        // we are reading pre 1.12 data. So add each jar name to deployments
+        jarNames.stream()
+            .map(x -> new Deployment(x, null, null))
+            .forEach(
+                deployment -> deployments.put(deployment.getFileName(), deployment));
+      }
     }
   }
 
@@ -229,4 +233,5 @@ public class Configuration implements DataSerializable {
     return Objects.hash(configName, cacheXmlContent, cacheXmlFileName, propertiesFileName,
         gemfireProperties, deployments);
   }
+
 }
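Taken together, the two Configuration.java hunks restore a two-way compatible layout: the writer always emits a non-null jar-name HashSet (readable by pre-1.12.0 members) and then appends the version ordinal and deployment map for newer members, while the reader attempts the trailing fields and treats an EOFException as "the sender was pre-1.12.0", rebuilding deployments from the jar names instead. The same idea in a minimal, self-contained sketch using plain java.io streams rather than Geode's DataSerializer (all names here are illustrative, not Geode API):

    import java.io.*;
    import java.util.*;

    public class OptionalTrailingFields {
      // Newer writer: the legacy field first, then fields only newer readers understand.
      static byte[] writeNew(Set<String> jarNames, Map<String, String> deployments) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(jarNames.size());
        for (String jar : jarNames) {
          out.writeUTF(jar);
        }
        out.writeInt(deployments.size()); // trailing data, left unread by older readers
        for (Map.Entry<String, String> e : deployments.entrySet()) {
          out.writeUTF(e.getKey());
          out.writeUTF(e.getValue());
        }
        return bytes.toByteArray();
      }

      // Newer reader: try the trailing fields, fall back on EOFException as fromData above does.
      static Map<String, String> read(byte[] data) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        Set<String> jarNames = new HashSet<>();
        int jarCount = in.readInt();
        for (int i = 0; i < jarCount; i++) {
          jarNames.add(in.readUTF());
        }
        Map<String, String> deployments = new HashMap<>();
        try {
          int deploymentCount = in.readInt(); // only present in streams from newer senders
          for (int i = 0; i < deploymentCount; i++) {
            deployments.put(in.readUTF(), in.readUTF());
          }
        } catch (EOFException ignored) {
          // Older sender: only jar names were written, so rebuild the map from them.
          for (String jar : jarNames) {
            deployments.put(jar, jar);
          }
        }
        return deployments;
      }

      public static void main(String[] args) throws IOException {
        byte[] newFormat = writeNew(new HashSet<>(Collections.singleton("lib.jar")),
            Collections.singletonMap("lib.jar", "deployment:lib.jar"));
        System.out.println(read(newFormat)); // {lib.jar=deployment:lib.jar}
      }
    }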
diff --git a/geode-gfsh/src/upgradeTest/java/org/apache/geode/management/ConfigurationCompatibilityTest.java b/geode-gfsh/src/upgradeTest/java/org/apache/geode/management/ConfigurationCompatibilityTest.java
new file mode 100644
index 0000000..f88021d
--- /dev/null
+++ b/geode-gfsh/src/upgradeTest/java/org/apache/geode/management/ConfigurationCompatibilityTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.management;
+
+
+import java.util.Collection;
+import java.util.List;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.categories.BackwardCompatibilityTest;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+import org.apache.geode.test.version.TestVersion;
+import org.apache.geode.test.version.VersionManager;
+
+@Category({BackwardCompatibilityTest.class})
+@RunWith(Parameterized.class)
+public class ConfigurationCompatibilityTest {
+  private final String oldVersion;
+
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection<String> data() {
+    List<String> result = VersionManager.getInstance().getVersionsWithoutCurrent();
+    result.removeIf(s -> TestVersion.compare(s, "1.10.0") < 0);
+    return result;
+  }
+
+  public ConfigurationCompatibilityTest(String oldVersion) {
+    this.oldVersion = oldVersion;
+  }
+
+  @Rule
+  public ClusterStartupRule clusterStartupRule = new ClusterStartupRule();
+
+  @Rule
+  public GfshCommandRule gfsh = new GfshCommandRule();
+
+  @Test
+  public void whenConfigurationIsExchangedBetweenMixedVersionLocatorsThenItShouldNotThrowExceptions()
+      throws Exception {
+    MemberVM locator1 = clusterStartupRule.startLocatorVM(0, oldVersion);
+    int locatorPort1 = locator1.getPort();
+    MemberVM locator2 =
+        clusterStartupRule
+            .startLocatorVM(1, 0, oldVersion, l -> l.withConnectionToLocator(locatorPort1));
+    int locatorPort2 = locator2.getPort();
+
+    gfsh.connect(locator1);
+    gfsh.executeAndAssertThat("configure pdx --read-serialized=true --disk-store=DEFAULT")
+        .statusIsSuccess();
+
+    clusterStartupRule.startServerVM(2, oldVersion,
+        s -> s.withConnectionToLocator(locatorPort1, locatorPort2).withRegion(
+            RegionShortcut.PARTITION, "region"));
+    clusterStartupRule.startServerVM(3, oldVersion,
+        s -> s.withConnectionToLocator(locatorPort1, locatorPort2)
+            .withRegion(RegionShortcut.PARTITION, "region"));
+
+    clusterStartupRule.stop(0);
+    locator1 = clusterStartupRule.startLocatorVM(0, l -> l.withConnectionToLocator(locatorPort2));
+    int newLocatorPort1 = locator1.getPort();
+
+    // configure pdx command is executed to trigger a cluster configuration change event.
+    gfsh.disconnect();
+    gfsh.connect(locator1);
+    gfsh.executeAndAssertThat("configure pdx --read-serialized=true --disk-store=DEFAULT")
+        .statusIsSuccess();
+
+    clusterStartupRule.stop(1);
+    locator2 =
+        clusterStartupRule.startLocatorVM(1, l -> l.withConnectionToLocator(newLocatorPort1));
+    int newLocatorPort2 = locator2.getPort();
+
+    // configure pdx command is executed to trigger a cluster configuration change event.
+    gfsh.disconnect();
+    gfsh.connect(locator2);
+    gfsh.executeAndAssertThat("configure pdx --read-serialized=true --disk-store=DEFAULT")
+        .statusIsSuccess();
+
+    clusterStartupRule.stop(2);
+    clusterStartupRule.startServerVM(2,
+        s -> s.withConnectionToLocator(newLocatorPort1, newLocatorPort2)
+            .withRegion(RegionShortcut.PARTITION, "region"));
+
+    clusterStartupRule.stop(3);
+    clusterStartupRule.startServerVM(3,
+        s -> s.withConnectionToLocator(newLocatorPort1, newLocatorPort2)
+            .withRegion(RegionShortcut.PARTITION, "region"));
+
+  }
+}
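The upgrade test above rolls the two locators from the old version to the current one, re-running "configure pdx" after each roll so the cluster configuration is re-serialized and exchanged between mixed-version locators, and then restarts the servers on the current version against the rolled locators; any incompatibility in Configuration's toData/fromData would surface as an exception during startup or the gfsh commands. Assuming the usual Geode Gradle layout (the task name is inferred from the src/upgradeTest source set, not stated in this commit), it can be run with something like ./gradlew geode-gfsh:upgradeTest --tests ConfigurationCompatibilityTest.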