Posted to commits@ozone.apache.org by so...@apache.org on 2021/12/10 11:13:05 UTC

[ozone] 02/02: HDDS-6081. EC: Fix ReplicationConfig related test failures that happened due to merging HDDS-5997 to the EC branch

This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 3007b5c68dfda26218748bcc8ded7cd3115fe7c3
Author: Istvan Fajth <fa...@gmail.com>
AuthorDate: Fri Dec 10 12:05:39 2021 +0100

    HDDS-6081. EC: Fix ReplicationConfig related test failures that happened due to merging HDDS-5997 to the EC branch
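    As a rough, illustrative sketch (not part of this patch) of the behavior the
    fix relies on, using only the client-side ReplicationConfig API that the new
    tests exercise: adjustReplication now leaves an EC config untouched, while a
    RATIS or STAND_ALONE config is still re-parsed with the requested factor.

        ConfigurationSource conf = new OzoneConfiguration();

        // EC: the config is returned unchanged, the requested factor is ignored.
        ReplicationConfig ec = ReplicationConfig.parse(
            org.apache.hadoop.hdds.client.ReplicationType.EC, "RS-3-2-1024K", conf);
        assert ReplicationConfig.adjustReplication(ec, (short) 3, conf) == ec;

        // RATIS/STAND_ALONE: the config is re-parsed with the new factor.
        ReplicationConfig ratis = ReplicationConfig.parse(
            org.apache.hadoop.hdds.client.ReplicationType.RATIS, "ONE", conf);
        ReplicationConfig adjusted =
            ReplicationConfig.adjustReplication(ratis, (short) 3, conf);
        // adjusted is now a RatisReplicationConfig with factor THREE.
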
---
 .../hadoop/hdds/client/ECReplicationConfig.java    |   3 +-
 .../hadoop/hdds/client/ReplicationConfig.java      |   9 +-
 .../hadoop/hdds/client/TestReplicationConfig.java  | 184 +++++++++++++++++++--
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |   4 +-
 4 files changed, 179 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
index 93cef00..8690876 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
@@ -92,7 +92,8 @@ public class ECReplicationConfig implements ReplicationConfig {
     final Matcher matcher = STRING_FORMAT.matcher(string);
     if (!matcher.matches()) {
       throw new IllegalArgumentException("EC replication config should be " +
-          "defined in the form rs-3-2-1024k, rs-6-3-1024; or rs-10-4-1024k");
+          "defined in the form rs-3-2-1024k, rs-6-3-1024; or rs-10-4-1024k." +
+          "Provided configuration was: " + string);
     }
 
     try {
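
For reference, a hedged sketch of the descriptor format the new error message
refers to (codec-data-parity-chunkSize); it uses only accessors that appear in
the test changes below, with illustrative values:

    ConfigurationSource conf = new OzoneConfiguration();
    ECReplicationConfig ec = (ECReplicationConfig) ReplicationConfig.parse(
        org.apache.hadoop.hdds.client.ReplicationType.EC, "RS-6-3-1024K", conf);

    // RS codec, 6 data blocks, 3 parity blocks, 1024K (1 MiB) chunk size.
    assert ec.getData() == 6;
    assert ec.getParity() == 3;
    assert ec.getEcChunkSize() == 1024 * 1024;
    assert ec.getRequiredNodes() == 9;  // data + parity

    // A malformed descriptor such as "RS-6" fails with an
    // IllegalArgumentException that now includes the provided value.
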
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
index 8e368fc..c031ed6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
@@ -123,9 +123,12 @@ public interface ReplicationConfig {
    */
   static ReplicationConfig adjustReplication(
       ReplicationConfig config, short replication, ConfigurationSource conf) {
-    return parse(
-        ReplicationType.valueOf(config.getReplicationType().toString()),
-        Short.toString(replication), conf);
+    ReplicationType replicationType =
+        ReplicationType.valueOf(config.getReplicationType().toString());
+    if (replicationType.equals(ReplicationType.EC)) {
+      return config;
+    }
+    return parse(replicationType, Short.toString(replication), conf);
   }
 
   /**
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index 6e6f7b5..19cc13e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hdds.client;
 
+import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.junit.Assume;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -36,8 +39,11 @@ import static org.junit.Assert.assertThrows;
 @RunWith(Parameterized.class)
 public class TestReplicationConfig {
 
+  private static final int MB = 1024 * 1024;
+  private static final int KB = 1024;
+
   @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter()
+  @Parameterized.Parameter
   public String type;
 
   @SuppressWarnings("checkstyle:VisibilityModifier")
@@ -46,20 +52,46 @@ public class TestReplicationConfig {
 
   @SuppressWarnings("checkstyle:VisibilityModifier")
   @Parameterized.Parameter(2)
+  public String codec;
+
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  @Parameterized.Parameter(3)
+  public int data;
+
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  @Parameterized.Parameter(4)
+  public int parity;
+
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  @Parameterized.Parameter(5)
+  public int chunkSize;
+
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  @Parameterized.Parameter(6)
   public Class<?> replicationConfigClass;
 
+  // NOTE: if a chunkSize other than KB or MB is added to the parameters,
+  // please revisit the ecDescriptor() method to handle the new chunkSize.
   @Parameterized.Parameters(name = "{0}/{1}")
   public static Object[][] parameters() {
     return new Object[][] {
-        {"RATIS", "ONE", RatisReplicationConfig.class },
-        {"RATIS", "THREE", RatisReplicationConfig.class},
-        {"STAND_ALONE", "ONE", StandaloneReplicationConfig.class},
-        {"STAND_ALONE", "THREE", StandaloneReplicationConfig.class}
+        {"RATIS", "ONE", null, 0, 0, 0, RatisReplicationConfig.class },
+        {"RATIS", "THREE", null, 0, 0, 0, RatisReplicationConfig.class},
+        {"STAND_ALONE", "ONE", null, 0, 0, 0,
+            StandaloneReplicationConfig.class},
+        {"STAND_ALONE", "THREE", null, 0, 0, 0,
+            StandaloneReplicationConfig.class},
+        {"EC", "RS-3-2-1024K", "RS", 3, 2, MB, ECReplicationConfig.class},
+        {"EC", "RS-3-2-1024", "RS", 3, 2, KB, ECReplicationConfig.class},
+        {"EC", "RS-6-3-1024K", "RS", 6, 3, MB, ECReplicationConfig.class},
+        {"EC", "RS-6-3-1024", "RS", 6, 3, KB, ECReplicationConfig.class},
+        {"EC", "RS-10-4-1024K", "RS", 10, 4, MB, ECReplicationConfig.class},
+        {"EC", "RS-10-4-1024", "RS", 10, 4, KB, ECReplicationConfig.class},
     };
   }
 
   @Test
-  public void testGetDefaultShouldReturnNullIfNotSetClientSide() {
+  public void testGetDefaultShouldReturnRatisThreeIfNotSetClientSide() {
     OzoneConfiguration conf = new OzoneConfiguration();
 
     ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
@@ -70,7 +102,8 @@ public class TestReplicationConfig {
   }
 
   @Test
-  public void testGetDefaultShouldCreateReplicationConfFromCustomConfValues() {
+  public void testGetDefaultShouldCreateReplicationConfFromConfValues() {
+    assumeRatisOrStandaloneType();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_REPLICATION_TYPE, type);
     conf.set(OZONE_REPLICATION, factor);
@@ -83,7 +116,22 @@ public class TestReplicationConfig {
   }
 
   @Test
+  public void testGetDefaultShouldCreateECReplicationConfFromConfValues() {
+    assumeECType();
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OZONE_REPLICATION_TYPE, type);
+    conf.set(OZONE_REPLICATION, ecDescriptor());
+
+    ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
+
+    validate(replicationConfig,
+        EcCodec.valueOf(codec), data, parity, chunkSize);
+  }
+
+  @Test
   public void deserialize() {
+    assumeRatisOrStandaloneType();
     final ReplicationConfig replicationConfig =
         ReplicationConfig.fromProtoTypeAndFactor(
             ReplicationType.valueOf(type),
@@ -95,7 +143,25 @@ public class TestReplicationConfig {
   }
 
   @Test
+  public void deserializeEC() {
+    assumeECType();
+    HddsProtos.ECReplicationConfig proto =
+        HddsProtos.ECReplicationConfig.newBuilder()
+            .setCodec(codec)
+            .setData(data)
+            .setParity(parity)
+            .setEcChunkSize(chunkSize)
+            .build();
+
+    ReplicationConfig config = ReplicationConfig
+        .fromProto(ReplicationType.EC, null, proto);
+
+    validate(config, EcCodec.valueOf(codec), data, parity, chunkSize);
+  }
+
+  @Test
   public void fromJavaObjects() {
+    assumeRatisOrStandaloneType();
     final ReplicationConfig replicationConfig =
         ReplicationConfig.fromTypeAndFactor(
             org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -108,6 +174,7 @@ public class TestReplicationConfig {
 
   @Test
   public void testParseFromTypeAndFactorAsString() {
+    assumeRatisOrStandaloneType();
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -120,10 +187,11 @@ public class TestReplicationConfig {
 
   @Test
   public void testParseFromTypeAndFactorAsStringifiedInteger() {
+    assumeRatisOrStandaloneType();
     ConfigurationSource conf = new OzoneConfiguration();
     String f =
-        factor == "ONE" ? "1"
-            : factor == "THREE" ? "3"
+        factor.equals("ONE") ? "1"
+            : factor.equals("THREE") ? "3"
             : "Test adjustment needed!";
 
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
@@ -136,7 +204,27 @@ public class TestReplicationConfig {
   }
 
   @Test
+  public void testParseECReplicationConfigFromString() {
+    assumeECType();
+
+    ConfigurationSource conf = new OzoneConfiguration();
+    ReplicationConfig repConfig = ReplicationConfig.parse(
+        org.apache.hadoop.hdds.client.ReplicationType.EC, ecDescriptor(), conf);
+
+    validate(repConfig, EcCodec.valueOf(codec), data, parity, chunkSize);
+  }
+
+  /**
+   * adjustReplication is used by RootedOzoneFileSystem to adjust the
+   * bucket's default replication config if needed.
+   *
+   * By definition, if the bucket's default replication configuration is
+   * RATIS or STAND_ALONE, the replica count can be adjusted with the
+   * replication factor.
+   */
+  @Test
   public void testAdjustReplication() {
+    assumeRatisOrStandaloneType();
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -154,6 +242,30 @@ public class TestReplicationConfig {
   }
 
   /**
+   * adjustReplication is used by RootedOzoneFileSystem to adjust the
+   * bucket's default replication config if needed.
+   *
+   * By definition, if the bucket's default replication configuration is EC,
+   * the client cannot adjust the configuration via the replication
+   * factor.
+   */
+  @Test
+  public void testAdjustECReplication() {
+    assumeECType();
+    ConfigurationSource conf = new OzoneConfiguration();
+    ReplicationConfig replicationConfig = ReplicationConfig.parse(
+        org.apache.hadoop.hdds.client.ReplicationType.EC, ecDescriptor(), conf);
+
+    validate(
+        ReplicationConfig.adjustReplication(replicationConfig, (short) 3, conf),
+        EcCodec.valueOf(codec), data, parity, chunkSize);
+
+    validate(
+        ReplicationConfig.adjustReplication(replicationConfig, (short) 1, conf),
+        EcCodec.valueOf(codec), data, parity, chunkSize);
+  }
+
+  /**
    * This is a bit of a tricky test in the parametrized environment.
    * The goal is to ensure that the following methods do validation while
    * creating the ReplicationConfig: getDefault, adjustReplication, parse.
@@ -165,17 +277,21 @@ public class TestReplicationConfig {
    */
   @Test
   public void testValidationBasedOnConfig() {
+    assumeRatisOrStandaloneType();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_REPLICATION+".allowed-configs",
         "^STANDALONE/ONE|RATIS/THREE$");
     conf.set(OZONE_REPLICATION, factor);
     conf.set(OZONE_REPLICATION_TYPE, type);
 
+    // in case of allowed configurations
     if ((type.equals("RATIS") && factor.equals("THREE"))
         || (type.equals("STAND_ALONE") && factor.equals("ONE"))) {
       ReplicationConfig replicationConfig = ReplicationConfig.parse(
           org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
           factor, conf);
+      // check if adjust throws exception when adjusting to a config that is
+      // not allowed
       if (type.equals("RATIS")) {
         assertThrows(IllegalArgumentException.class,
             () -> ReplicationConfig
@@ -185,27 +301,33 @@ public class TestReplicationConfig {
             () -> ReplicationConfig
                 .adjustReplication(replicationConfig, (short) 3, conf));
       }
-      ReplicationConfig.fromTypeAndFactor(
-          org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-          org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
-      ReplicationConfig.fromProtoTypeAndFactor(
-          ReplicationType.valueOf(type), ReplicationFactor.valueOf(factor));
       ReplicationConfig.getDefault(conf);
     } else {
+      // parse should fail in case of a configuration that is not allowed.
       assertThrows(IllegalArgumentException.class,
           () -> ReplicationConfig.parse(
               org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
               factor, conf));
+      // the default cannot be a configuration that is not allowed.
       assertThrows(IllegalArgumentException.class,
           () -> ReplicationConfig.getDefault(conf));
     }
+
+    // From proto and Java objects, we need to be able to create replication
+    // configs even if they are not allowed, as there might be keys that
+    // were created earlier, when the config was still allowed.
+    ReplicationConfig.fromTypeAndFactor(
+        org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+    ReplicationConfig.fromProtoTypeAndFactor(
+        ReplicationType.valueOf(type), ReplicationFactor.valueOf(factor));
+
     // CHAINED replication type is not supported by ReplicationConfig.
     assertThrows(RuntimeException.class,
         () -> ReplicationConfig.parse(
             org.apache.hadoop.hdds.client.ReplicationType.CHAINED, "", conf));
   }
 
-
   private void validate(ReplicationConfig replicationConfig,
       org.apache.hadoop.hdds.client.ReplicationType expectedType,
       org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor) {
@@ -231,4 +353,36 @@ public class TestReplicationConfig {
         ((ReplicatedReplicationConfig) replicationConfig)
             .getReplicationFactor().name());
   }
+
+  private void validate(ReplicationConfig replicationConfig,
+      EcCodec expectedCodec,
+      int expectedData, int expectedParity, int expectedChunkSize) {
+
+    assertEquals(ECReplicationConfig.class, replicationConfig.getClass());
+    assertEquals(ReplicationType.EC, replicationConfig.getReplicationType());
+
+    ECReplicationConfig ecReplicationConfig =
+        (ECReplicationConfig) replicationConfig;
+
+    assertEquals(expectedCodec, ecReplicationConfig.getCodec());
+    assertEquals(expectedData, ecReplicationConfig.getData());
+    assertEquals(expectedParity, ecReplicationConfig.getParity());
+    assertEquals(expectedChunkSize, ecReplicationConfig.getEcChunkSize());
+
+    assertEquals(expectedData + expectedParity,
+        replicationConfig.getRequiredNodes());
+  }
+
+  private String ecDescriptor() {
+    return codec.toUpperCase() + "-" + data + "-" + parity + "-" +
+        (chunkSize == MB ? "1024K" : "1024");
+  }
+
+  private void assumeRatisOrStandaloneType() {
+    Assume.assumeTrue(type.equals("RATIS") || type.equals("STAND_ALONE"));
+  }
+
+  private void assumeECType() {
+    Assume.assumeTrue(type.equals("EC"));
+  }
 }
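
A minimal sketch (assumptions flagged; not part of the patch) of the
allowed-configs validation exercised by testValidationBasedOnConfig above.
OZONE_REPLICATION is the same statically imported key the test uses, and the
regex value here is made up for illustration:

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OZONE_REPLICATION + ".allowed-configs", "^RATIS/THREE$");

    // Allowed by the regex: parse succeeds.
    ReplicationConfig.parse(
        org.apache.hadoop.hdds.client.ReplicationType.RATIS, "THREE", conf);

    // Disallowed configs make parse, getDefault and adjustReplication throw
    // IllegalArgumentException, while fromTypeAndFactor and
    // fromProtoTypeAndFactor still accept them so that keys created earlier,
    // when the config was allowed, remain readable.
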
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 4ca2193..d720b7b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -1639,8 +1639,8 @@ public class TestRootedOzoneFileSystem {
     OFSPath ofsPath = new OFSPath(vol + "/" + buck + "/test");
     final OzoneBucket bucket = adapter.getBucket(ofsPath, false);
     final OzoneKeyDetails key = bucket.getKey(ofsPath.getKeyName());
-    Assert.assertEquals(key.getReplicationConfig().getReplicationType().name(),
-        ReplicationType.EC.name());
+    Assert.assertEquals(ReplicationType.EC.name(),
+        key.getReplicationConfig().getReplicationType().name());
   }
 
   public void testNonPrivilegedUserMkdirCreateBucket() throws IOException {

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org