You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by pi...@apache.org on 2021/12/03 10:47:17 UTC
[ozone] branch master updated: HDDS-5997 Centralize string based replication config validation via ReplicationConfigValidator (#2871)
This is an automated email from the ASF dual-hosted git repository.
pifta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new d6e0e47 HDDS-5997 Centralize string based replication config validation via ReplicationConfigValidator (#2871)
d6e0e47 is described below
commit d6e0e4747f09ce5c04bcd2ac42cf9ce3e73198db
Author: Istvan Fajth <pi...@cloudera.com>
AuthorDate: Fri Dec 3 11:46:54 2021 +0100
HDDS-5997 Centralize string based replication config validation via ReplicationConfigValidator (#2871)
---
.../hadoop/hdds/client/RatisReplicationConfig.java | 27 +-
.../hdds/client/ReplicatedReplicationConfig.java | 34 +++
.../hadoop/hdds/client/ReplicationConfig.java | 138 +++++-----
.../hdds/client/ReplicationConfigValidator.java | 5 +-
.../hdds/client/StandaloneReplicationConfig.java | 23 +-
.../hadoop/hdds/scm/container/ContainerInfo.java | 3 +-
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 2 +-
.../hadoop/hdds/client/TestReplicationConfig.java | 280 +++++++++++++--------
.../scm/protocol/ScmBlockLocationProtocol.java | 2 +-
.../scm/pipeline/BackgroundPipelineCreator.java | 2 +-
...lockLocationProtocolServerSideTranslatorPB.java | 2 +-
.../hdds/scm/server/SCMClientProtocolServer.java | 4 +-
.../scm/server/upgrade/SCMUpgradeFinalizer.java | 5 +-
.../hdds/scm/node/TestContainerPlacement.java | 2 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 3 +-
.../scm/pipeline/MockRatisPipelineProvider.java | 2 +-
.../scm/pipeline/TestPipelineStateManagerImpl.java | 8 +-
.../apache/hadoop/ozone/client/OzoneBucket.java | 1 +
.../ozone/client/protocol/ClientProtocol.java | 1 +
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 2 +
.../apache/hadoop/ozone/om/helpers/OmKeyInfo.java | 2 +-
.../ozone/om/helpers/OmMultipartKeyInfo.java | 2 +-
...OzoneManagerProtocolClientSideTranslatorPB.java | 4 +-
.../hdds/scm/pipeline/TestMultiRaftSetup.java | 4 +-
.../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 2 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 9 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 2 +-
.../om/request/file/OMDirectoryCreateRequest.java | 2 +-
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 4 +-
.../S3InitiateMultipartUploadRequest.java | 2 +-
.../S3InitiateMultipartUploadRequestWithFSO.java | 2 +-
.../S3MultipartUploadCompleteRequest.java | 2 +-
.../ozone/om/request/TestOMRequestUtils.java | 4 +-
.../fs/ozone/BasicOzoneClientAdapterImpl.java | 4 +-
.../ozone/BasicRootedOzoneClientAdapterImpl.java | 5 +-
.../ozone/genesis/BenchMarkContainerStateMap.java | 2 +-
.../hadoop/ozone/shell/keys/CopyKeyHandler.java | 18 +-
.../hadoop/ozone/shell/keys/PutKeyHandler.java | 18 +-
38 files changed, 350 insertions(+), 284 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java
index 1646e39..43f5eba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java
@@ -27,8 +27,7 @@ import java.util.Objects;
/**
* Replication configuration for EC replication.
*/
-public class RatisReplicationConfig
- implements ReplicationConfig {
+public class RatisReplicationConfig implements ReplicatedReplicationConfig {
private final ReplicationFactor replicationFactor;
private static final ReplicationType REPLICATION_TYPE = ReplicationType.RATIS;
@@ -37,21 +36,6 @@ public class RatisReplicationConfig
this.replicationFactor = replicationFactor;
}
- public RatisReplicationConfig(String factorString) {
- ReplicationFactor factor = null;
- try {
- factor = ReplicationFactor.valueOf(Integer.parseInt(factorString));
- } catch (NumberFormatException ex) {
- try {
- factor = ReplicationFactor.valueOf(factorString);
- } catch (IllegalArgumentException x) {
- throw new IllegalArgumentException("Invalid RatisReplicationFactor '" +
- factorString + "'. Please use ONE or THREE!");
- }
- }
- this.replicationFactor = factor;
- }
-
public static boolean hasFactor(ReplicationConfig replicationConfig,
ReplicationFactor factor) {
if (replicationConfig instanceof RatisReplicationConfig) {
@@ -72,6 +56,7 @@ public class RatisReplicationConfig
return replicationFactor.getNumber();
}
+ @Override
public ReplicationFactor getReplicationFactor() {
return replicationFactor;
}
@@ -89,12 +74,12 @@ public class RatisReplicationConfig
}
@Override
- public String toString() {
- return REPLICATION_TYPE.name() + "/" + replicationFactor;
+ public int hashCode() {
+ return Objects.hash(replicationFactor);
}
@Override
- public int hashCode() {
- return Objects.hash(replicationFactor);
+ public String toString() {
+ return REPLICATION_TYPE.name() + "/" + replicationFactor;
}
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicatedReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicatedReplicationConfig.java
new file mode 100644
index 0000000..c949524
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicatedReplicationConfig.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.client;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+/**
+ * Interface extension to denote replication configurations that work by
+ * copying the data replicationFactor times, like RATIS or STANDALONE
+ * replications.
+ */
+public interface ReplicatedReplicationConfig extends ReplicationConfig {
+
+ /**
+ * Returns the associated replication factor of this ReplicationConfig.
+ * @return the replication factor
+ */
+ HddsProtos.ReplicationFactor getReplicationFactor();
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
index a770989..61aa7fa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
@@ -20,7 +20,13 @@ package org.apache.hadoop.hdds.client;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
+
+import java.util.Objects;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
/**
* Replication configuration for any ReplicationType with all the required
@@ -34,7 +40,7 @@ public interface ReplicationConfig {
* <p>
* Note: it's never used for EC replication where config is created.
*/
- static ReplicationConfig fromTypeAndFactor(
+ static ReplicationConfig fromProtoTypeAndFactor(
HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor
) {
@@ -59,93 +65,97 @@ public interface ReplicationConfig {
org.apache.hadoop.hdds.client.ReplicationType type,
org.apache.hadoop.hdds.client.ReplicationFactor factor
) {
- return fromTypeAndFactor(HddsProtos.ReplicationType.valueOf(type.name()),
+ return fromProtoTypeAndFactor(
+ HddsProtos.ReplicationType.valueOf(type.name()),
HddsProtos.ReplicationFactor.valueOf(factor.name()));
}
static ReplicationConfig getDefault(ConfigurationSource config) {
- String replication = config.get(OzoneConfigKeys.OZONE_REPLICATION);
- String replType = config.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE);
- ReplicationConfig replicationConfig = null;
- if (replication != null && replType != null) {
- replicationConfig = ReplicationConfig
- .fromTypeAndString(ReplicationType.valueOf(replType), replication);
- }
- return replicationConfig;
- }
-
- /**
- * Helper method to serialize from proto.
- * <p>
- * This uses either the old type/factor or the new ecConfig depends on the
- * type.
- * <p>
- * Note: It will support all the available replication types (including EC).
- * <p>
- * Separated to remain be synced with the EC feature branch, as later it
- * will have different signature.
- */
- static ReplicationConfig fromProto(
- HddsProtos.ReplicationType type,
- HddsProtos.ReplicationFactor factor) {
- switch (type) {
- case RATIS:
- case STAND_ALONE:
- return fromTypeAndFactor(type, factor);
- default:
- throw new UnsupportedOperationException(
- "Not supported replication: " + type);
- }
+ String replication =
+ config.get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT);
+ return parse(null, replication, config);
}
static HddsProtos.ReplicationFactor getLegacyFactor(
ReplicationConfig replicationConfig) {
- if (replicationConfig instanceof RatisReplicationConfig) {
- return ((RatisReplicationConfig) replicationConfig)
- .getReplicationFactor();
- } else if (replicationConfig instanceof StandaloneReplicationConfig) {
- return ((StandaloneReplicationConfig) replicationConfig)
+ if (replicationConfig instanceof ReplicatedReplicationConfig) {
+ return ((ReplicatedReplicationConfig) replicationConfig)
.getReplicationFactor();
}
throw new UnsupportedOperationException(
- "factor is not valid property of replication " + replicationConfig
- .getReplicationType());
+ "Replication configuration of type "
+ + replicationConfig.getReplicationType()
+ + " does not have a replication factor property.");
}
/**
* Create new replication config with adjusted replication factor.
* <p>
- * Used by hadoop file system. Some replication scheme (like EC) may not
+ * Used by hadoop file system. Some replication schemes (like EC) may not
* support changing the replication.
+ * <p>
+ * Based on the provided configuration the adjusted ReplicationConfig is
+ * validated against the ozone.replication.allowed-configs property, and if
+ * the new config is not allowed the method throws an
+ * IllegalArgumentException.
*/
static ReplicationConfig adjustReplication(
- ReplicationConfig replicationConfig, short replication) {
- switch (replicationConfig.getReplicationType()) {
- case RATIS:
- return new RatisReplicationConfig(
- org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
- .valueOf(replication));
- case STAND_ALONE:
- return new StandaloneReplicationConfig(
- org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
- .valueOf(replication));
- default:
- return replicationConfig;
- }
+ ReplicationConfig config, short replication, ConfigurationSource conf) {
+ return parse(
+ ReplicationType.valueOf(config.getReplicationType().toString()),
+ Short.toString(replication), conf);
}
- static ReplicationConfig fromTypeAndString(ReplicationType replicationType,
- String replication) {
- switch (replicationType) {
+ /**
+ * Parses the string representation of the replication configuration that is
+ * defined by the ReplicationType parameter.
+ * The configuration object is necessary to check if the parsed
+ * ReplicationConfig object is allowed based on the
+ * ozone.replication.allowed-configs property.
+ * @param type the ReplicationType to parse from the replication string
+ * @param replication the replication String that for example contains the
+ * replication factor for RATIS replication.
+ * @param config the current Ozone configuration to apply validation on the
+ * parsed object.
+ * @return a validated ReplicationConfig object that is allowed based on the
+ * system's configuration.
+ * @throws IllegalArgumentException if the parsed ReplicationConfig is not
+ * allowed by the ozone.replication.allowed-configs property, or
+ * if the given replication type or replication cannot be parsed.
+ * @throws NullPointerException if the ReplicationConfig was not created
+ * for the type.
+ */
+ static ReplicationConfig parse(ReplicationType type, String replication,
+ ConfigurationSource config) {
+ if (type == null) {
+ type = ReplicationType.valueOf(
+ config.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
+ }
+ replication = Objects.toString(replication,
+ config.get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
+
+ ReplicationConfig replicationConfig;
+ switch (type) {
case RATIS:
- return new RatisReplicationConfig(replication);
case STAND_ALONE:
- return new StandaloneReplicationConfig(replication);
+ ReplicationFactor factor;
+ try {
+ factor = ReplicationFactor.valueOf(Integer.parseInt(replication));
+ } catch (NumberFormatException ex) {
+ factor = ReplicationFactor.valueOf(replication);
+ }
+ replicationConfig = fromTypeAndFactor(type, factor);
+ break;
default:
- throw new UnsupportedOperationException(
- "String based replication config initialization is not supported for "
- + replicationType);
+ throw new RuntimeException("Replication type" + type + " can not"
+ + "be parsed.");
}
+
+ ReplicationConfigValidator validator =
+ config.getObject(ReplicationConfigValidator.class);
+ validator.validate(replicationConfig);
+
+ return replicationConfig;
}
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
index d460db8..5041f6a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
@@ -52,12 +52,11 @@ public class ReplicationConfigValidator {
if (validationRegexp != null) {
if (!validationRegexp.matcher(replicationConfig.toString()).matches()) {
throw new IllegalArgumentException("Invalid replication config " +
- replicationConfig.toString() +
- " Replication config should match " + validationPattern);
+ replicationConfig + ". Replication config should match the "
+ + validationPattern + " pattern.");
}
}
return replicationConfig;
}
-
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java
index 663f06c..840ba18 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java
@@ -27,7 +27,8 @@ import java.util.Objects;
/**
* Replication configuration for STANDALONE replication.
*/
-public class StandaloneReplicationConfig implements ReplicationConfig {
+public class StandaloneReplicationConfig implements
+ ReplicatedReplicationConfig {
private final ReplicationFactor replicationFactor;
private static final String REPLICATION_TYPE = "STANDALONE";
@@ -36,15 +37,7 @@ public class StandaloneReplicationConfig implements ReplicationConfig {
this.replicationFactor = replicationFactor;
}
- public StandaloneReplicationConfig(String factorString) {
- ReplicationFactor factor = null;
- try {
- factor = ReplicationFactor.valueOf(Integer.parseInt(factorString));
- } catch (NumberFormatException ex) {
- factor = ReplicationFactor.valueOf(factorString);
- }
- this.replicationFactor = factor;
- }
+ @Override
public ReplicationFactor getReplicationFactor() {
return replicationFactor;
}
@@ -59,13 +52,13 @@ public class StandaloneReplicationConfig implements ReplicationConfig {
return ReplicationType.STAND_ALONE;
}
- @JsonProperty("replicationType")
/**
* This method is here only to allow the string value for replicationType to
* be output in JSON. The enum defining the replication type STAND_ALONE has a
* string value of "STAND_ALONE", however various tests expect to see
* "STANDALONE" as the string.
*/
+ @JsonProperty("replicationType")
public String replicationType() {
return REPLICATION_TYPE;
}
@@ -83,12 +76,12 @@ public class StandaloneReplicationConfig implements ReplicationConfig {
}
@Override
- public String toString() {
- return REPLICATION_TYPE + "/" + replicationFactor;
+ public int hashCode() {
+ return Objects.hash(replicationFactor);
}
@Override
- public int hashCode() {
- return Objects.hash(replicationFactor);
+ public String toString() {
+ return REPLICATION_TYPE + "/" + replicationFactor;
}
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index fd5ee16..ae16784 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -108,7 +108,8 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
ContainerInfo.Builder builder = new ContainerInfo.Builder();
final ReplicationConfig config = ReplicationConfig
- .fromProto(info.getReplicationType(), info.getReplicationFactor());
+ .fromProtoTypeAndFactor(
+ info.getReplicationType(), info.getReplicationFactor());
builder.setUsedBytes(info.getUsedBytes())
.setNumberOfKeys(info.getNumberOfKeys())
.setState(info.getState())
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 7b375f4..044f151 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -338,7 +338,7 @@ public final class Pipeline {
}
final ReplicationConfig config = ReplicationConfig
- .fromProto(pipeline.getType(), pipeline.getFactor());
+ .fromProtoTypeAndFactor(pipeline.getType(), pipeline.getFactor());
return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
.setReplicationConfig(config)
.setState(PipelineState.fromProtobuf(pipeline.getState()))
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index 1c75d48..9adf8f7 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -17,155 +17,219 @@
*/
package org.apache.hadoop.hdds.client;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
/**
* Test replicationConfig.
*/
+@RunWith(Parameterized.class)
public class TestReplicationConfig {
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ @Parameterized.Parameter()
+ public String type;
+
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ @Parameterized.Parameter(1)
+ public String factor;
+
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ @Parameterized.Parameter(2)
+ public Class<?> replicationConfigClass;
+
+ @Parameterized.Parameters(name = "{0}/{1}")
+ public static Object[][] parameters() {
+ return new Object[][] {
+ {"RATIS", "ONE", RatisReplicationConfig.class },
+ {"RATIS", "THREE", RatisReplicationConfig.class},
+ {"STAND_ALONE", "ONE", StandaloneReplicationConfig.class},
+ {"STAND_ALONE", "THREE", StandaloneReplicationConfig.class}
+ };
+ }
+
@Test
public void testGetDefaultShouldCreateReplicationConfigFromDefaultConf() {
OzoneConfiguration conf = new OzoneConfiguration();
+
ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
- Assert.assertEquals(
- org.apache.hadoop.hdds.client.ReplicationType.RATIS.name(),
- replicationConfig.getReplicationType().name());
- Assert.assertEquals(3, replicationConfig.getRequiredNodes());
+
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.RATIS,
+ org.apache.hadoop.hdds.client.ReplicationFactor.THREE,
+ RatisReplicationConfig.class);
}
@Test
public void testGetDefaultShouldCreateReplicationConfFromCustomConfValues() {
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(OzoneConfigKeys.OZONE_REPLICATION_TYPE, "STAND_ALONE");
- conf.set(OzoneConfigKeys.OZONE_REPLICATION, "1");
+ conf.set(OZONE_REPLICATION_TYPE, type);
+ conf.set(OZONE_REPLICATION, factor);
+
ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
- Assert.assertEquals(
- org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE.name(),
- replicationConfig.getReplicationType().name());
- Assert.assertEquals(1, replicationConfig.getRequiredNodes());
- }
- @Test
- public void deserializeRatis() {
- final ReplicationConfig replicationConfig = ReplicationConfig
- .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE);
-
- Assert
- .assertEquals(RatisReplicationConfig.class,
- replicationConfig.getClass());
-
- RatisReplicationConfig ratisReplicationConfig =
- (RatisReplicationConfig) replicationConfig;
- Assert.assertEquals(ReplicationType.RATIS,
- ratisReplicationConfig.getReplicationType());
- Assert.assertEquals(ReplicationFactor.THREE,
- ratisReplicationConfig.getReplicationFactor());
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
}
@Test
- public void deserializeStandalone() {
- final ReplicationConfig replicationConfig = ReplicationConfig
- .fromTypeAndFactor(ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
-
- Assert
- .assertEquals(StandaloneReplicationConfig.class,
- replicationConfig.getClass());
-
- StandaloneReplicationConfig standalone =
- (StandaloneReplicationConfig) replicationConfig;
- Assert.assertEquals(ReplicationType.STAND_ALONE,
- standalone.getReplicationType());
- Assert.assertEquals(ReplicationFactor.ONE,
- standalone.getReplicationFactor());
+ public void deserialize() {
+ final ReplicationConfig replicationConfig =
+ ReplicationConfig.fromProtoTypeAndFactor(
+ ReplicationType.valueOf(type),
+ ReplicationFactor.valueOf(factor));
+
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
}
@Test
public void fromJavaObjects() {
+ final ReplicationConfig replicationConfig =
+ ReplicationConfig.fromTypeAndFactor(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
- final ReplicationConfig replicationConfig = ReplicationConfig
- .fromTypeAndFactor(org.apache.hadoop.hdds.client.ReplicationType.RATIS,
- org.apache.hadoop.hdds.client.ReplicationFactor.THREE);
-
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.RATIS);
- Assert.assertEquals(
- ((RatisReplicationConfig) replicationConfig).getReplicationFactor(),
- ReplicationFactor.THREE);
-
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
}
@Test
- public void fromTypeAndStringName() {
-
- ReplicationConfig replicationConfig = null;
-
- //RATIS-THREE
- replicationConfig = ReplicationConfig.fromTypeAndString(
- org.apache.hadoop.hdds.client.ReplicationType.RATIS, "THREE");
-
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.RATIS);
- Assert.assertEquals(
- ((RatisReplicationConfig) replicationConfig).getReplicationFactor(),
- ReplicationFactor.THREE);
-
- //RATIS-ONE
- replicationConfig = ReplicationConfig.fromTypeAndString(
- org.apache.hadoop.hdds.client.ReplicationType.RATIS, "ONE");
-
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.RATIS);
- Assert.assertEquals(
- ((RatisReplicationConfig) replicationConfig).getReplicationFactor(),
- ReplicationFactor.ONE);
+ public void testParseFromTypeAndFactorAsString() {
+ ConfigurationSource conf = new OzoneConfiguration();
+ ReplicationConfig replicationConfig = ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ factor, conf);
+
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+ }
- //STANDALONE-ONE
- replicationConfig = ReplicationConfig.fromTypeAndString(
- org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE, "ONE");
+ @Test
+ public void testParseFromTypeAndFactorAsStringifiedInteger() {
+ ConfigurationSource conf = new OzoneConfiguration();
+ String f =
+ factor == "ONE" ? "1"
+ : factor == "THREE" ? "3"
+ : "Test adjustment needed!";
+
+ ReplicationConfig replicationConfig = ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ f, conf);
+
+ validate(replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(this.factor));
+ }
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.STAND_ALONE);
- Assert.assertEquals(
- ((StandaloneReplicationConfig) replicationConfig)
- .getReplicationFactor(),
- ReplicationFactor.ONE);
+ @Test
+ public void testAdjustReplication() {
+ ConfigurationSource conf = new OzoneConfiguration();
+ ReplicationConfig replicationConfig = ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ factor, conf);
+
+ validate(
+ ReplicationConfig.adjustReplication(replicationConfig, (short) 3, conf),
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.THREE);
+
+ validate(
+ ReplicationConfig.adjustReplication(replicationConfig, (short) 1, conf),
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.ONE);
+ }
+ /**
+ * This is a bit of a tricky test in the parametrized environment.
+ * The goal is to ensure that the following methods do validation while
+ * creating the ReplicationConfig: getDefault, adjustReplication, parse.
+ *
+ * Two other creator methods fromProtoTypeAndFactor, and fromTypeAndFactor
+ * should allow creation of disallowed ReplicationConfigs as well, as in the
+ * system there might exist some keys that were created with a now disallowed
+ * ReplicationConfig.
+ */
+ @Test
+ public void testValidationBasedOnConfig() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(OZONE_REPLICATION+".allowed-configs",
+ "^STANDALONE/ONE|RATIS/THREE$");
+ conf.set(OZONE_REPLICATION, factor);
+ conf.set(OZONE_REPLICATION_TYPE, type);
+
+ if ((type.equals("RATIS") && factor.equals("THREE"))
+ || (type.equals("STAND_ALONE") && factor.equals("ONE"))) {
+ ReplicationConfig replicationConfig = ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ factor, conf);
+ if (type.equals("RATIS")) {
+ assertThrows(IllegalArgumentException.class,
+ () -> ReplicationConfig
+ .adjustReplication(replicationConfig, (short) 1, conf));
+ } else {
+ assertThrows(IllegalArgumentException.class,
+ () -> ReplicationConfig
+ .adjustReplication(replicationConfig, (short) 3, conf));
+ }
+ ReplicationConfig.fromTypeAndFactor(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+ ReplicationConfig.fromProtoTypeAndFactor(
+ ReplicationType.valueOf(type), ReplicationFactor.valueOf(factor));
+ ReplicationConfig.getDefault(conf);
+ } else {
+ assertThrows(IllegalArgumentException.class,
+ () -> ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
+ factor, conf));
+ assertThrows(IllegalArgumentException.class,
+ () -> ReplicationConfig.getDefault(conf));
+ }
+ // CHAINED replication type is not supported by ReplicationConfig.
+ assertThrows(RuntimeException.class,
+ () -> ReplicationConfig.parse(
+ org.apache.hadoop.hdds.client.ReplicationType.CHAINED, "", conf));
}
- @Test
- public void fromTypeAndStringInteger() {
- //RATIS-THREE
- ReplicationConfig replicationConfig = ReplicationConfig.fromTypeAndString(
- org.apache.hadoop.hdds.client.ReplicationType.RATIS, "3");
-
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.RATIS);
- Assert.assertEquals(
- ((RatisReplicationConfig) replicationConfig).getReplicationFactor(),
- ReplicationFactor.THREE);
+ private void validate(ReplicationConfig replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType expectedType,
+ org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor) {
+
+ validate(replicationConfig, expectedType, expectedFactor,
+ replicationConfigClass);
}
- @Test
- public void adjustReplication() {
- ReplicationConfig config =
- new RatisReplicationConfig(ReplicationFactor.ONE);
- final ReplicationConfig replicationConfig =
- ReplicationConfig.adjustReplication(config, (short) 1);
+ private void validate(ReplicationConfig replicationConfig,
+ org.apache.hadoop.hdds.client.ReplicationType expectedType,
+ org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor,
+ Class<?> expectedReplicationConfigClass) {
- Assert.assertEquals(replicationConfig.getReplicationType(),
- ReplicationType.RATIS);
- Assert.assertEquals(
- ((RatisReplicationConfig) replicationConfig)
- .getReplicationFactor(),
- ReplicationFactor.ONE);
+ assertEquals(expectedReplicationConfigClass, replicationConfig.getClass());
+ assertEquals(
+ expectedType.name(), replicationConfig.getReplicationType().name());
+ assertEquals(
+ expectedFactor.getValue(), replicationConfig.getRequiredNodes());
+ assertEquals(
+ expectedFactor.name(),
+ ((ReplicatedReplicationConfig) replicationConfig)
+ .getReplicationFactor().name());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index 175b65a..0f84eaa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -65,7 +65,7 @@ public interface ScmBlockLocationProtocol extends Closeable {
ReplicationType type, ReplicationFactor factor, String owner,
ExcludeList excludeList) throws IOException {
return allocateBlock(size, numBlocks, ReplicationConfig
- .fromTypeAndFactor(type, factor), owner, excludeList);
+ .fromProtoTypeAndFactor(type, factor), owner, excludeList);
}
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
index 2251be7..86a4061 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
@@ -218,7 +218,7 @@ public class BackgroundPipelineCreator implements SCMService {
for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
.values()) {
final ReplicationConfig replicationConfig =
- ReplicationConfig.fromTypeAndFactor(type, factor);
+ ReplicationConfig.fromProtoTypeAndFactor(type, factor);
if (skipCreation(replicationConfig, autoCreateFactorOne)) {
// Skip this iteration for creating pipeline
continue;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
index 95c7e1c..7b0fc67 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -179,7 +179,7 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB
List<AllocatedBlock> allocatedBlocks =
impl.allocateBlock(request.getSize(),
request.getNumBlocks(),
- ReplicationConfig.fromProto(
+ ReplicationConfig.fromProtoTypeAndFactor(
request.getType(),
request.getFactor()),
request.getOwner(),
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 6fbd8c6..2c6bdd5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -198,7 +198,7 @@ public class SCMClientProtocolServer implements
final ContainerInfo container = scm.getContainerManager()
.allocateContainer(
- ReplicationConfig.fromTypeAndFactor(replicationType, factor),
+ ReplicationConfig.fromProtoTypeAndFactor(replicationType, factor),
owner);
final Pipeline pipeline = scm.getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -563,7 +563,7 @@ public class SCMClientProtocolServer implements
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException {
Pipeline result = scm.getPipelineManager()
- .createPipeline(ReplicationConfig.fromTypeAndFactor(type, factor));
+ .createPipeline(ReplicationConfig.fromProtoTypeAndFactor(type, factor));
AUDIT.logWriteSuccess(
buildAuditMessageForSuccess(SCMAction.CREATE_PIPELINE, null));
return result;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
index 8cec9a3..b466d82 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
@@ -104,8 +104,9 @@ public class SCMUpgradeFinalizer extends
boolean hasPipeline = false;
while (!hasPipeline) {
ReplicationConfig ratisThree =
- ReplicationConfig.fromTypeAndFactor(HddsProtos.ReplicationType.RATIS,
- HddsProtos.ReplicationFactor.THREE);
+ ReplicationConfig.fromProtoTypeAndFactor(
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE);
int pipelineCount =
pipelineManager.getPipelines(ratisThree, Pipeline.PipelineState.OPEN)
.size();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index f17ea5e..773713f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -216,7 +216,7 @@ public class TestContainerPlacement {
ContainerInfo container = containerManager
.allocateContainer(
- ReplicationConfig.fromTypeAndFactor(
+ ReplicationConfig.fromProtoTypeAndFactor(
SCMTestUtils.getReplicationType(conf),
SCMTestUtils.getReplicationFactor(conf)),
OzoneConsts.OZONE);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 7c4de69..327de8c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -422,7 +422,8 @@ public class TestSCMNodeManager {
int actualNodeCount) throws Exception {
try {
ReplicationConfig ratisThree =
- ReplicationConfig.fromTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ ReplicationConfig.fromProtoTypeAndFactor(
+ HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
scm.getPipelineManager().createPipeline(ratisThree);
Assert.fail("3 nodes should not have been found for a pipeline.");
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 4760c0e..b03b24a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -77,7 +77,7 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
// overwrite pipeline state to main ALLOCATED
.setState(Pipeline.PipelineState.ALLOCATED)
.setReplicationConfig(ReplicationConfig
- .fromTypeAndFactor(initialPipeline.getType(),
+ .fromProtoTypeAndFactor(initialPipeline.getType(),
replicationConfig.getReplicationFactor()))
.setNodes(initialPipeline.getNodes())
.build();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
index aa9c590..ead75c1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
@@ -104,7 +104,8 @@ public class TestPipelineStateManagerImpl {
nodes.add(MockDatanodeDetails.randomDatanodeDetails());
}
return Pipeline.newBuilder()
- .setReplicationConfig(ReplicationConfig.fromTypeAndFactor(type, factor))
+ .setReplicationConfig(
+ ReplicationConfig.fromProtoTypeAndFactor(type, factor))
.setNodes(nodes)
.setState(Pipeline.PipelineState.ALLOCATED)
.setId(PipelineID.randomId())
@@ -213,7 +214,7 @@ public class TestPipelineStateManagerImpl {
// verify pipelines received
List<Pipeline> pipelines1 =
stateManager.getPipelines(
- ReplicationConfig.fromTypeAndFactor(type, factor));
+ ReplicationConfig.fromProtoTypeAndFactor(type, factor));
Assert.assertEquals(15, pipelines1.size());
pipelines1.stream().forEach(p -> {
Assert.assertEquals(type, p.getType());
@@ -276,7 +277,8 @@ public class TestPipelineStateManagerImpl {
// verify pipelines received
List<Pipeline> pipelines1 =
stateManager.getPipelines(
- ReplicationConfig.fromTypeAndFactor(type, factor), state);
+ ReplicationConfig.fromProtoTypeAndFactor(type, factor),
+ state);
Assert.assertEquals(5, pipelines1.size());
pipelines1.forEach(p -> {
Assert.assertEquals(type, p.getType());
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 4993ced..e472808 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -818,6 +818,7 @@ public class OzoneBucket extends WithMetadata {
* @throws IOException if there is error in the db
* invalid arguments
*/
+ @Deprecated
public OzoneOutputStream createFile(String keyName, long size,
ReplicationType type, ReplicationFactor factor, boolean overWrite,
boolean recursive) throws IOException {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 2feb577..8d6ea10 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -636,6 +636,7 @@ public interface ClientProtocol {
* invalid arguments
*/
@SuppressWarnings("checkstyle:parameternumber")
+ @Deprecated
OzoneOutputStream createFile(String volumeName, String bucketName,
String keyName, long size, ReplicationType type, ReplicationFactor factor,
boolean overWrite, boolean recursive) throws IOException;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 7d22930..4df5508 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1040,6 +1040,7 @@ public class RpcClient implements ClientProtocol {
keyProviderCache.cleanUp();
}
+ @Deprecated
@Override
public OmMultipartInfo initiateMultipartUpload(String volumeName,
String bucketName, String keyName, ReplicationType type,
@@ -1263,6 +1264,7 @@ public class RpcClient implements ClientProtocol {
}
@Override
+ @Deprecated
public OzoneOutputStream createFile(String volumeName, String bucketName,
String keyName, long size, ReplicationType type, ReplicationFactor factor,
boolean overWrite, boolean recursive) throws IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 65ea918..d6f2243 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -592,7 +592,7 @@ public final class OmKeyInfo extends WithParentObjectId {
.setCreationTime(keyInfo.getCreationTime())
.setModificationTime(keyInfo.getModificationTime())
.setReplicationConfig(ReplicationConfig
- .fromTypeAndFactor(keyInfo.getType(), keyInfo.getFactor()))
+ .fromProtoTypeAndFactor(keyInfo.getType(), keyInfo.getFactor()))
.addAllMetadata(KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList()))
.setFileEncryptionInfo(keyInfo.hasFileEncryptionInfo() ?
OMPBHelper.convert(keyInfo.getFileEncryptionInfo()) : null)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
index 430772a..41779ab 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
@@ -201,7 +201,7 @@ public class OmMultipartKeyInfo extends WithObjectID {
list.put(partKeyInfo.getPartNumber(), partKeyInfo));
final ReplicationConfig replicationConfig = ReplicationConfig
- .fromTypeAndFactor(
+ .fromProtoTypeAndFactor(
multipartKeyInfo.getType(),
multipartKeyInfo.getFactor());
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index a1e8b5a..b2c367f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1053,7 +1053,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
OmMultipartUploadListParts omMultipartUploadListParts =
new OmMultipartUploadListParts(
- ReplicationConfig.fromTypeAndFactor(response.getType(),
+ ReplicationConfig.fromProtoTypeAndFactor(response.getType(),
response.getFactor()),
response.getNextPartNumberMarker(), response.getIsTruncated());
omMultipartUploadListParts.addProtoPartList(response.getPartsListList());
@@ -1089,7 +1089,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
proto.getKeyName(),
proto.getUploadId(),
Instant.ofEpochMilli(proto.getCreationTime()),
- ReplicationConfig.fromTypeAndFactor(proto.getType(),
+ ReplicationConfig.fromProtoTypeAndFactor(proto.getType(),
proto.getFactor())
))
.collect(Collectors.toList());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index 84d025c..8a8de67 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -52,7 +52,7 @@ public class TestMultiRaftSetup {
private long pipelineDestroyTimeoutInMillis;
private static final ReplicationConfig RATIS_THREE =
- ReplicationConfig.fromTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
public void init(int dnCount, OzoneConfiguration conf) throws Exception {
@@ -87,7 +87,7 @@ public class TestMultiRaftSetup {
init(3, conf);
waitForPipelineCreated(2);
Assert.assertEquals(2, pipelineManager.getPipelines(ReplicationConfig
- .fromTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
ReplicationFactor.THREE)).size());
assertNotSamePeers();
shutdown();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index 7e0489f..ecf12724 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -120,7 +120,7 @@ public class TestHDDSUpgrade {
private AtomicBoolean testPassed = new AtomicBoolean(true);
private static final ReplicationConfig RATIS_THREE =
- ReplicationConfig.fromTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
private static MiniOzoneClusterProvider clusterProvider;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index b30121c..222e352 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -114,7 +114,6 @@ import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
-import static org.apache.hadoop.hdds.client.ReplicationConfig.fromTypeAndFactor;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
@@ -3636,8 +3635,8 @@ public abstract class TestOzoneRpcClientAbstract {
public void testHeadObject() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- ReplicationConfig replicationConfig = fromTypeAndFactor(RATIS,
- HddsProtos.ReplicationFactor.THREE);
+ ReplicationConfig replicationConfig = ReplicationConfig
+ .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
String value = "sample value";
store.createVolume(volumeName);
@@ -3678,8 +3677,8 @@ public abstract class TestOzoneRpcClientAbstract {
private void createRequiredForVersioningTest(String volumeName,
String bucketName, String keyName, boolean versioning) throws Exception {
- ReplicationConfig replicationConfig = fromTypeAndFactor(RATIS,
- HddsProtos.ReplicationFactor.THREE);
+ ReplicationConfig replicationConfig = ReplicationConfig
+ .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
String value = "sample value";
store.createVolume(volumeName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 29ef0b4..d64607e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1507,7 +1507,7 @@ public class KeyManagerImpl implements KeyManager {
omPartInfoList.add(omPartInfo);
//if there are parts, use replication type from one of the parts
- replicationConfig = ReplicationConfig.fromTypeAndFactor(
+ replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
partKeyInfo.getPartKeyInfo().getType(),
partKeyInfo.getPartKeyInfo().getFactor());
count++;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 314fa74..6c2a862 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -365,7 +365,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
.setModificationTime(keyArgs.getModificationTime())
.setDataSize(0)
.setReplicationConfig(ReplicationConfig
- .fromTypeAndFactor(keyArgs.getType(), keyArgs.getFactor()))
+ .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor()))
.setObjectID(objectId)
.setUpdateID(objectId);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 0bbeab9..e5fbcea 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -140,7 +140,7 @@ public abstract class OMKeyRequest extends OMClientRequest {
List<AllocatedBlock> allocatedBlocks;
try {
allocatedBlocks = scmClient.getBlockClient().allocateBlock(scmBlockSize,
- numBlocks, ReplicationConfig.fromTypeAndFactor(replicationType,
+ numBlocks, ReplicationConfig.fromProtoTypeAndFactor(replicationType,
replicationFactor),
omID, excludeList);
} catch (SCMException ex) {
@@ -647,7 +647,7 @@ public abstract class OMKeyRequest extends OMClientRequest {
// the key does not exist, create a new object.
// Blocks will be appended as version 0.
return createFileInfo(keyArgs, locations,
- ReplicationConfig.fromTypeAndFactor(
+ ReplicationConfig.fromProtoTypeAndFactor(
keyArgs.getType(), keyArgs.getFactor()),
keyArgs.getDataSize(), encInfo, prefixManager,
omBucketInfo, omPathInfo, transactionLogIndex, objectID);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 56e87e6..151564c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -172,7 +172,7 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
// initiate MPU.
final ReplicationConfig replicationConfig =
- ReplicationConfig.fromTypeAndFactor(
+ ReplicationConfig.fromProtoTypeAndFactor(
keyArgs.getType(), keyArgs.getFactor());
multipartKeyInfo = new OmMultipartKeyInfo.Builder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index a6e2085..b6b7464 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -163,7 +163,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
// also like this, even when key exists in a bucket, user can still
// initiate MPU.
final ReplicationConfig replicationConfig =
- ReplicationConfig.fromTypeAndFactor(
+ ReplicationConfig.fromProtoTypeAndFactor(
keyArgs.getType(), keyArgs.getFactor());
multipartKeyInfo = new OmMultipartKeyInfo.Builder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index cb8f9e5..f26aba8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -333,7 +333,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
new OmKeyInfo.Builder().setVolumeName(volumeName)
.setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
.setReplicationConfig(
- ReplicationConfig.fromTypeAndFactor(type, factor))
+ ReplicationConfig.fromProtoTypeAndFactor(type, factor))
.setCreationTime(keyArgs.getModificationTime())
.setModificationTime(keyArgs.getModificationTime())
.setDataSize(dataSize)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 6326ea3..cd5421a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -380,7 +380,7 @@ public final class TestOMRequestUtils {
.setDataSize(1000L)
.setReplicationConfig(
ReplicationConfig
- .fromTypeAndFactor(replicationType, replicationFactor))
+ .fromProtoTypeAndFactor(replicationType, replicationFactor))
.setObjectID(objectID)
.setUpdateID(objectID)
.build();
@@ -969,7 +969,7 @@ public final class TestOMRequestUtils {
.setModificationTime(Time.now())
.setDataSize(1000L)
.setReplicationConfig(ReplicationConfig
- .fromTypeAndFactor(replicationType, replicationFactor))
+ .fromProtoTypeAndFactor(replicationType, replicationFactor))
.setObjectID(objectID)
.setUpdateID(trxnLogIndex)
.setParentObjectID(parentID)
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index d08f104..2c3a465 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -85,6 +85,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
private ReplicationConfig replicationConfig;
private boolean securityEnabled;
private int configuredDnPort;
+ private OzoneConfiguration config;
/**
* Create new OzoneClientAdapter implementation.
@@ -166,6 +167,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
this.configuredDnPort = conf.getInt(
OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ this.config = conf;
}
@Override
@@ -209,7 +211,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
ReplicationConfig customReplicationConfig =
ReplicationConfig.adjustReplication(
- replicationConfig, replication
+ replicationConfig, replication, config
);
ozoneOutputStream =
bucket.createFile(key, 0, customReplicationConfig, overWrite,
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index a952a2b..2577105 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -103,6 +103,7 @@ public class BasicRootedOzoneClientAdapterImpl
private boolean securityEnabled;
private int configuredDnPort;
private BucketLayout defaultOFSBucketLayout;
+ private OzoneConfiguration config;
/**
* Create new OzoneClientAdapter implementation.
@@ -193,6 +194,7 @@ public class BasicRootedOzoneClientAdapterImpl
this.defaultOFSBucketLayout = BucketLayout.fromString(
conf.get(OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_DEFAULT_BUCKET_LAYOUT,
OzoneConfigKeys.OZONE_CLIENT_TEST_OFS_BUCKET_LAYOUT_DEFAULT));
+ config = conf;
} finally {
Thread.currentThread().setContextClassLoader(contextClassLoader);
}
@@ -334,7 +336,8 @@ public class BasicRootedOzoneClientAdapterImpl
|| replication == ReplicationFactor.THREE.getValue()) {
ozoneOutputStream = bucket.createFile(key, 0,
- ReplicationConfig.adjustReplication(replicationConfig, replication),
+ ReplicationConfig.adjustReplication(
+ replicationConfig, replication, config),
overWrite, recursive);
} else {
ozoneOutputStream =
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 50aa48f..917dd83 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -193,7 +193,7 @@ public class BenchMarkContainerStateMap {
}
bh.consume(state.stateMap
.getMatchingContainerIDs(OPEN, OzoneConsts.OZONE,
- ReplicationConfig.fromTypeAndFactor(
+ ReplicationConfig.fromProtoTypeAndFactor(
ReplicationType.STAND_ALONE, ReplicationFactor.ONE)));
}
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java
index b47f313..5c0aaa3 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java
@@ -25,7 +25,6 @@ import java.util.Map;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -39,10 +38,6 @@ import org.apache.hadoop.ozone.shell.bucket.BucketHandler;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.Parameters;
@@ -85,19 +80,8 @@ public class CopyKeyHandler extends BucketHandler {
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
- if (replicationType == null) {
- replicationType = ReplicationType.valueOf(
- getConf()
- .get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
- }
-
- if (replication == null) {
- replication = getConf().get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT);
- }
-
ReplicationConfig replicationConfig =
- getConf().getObject(ReplicationConfigValidator.class).validate(
- ReplicationConfig.fromTypeAndString(replicationType, replication));
+ ReplicationConfig.parse(replicationType, replication, getConf());
OzoneKeyDetails keyDetail = bucket.getKey(fromKey);
Map<String, String> keyMetadata = new HashMap<>(keyDetail.getMetadata());
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index c575b6e..b7fbdd1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -28,7 +28,6 @@ import java.util.Map;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -41,10 +40,6 @@ import org.apache.hadoop.ozone.shell.OzoneAddress;
import org.apache.commons.codec.digest.DigestUtils;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.Parameters;
@@ -88,19 +83,8 @@ public class PutKeyHandler extends KeyHandler {
}
}
- if (replicationType == null) {
- replicationType = ReplicationType.valueOf(
- getConf()
- .get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
- }
-
- if (replication == null) {
- replication = getConf().get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT);
- }
-
ReplicationConfig replicationConfig =
- getConf().getObject(ReplicationConfigValidator.class).validate(
- ReplicationConfig.fromTypeAndString(replicationType, replication));
+ ReplicationConfig.parse(replicationType, replication, getConf());
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org