Posted to commits@ozone.apache.org by so...@apache.org on 2021/12/10 11:13:04 UTC

[ozone] 01/02: Updates to address compile issues after merge

This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 73bddf36d009983d0e5a38c0e6043e8ae24e585e
Merge: a1d7292 937a3d0
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Wed Dec 8 21:07:54 2021 +0000

    Updates to address compile issues after merge

 hadoop-hdds/client/pom.xml                         |    4 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   38 +-
 hadoop-hdds/common/pom.xml                         |    4 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |   71 ++
 .../hadoop/hdds/client/RatisReplicationConfig.java |   27 +-
 .../hdds/client/ReplicatedReplicationConfig.java}  |   23 +-
 .../hadoop/hdds/client/ReplicationConfig.java      |  116 ++-
 .../hdds/client/ReplicationConfigValidator.java    |    5 +-
 .../hdds/client/StandaloneReplicationConfig.java   |   23 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |    1 +
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |    6 +-
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |   92 +-
 .../hadoop/hdds/utils/ResourceLimitCache.java      |    7 +-
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |    6 +
 .../common/src/main/resources/ozone-default.xml    |   18 +
 .../hadoop/hdds/client/TestReplicationConfig.java  |  276 ++++--
 .../hadoop/hdds/utils/TestResourceLimitCache.java  |   46 +-
 hadoop-hdds/config/pom.xml                         |    4 +-
 hadoop-hdds/container-service/pom.xml              |    4 +-
 .../container/common/helpers/ContainerUtils.java   |   68 --
 .../container/common/impl/HddsDispatcher.java      |    4 +-
 .../common/statemachine/DatanodeConfiguration.java |   43 +
 .../common/transport/server/XceiverServerGrpc.java |   52 +-
 .../server/ratis/ContainerStateMachine.java        |   73 +-
 .../container/keyvalue/KeyValueContainer.java      |    4 +
 .../common/helpers/TestContainerUtils.java         |    2 +-
 hadoop-hdds/docs/pom.xml                           |    4 +-
 hadoop-hdds/erasurecode/pom.xml                    |    4 +-
 hadoop-hdds/framework/pom.xml                      |    4 +-
 .../java/org/apache/hadoop/hdds/ExitManager.java   |   12 +-
 .../scm/protocol/ScmBlockLocationProtocol.java     |    2 +-
 hadoop-hdds/hadoop-dependency-client/pom.xml       |    4 +-
 hadoop-hdds/hadoop-dependency-server/pom.xml       |    4 +-
 hadoop-hdds/hadoop-dependency-test/pom.xml         |    4 +-
 hadoop-hdds/interface-admin/pom.xml                |    4 +-
 .../interface-admin/src/main/resources/proto.lock  |  504 ++++++++++
 hadoop-hdds/interface-client/pom.xml               |    4 +-
 .../interface-client/src/main/proto/hdds.proto     |   11 +
 .../interface-client/src/main/resources/proto.lock |  408 +++++++-
 hadoop-hdds/interface-server/pom.xml               |    4 +-
 .../interface-server/src/main/resources/proto.lock | 1024 ++++++++++++++++++++
 hadoop-hdds/pom.xml                                |    4 +-
 hadoop-hdds/server-scm/pom.xml                     |    4 +-
 .../scm/container/CloseContainerEventHandler.java  |   12 +-
 .../hdds/scm/container/ReplicationManager.java     |   10 +-
 .../scm/container/balancer/ContainerBalancer.java  |  209 ++--
 .../balancer/ContainerBalancerConfiguration.java   |   75 +-
 .../ContainerBalancerSelectionCriteria.java        |   22 +-
 .../scm/container/balancer/FindSourceGreedy.java   |  158 +++
 .../scm/container/balancer/FindSourceStrategy.java |   67 ++
 .../scm/container/balancer/FindTargetGreedy.java   |  121 ++-
 .../scm/container/balancer/FindTargetStrategy.java |   32 +-
 .../algorithms/SCMContainerPlacementCapacity.java  |    2 +-
 .../org/apache/hadoop/hdds/scm/ha/SCMContext.java  |   23 +-
 .../apache/hadoop/hdds/scm/ha/SCMStateMachine.java |    7 +
 .../scm/pipeline/BackgroundPipelineCreator.java    |    2 +-
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |   25 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |    4 +-
 .../scm/server/upgrade/SCMUpgradeFinalizer.java    |    5 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |    6 +-
 .../container/balancer/TestContainerBalancer.java  |   74 ++
 .../TestSCMContainerPlacementCapacity.java         |    6 +-
 .../hdds/scm/node/TestContainerPlacement.java      |    2 +-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |    6 +
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |   11 +-
 .../scm/pipeline/MockRatisPipelineProvider.java    |    2 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |   59 +-
 .../scm/pipeline/TestPipelineStateManagerImpl.java |    8 +-
 .../placement/TestContainerPlacement.java          |    4 +-
 hadoop-hdds/test-utils/pom.xml                     |    4 +-
 hadoop-hdds/tools/pom.xml                          |    4 +-
 .../hdds/scm/cli/datanode/UsageInfoSubcommand.java |   41 +-
 hadoop-ozone/client/pom.xml                        |    4 +-
 .../org/apache/hadoop/ozone/client/BucketArgs.java |   22 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java    |   47 +-
 .../ozone/client/protocol/ClientProtocol.java      |    1 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   62 +-
 .../hadoop/ozone/client/MockOmTransport.java       |    8 +-
 .../hadoop/ozone/client/rpc/RpcClientTest.java     |  217 +++++
 hadoop-ozone/common/pom.xml                        |    4 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   82 ++
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   11 +
 .../hadoop/ozone/om/helpers/OMNodeDetails.java     |   50 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |   31 +-
 .../ozone/om/helpers/OmMultipartKeyInfo.java       |    2 +-
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |   17 +
 .../hadoop/ozone/om/helpers/ServiceInfo.java       |   41 +-
 .../hadoop/ozone/om/protocol/OMAdminProtocol.java  |   23 +-
 .../hadoop/ozone/om/protocol/OMConfiguration.java  |   92 ++
 .../protocolPB/OMAdminProtocolClientSideImpl.java  |  136 +++
 .../ozone/om/protocolPB/OMAdminProtocolPB.java     |   37 +
 ...OzoneManagerProtocolClientSideTranslatorPB.java |    4 +-
 hadoop-ozone/csi/pom.xml                           |    4 +-
 hadoop-ozone/datanode/pom.xml                      |    4 +-
 hadoop-ozone/dist/pom.xml                          |    4 +-
 .../dist/src/main/compose/ozone-mr/common-config   |    1 -
 hadoop-ozone/dist/src/main/compose/ozone/README.md |    6 -
 .../src/main/compose/ozone/docker-compose.yaml     |   10 -
 hadoop-ozone/dist/src/main/compose/ozone/run.sh    |    4 -
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |   29 +-
 .../src/main/compose/ozonesecure/docker-config     |    1 -
 .../dist/src/main/compose/xcompat/docker-config    |    1 +
 hadoop-ozone/dist/src/main/k8s/examples/testlib.sh |    5 +-
 .../main/smoketest/basic/ozone-shell-single.robot  |    2 +-
 .../src/main/smoketest/basic/ozone-shell.robot     |   15 +-
 .../dist/src/main/smoketest/createmrenv.robot      |    2 +-
 .../dist/src/main/smoketest/freon/generate.robot   |    9 +
 .../dist/src/main/smoketest/freon/validate.robot   |    9 +
 .../dist/src/main/smoketest/ozonefs/setup.robot    |   12 +-
 .../dist/src/main/smoketest/s3/objectdelete.robot  |    6 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |   10 +-
 .../fault-injection-test/mini-chaos-tests/pom.xml  |    4 +-
 .../fault-injection-test/network-tests/pom.xml     |    2 +-
 hadoop-ozone/fault-injection-test/pom.xml          |    4 +-
 hadoop-ozone/insight/pom.xml                       |    4 +-
 hadoop-ozone/integration-test/pom.xml              |    4 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |    4 +
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |   45 +
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |    7 +-
 .../fs/ozone/TestOzoneFileSystemMissingParent.java |    2 +
 .../fs/ozone/TestOzoneFileSystemWithLinks.java     |  248 +++++
 .../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java  |    5 +
 .../hadoop/fs/ozone/contract/OzoneContract.java    |    3 +
 .../hdds/scm/pipeline/TestMultiRaftSetup.java      |    4 +-
 .../hadoop/hdds/upgrade/TestHDDSUpgrade.java       |    2 +-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |  150 ++-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |    5 +
 .../rpc/TestContainerStateMachineFailures.java     |   20 +-
 .../ozone/client/rpc/TestECKeyOutputStream.java    |    4 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |    9 +-
 .../ozone/freon/TestHadoopDirTreeGenerator.java    |    5 +-
 .../ozone/freon/TestHadoopNestedDirGenerator.java  |    3 +
 .../apache/hadoop/ozone/om/TestBucketOwner.java    |  244 +++++
 .../ozone/om/TestOMStartupWithBucketLayout.java    |  161 +++
 .../apache/hadoop/ozone/om/TestObjectStore.java    |  130 +++
 .../org/apache/hadoop/ozone/om/TestOmAcls.java     |    6 +-
 .../hadoop/ozone/om/TestOzoneManagerBootstrap.java |  184 +++-
 hadoop-ozone/interface-client/pom.xml              |    4 +-
 .../src/main/proto/OMAdminProtocol.proto           |   65 ++
 .../src/main/proto/OmClientProtocol.proto          |    4 +-
 .../interface-client/src/main/resources/proto.lock |  826 +++++++++++++++-
 hadoop-ozone/interface-storage/pom.xml             |    4 +-
 hadoop-ozone/ozone-manager/pom.xml                 |    4 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |   58 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |    2 +-
 .../apache/hadoop/ozone/om/OMPolicyProvider.java   |    5 +-
 .../apache/hadoop/ozone/om/OMStarterInterface.java |    2 +-
 .../org/apache/hadoop/ozone/om/OzoneAclUtils.java  |  125 +++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  367 ++++---
 .../hadoop/ozone/om/OzoneManagerStarter.java       |   59 +-
 .../apache/hadoop/ozone/om/OzoneManagerUtils.java  |  125 +++
 .../apache/hadoop/ozone/om/ha/OMHANodeDetails.java |    6 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |   24 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  219 +----
 .../hadoop/ozone/om/request/OMClientRequest.java   |   90 +-
 .../ozone/om/request/OMKeyRequestFactory.java      |  139 +++
 .../om/request/file/OMDirectoryCreateRequest.java  |   29 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |   13 +
 .../om/request/key/OMAllocateBlockRequest.java     |   14 +
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   13 +
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   14 +
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |   14 +
 .../ozone/om/request/key/OMKeyRenameRequest.java   |   14 +
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |    5 +-
 .../om/request/key/OMPathsPurgeRequestWithFSO.java |    5 +-
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |   25 +-
 .../om/request/key/acl/OMKeyAclRequestWithFSO.java |    7 +-
 .../om/request/key/acl/OMKeyAddAclRequest.java     |    5 +-
 .../request/key/acl/OMKeyAddAclRequestWithFSO.java |    5 +-
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |    4 +-
 .../key/acl/OMKeyRemoveAclRequestWithFSO.java      |    6 +-
 .../om/request/key/acl/OMKeySetAclRequest.java     |    4 +-
 .../request/key/acl/OMKeySetAclRequestWithFSO.java |    5 +-
 .../S3InitiateMultipartUploadRequest.java          |   15 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |   15 +
 .../S3MultipartUploadCommitPartRequest.java        |   15 +
 .../S3MultipartUploadCompleteRequest.java          |   16 +-
 .../protocolPB/OMAdminProtocolServerSideImpl.java  |   66 ++
 .../protocolPB/OzoneManagerRequestHandler.java     |    2 +-
 .../hadoop/ozone/protocolPB/RequestHandler.java    |    4 +-
 .../hadoop/ozone/om/TestOzoneManagerStarter.java   |    4 +-
 .../ozone/om/request/TestOMRequestUtils.java       |    4 +-
 .../ozone/om/request/key/TestOMKeyAclRequest.java  |   10 +-
 .../om/request/key/TestOMKeyAclRequestWithFSO.java |   16 +-
 .../s3/multipart/TestS3MultipartRequest.java       |    7 +-
 .../ozone/om/response/TestCleanupTableInfo.java    |    7 +-
 hadoop-ozone/ozonefs-common/pom.xml                |    4 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   16 +-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   14 +-
 .../apache/hadoop/fs/ozone/OzoneClientUtils.java   |   67 ++
 hadoop-ozone/ozonefs-hadoop2/pom.xml               |    4 +-
 hadoop-ozone/ozonefs-hadoop3/pom.xml               |    4 +-
 hadoop-ozone/ozonefs-shaded/pom.xml                |    4 +-
 hadoop-ozone/ozonefs/pom.xml                       |    4 +-
 hadoop-ozone/pom.xml                               |    4 +-
 hadoop-ozone/recon-codegen/pom.xml                 |    2 +-
 .../recon/schema/ContainerSchemaDefinition.java    |    3 +-
 hadoop-ozone/recon/pom.xml                         |    2 +-
 .../codec/ContainerReplicaHistoryListCodec.java    |   45 +-
 .../ozone/recon/fsck/ContainerHealthStatus.java    |   17 +-
 .../persistence/ContainerHealthSchemaManager.java  |   12 +-
 .../ozone/recon/persistence/ContainerHistory.java  |    9 +-
 .../ozone/recon/scm/ContainerReplicaHistory.java   |   27 +-
 .../recon/scm/ContainerReplicaHistoryList.java     |   32 +-
 .../ozone/recon/scm/ReconContainerManager.java     |   21 +-
 .../scm/ReconStorageContainerManagerFacade.java    |    5 +-
 .../impl/ReconContainerMetadataManagerImpl.java    |    4 +-
 .../ozone/recon/spi/impl/ReconDBDefinition.java    |   13 +-
 .../ozone/recon/api/TestContainerEndpoint.java     |   26 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |   16 +-
 .../ozone/recon/scm/TestReconContainerManager.java |   13 +-
 .../ozone/recon/scm/TestReconPipelineManager.java  |    5 +
 hadoop-ozone/s3gateway/pom.xml                     |    4 +-
 .../hadoop/ozone/s3/OzoneClientProducer.java       |    5 +
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |    9 +-
 .../apache/hadoop/ozone/s3/util/S3StorageType.java |   22 +-
 .../s3/endpoint/TestAbortMultipartUpload.java      |    2 +
 .../s3/endpoint/TestInitiateMultipartUpload.java   |    2 +
 .../hadoop/ozone/s3/endpoint/TestListParts.java    |    2 +
 .../s3/endpoint/TestMultipartUploadComplete.java   |    2 +
 .../s3/endpoint/TestMultipartUploadWithCopy.java   |    2 +
 .../hadoop/ozone/s3/endpoint/TestObjectDelete.java |    2 +
 .../hadoop/ozone/s3/endpoint/TestObjectGet.java    |    2 +
 .../hadoop/ozone/s3/endpoint/TestObjectHead.java   |    2 +
 .../hadoop/ozone/s3/endpoint/TestObjectPut.java    |    2 +
 .../hadoop/ozone/s3/endpoint/TestPartUpload.java   |    2 +
 .../ozone/s3/endpoint/TestPermissionCheck.java     |    4 +
 hadoop-ozone/tools/pom.xml                         |    4 +-
 .../apache/hadoop/ozone/debug/PrefixParser.java    |    8 +-
 .../hadoop/ozone/freon/DatanodeChunkGenerator.java |    8 +-
 .../hadoop/ozone/freon/DatanodeChunkValidator.java |  155 ++-
 .../java/org/apache/hadoop/ozone/freon/Freon.java  |    3 +-
 .../hadoop/ozone/freon/SCMThroughputBenchmark.java |  908 +++++++++++++++++
 .../ozone/genesis/BenchMarkContainerStateMap.java  |    2 +-
 .../ozone/shell/bucket/CreateBucketHandler.java    |   19 +-
 .../hadoop/ozone/shell/keys/CopyKeyHandler.java    |   18 +-
 .../hadoop/ozone/shell/keys/PutKeyHandler.java     |   18 +-
 pom.xml                                            |    8 +-
 238 files changed, 8854 insertions(+), 1532 deletions(-)

diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
index a521195,61aa7fa..8e368fc
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
@@@ -64,45 -71,15 +71,37 @@@ public interface ReplicationConfig 
    }
  
    static ReplicationConfig getDefault(ConfigurationSource config) {
-     String replication = config.get(OzoneConfigKeys.OZONE_REPLICATION);
-     String replType = config.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE);
-     ReplicationConfig replicationConfig = null;
-     if (replication != null && replType != null) {
-       replicationConfig = ReplicationConfig
-           .fromTypeAndString(ReplicationType.valueOf(replType), replication);
-     }
-     return replicationConfig;
+     String replication =
+         config.get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT);
+     return parse(null, replication, config);
    }
  
 +  /**
 +   * Helper method to deserialize a ReplicationConfig from proto.
 +   * <p>
 +   * This uses either the old type/factor or the new ecConfig, depending on
 +   * the type.
 +   */
 +  static ReplicationConfig fromProto(
 +      HddsProtos.ReplicationType type,
 +      HddsProtos.ReplicationFactor factor,
 +      HddsProtos.ECReplicationConfig ecConfig) {
 +    switch (type) {
 +    case EC:
 +      return new ECReplicationConfig(ecConfig);
 +    case RATIS:
 +    case STAND_ALONE:
-       return fromTypeAndFactor(type, factor);
++      return fromProtoTypeAndFactor(type, factor);
 +    default:
 +      throw new UnsupportedOperationException(
 +          "Not supported replication: " + type);
 +    }
 +  }
 +
    static HddsProtos.ReplicationFactor getLegacyFactor(
        ReplicationConfig replicationConfig) {
-     if (replicationConfig instanceof RatisReplicationConfig) {
-       return ((RatisReplicationConfig) replicationConfig)
-           .getReplicationFactor();
-     } else if (replicationConfig instanceof StandaloneReplicationConfig) {
-       return ((StandaloneReplicationConfig) replicationConfig)
+     if (replicationConfig instanceof ReplicatedReplicationConfig) {
+       return ((ReplicatedReplicationConfig) replicationConfig)
            .getReplicationFactor();
      }
      throw new UnsupportedOperationException(
@@@ -113,39 -91,71 +113,73 @@@
    /**
     * Create new replication config with adjusted replication factor.
     * <p>
-    * Used by hadoop file system. Some replication scheme (like EC) may not
+    * Used by hadoop file system. Some replication schemes (like EC) may not
     * support changing the replication.
+    * <p>
+    * Based on the provided configuration the adjusted ReplicationConfig is
+    * validated against the ozone.replication.allowed-configs property, and if
+    * the new config is not allowed the method throws an
+    * IllegalArgumentException.
     */
    static ReplicationConfig adjustReplication(
-       ReplicationConfig replicationConfig, short replication) {
-     switch (replicationConfig.getReplicationType()) {
-     case RATIS:
-       return new RatisReplicationConfig(
-           org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
-               .valueOf(replication));
-     case STAND_ALONE:
-       return new StandaloneReplicationConfig(
-           org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
-               .valueOf(replication));
-     default:
-       return replicationConfig;
-     }
+       ReplicationConfig config, short replication, ConfigurationSource conf) {
+     return parse(
+         ReplicationType.valueOf(config.getReplicationType().toString()),
+         Short.toString(replication), conf);
    }
  
-   static ReplicationConfig fromTypeAndString(ReplicationType replicationType,
-       String replication) {
-     switch (replicationType) {
+   /**
+    * Parses the string representation of the replication configuration that is
+    * defined by the ReplicationType parameter.
+    * The configuration object is necessary to check if the parsed
+    * ReplicationConfig object is allowed based on the
+    * ozone.replication.allowed-configs property.
+    * @param type the ReplicationType to parse from the replication string
+    * @param replication the replication string, which for example contains
+    *                    the replication factor for RATIS replication.
+    * @param config the current Ozone configuration to apply validation on the
+    *               parsed object.
+    * @return a validated ReplicationConfig object that is allowed based on the
+    *         system's configuration.
+    * @throws IllegalArgumentException if the parsed ReplicationConfig is not
+    *         allowed by the ozone.replication.allowed-configs property, or
+    *         if the given replication type or replication cannot be parsed.
+    * @throws NullPointerException if the ReplicationConfig was not created
+    *         for the type.
+    */
+   static ReplicationConfig parse(ReplicationType type, String replication,
+       ConfigurationSource config) {
+     if (type == null) {
+       type = ReplicationType.valueOf(
+           config.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
+     }
+     replication = Objects.toString(replication,
+         config.get(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
+ 
+     ReplicationConfig replicationConfig;
+     switch (type) {
      case RATIS:
-       return new RatisReplicationConfig(replication);
      case STAND_ALONE:
-       return new StandaloneReplicationConfig(replication);
+       ReplicationFactor factor;
+       try {
+         factor = ReplicationFactor.valueOf(Integer.parseInt(replication));
+       } catch (NumberFormatException ex) {
+         factor = ReplicationFactor.valueOf(replication);
+       }
+       replicationConfig = fromTypeAndFactor(type, factor);
+       break;
 +    case EC:
 +      return new ECReplicationConfig(replication);
      default:
-       throw new UnsupportedOperationException(
-           "String based replication config initialization is not supported for "
-               + replicationType);
+       throw new RuntimeException("Replication type " + type + " cannot"
+           + " be parsed.");
      }
+ 
+     ReplicationConfigValidator validator =
+         config.getObject(ReplicationConfigValidator.class);
+     validator.validate(replicationConfig);
+ 
+     return replicationConfig;
    }
  
    /**
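
For reference, a minimal sketch of how a client is expected to call the new
parse/getDefault APIs above (the class name is an illustrative assumption;
validation against ozone.replication.allowed-configs happens inside parse,
per the javadoc in this hunk):

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ReplicationConfigParseSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Explicit type and factor; the parsed config is validated against
        // ozone.replication.allowed-configs before being returned.
        ReplicationConfig ratisThree =
            ReplicationConfig.parse(ReplicationType.RATIS, "THREE", conf);

        // A null type or replication string falls back to the configured
        // ozone.replication.type / ozone.replication defaults.
        ReplicationConfig fromDefaults =
            ReplicationConfig.parse(null, null, conf);

        // getDefault(conf) is the shorthand for the all-defaults case.
        ReplicationConfig byDefault = ReplicationConfig.getDefault(conf);
      }
    }
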
diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 2163539,044f151..2d14d4e
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@@ -286,61 -261,11 +286,15 @@@ public final class Pipeline 
      return replicationConfig;
    }
  
-   public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
-       throws UnknownPipelineStateException {
-     Preconditions.checkNotNull(pipeline, "Pipeline is null");
- 
-     Map<DatanodeDetails, Integer> nodes = new LinkedHashMap<>();
-     int index = 0;
-     int repIndexListLength = pipeline.getMemberReplicaIndexesCount();
-     for (DatanodeDetailsProto member : pipeline.getMembersList()) {
-       int repIndex = 0;
-       if (index < repIndexListLength) {
-         repIndex = pipeline.getMemberReplicaIndexes(index);
-       }
-       nodes.put(DatanodeDetails.getFromProtoBuf(member), repIndex);
-       index++;
-     }
-     UUID leaderId = null;
-     if (pipeline.hasLeaderID128()) {
-       HddsProtos.UUID uuid = pipeline.getLeaderID128();
-       leaderId = new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
-     } else if (pipeline.hasLeaderID() &&
-         StringUtils.isNotEmpty(pipeline.getLeaderID())) {
-       leaderId = UUID.fromString(pipeline.getLeaderID());
-     }
- 
-     UUID suggestedLeaderId = null;
-     if (pipeline.hasSuggestedLeaderID()) {
-       HddsProtos.UUID uuid = pipeline.getSuggestedLeaderID();
-       suggestedLeaderId =
-           new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
-     }
- 
-     final ReplicationConfig config = ReplicationConfig
-         .fromProto(pipeline.getType(), pipeline.getFactor(),
-             pipeline.getEcReplicationConfig());
-     return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
-         .setReplicationConfig(config)
-         .setState(PipelineState.fromProtobuf(pipeline.getState()))
-         .setNodes(new ArrayList<>(nodes.keySet()))
-         .setReplicaIndexes(nodes)
-         .setLeaderId(leaderId)
-         .setSuggestedLeaderId(suggestedLeaderId)
-         .setNodesInOrder(pipeline.getMemberOrdersList())
-         .setCreateTimestamp(pipeline.getCreationTimeStamp())
-         .build();
-   }
- 
    public HddsProtos.Pipeline getProtobufMessage(int clientVersion)
        throws UnknownPipelineStateException {
 +
      List<HddsProtos.DatanodeDetailsProto> members = new ArrayList<>();
 +    List<Integer> memberReplicaIndexes = new ArrayList<>();
 +
      for (DatanodeDetails dn : nodeStatus.keySet()) {
        members.add(dn.toProto(clientVersion));
 +      memberReplicaIndexes.add(replicaIndexes.getOrDefault(dn, 0));
      }
  
      HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder()
@@@ -394,6 -313,43 +348,52 @@@
      return builder.build();
    }
  
+   public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
+       throws UnknownPipelineStateException {
+     Preconditions.checkNotNull(pipeline, "Pipeline is null");
+ 
 -    List<DatanodeDetails> nodes = new ArrayList<>();
++    Map<DatanodeDetails, Integer> nodes = new LinkedHashMap<>();
++    int index = 0;
++    int repIndexListLength = pipeline.getMemberReplicaIndexesCount();
+     for (DatanodeDetailsProto member : pipeline.getMembersList()) {
 -      nodes.add(DatanodeDetails.getFromProtoBuf(member));
++      int repIndex = 0;
++      if (index < repIndexListLength) {
++        repIndex = pipeline.getMemberReplicaIndexes(index);
++      }
++      nodes.put(DatanodeDetails.getFromProtoBuf(member), repIndex);
++      index++;
+     }
+     UUID leaderId = null;
+     if (pipeline.hasLeaderID128()) {
+       HddsProtos.UUID uuid = pipeline.getLeaderID128();
+       leaderId = new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
+     } else if (pipeline.hasLeaderID() &&
+         StringUtils.isNotEmpty(pipeline.getLeaderID())) {
+       leaderId = UUID.fromString(pipeline.getLeaderID());
+     }
+ 
+     UUID suggestedLeaderId = null;
+     if (pipeline.hasSuggestedLeaderID()) {
+       HddsProtos.UUID uuid = pipeline.getSuggestedLeaderID();
+       suggestedLeaderId =
+           new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
+     }
+ 
+     final ReplicationConfig config = ReplicationConfig
 -        .fromProtoTypeAndFactor(pipeline.getType(), pipeline.getFactor());
++        .fromProto(pipeline.getType(), pipeline.getFactor(),
++            pipeline.getEcReplicationConfig());
+     return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
+         .setReplicationConfig(config)
+         .setState(PipelineState.fromProtobuf(pipeline.getState()))
 -        .setNodes(nodes)
++        .setNodes(new ArrayList<>(nodes.keySet()))
++        .setReplicaIndexes(nodes)
+         .setLeaderId(leaderId)
+         .setSuggestedLeaderId(suggestedLeaderId)
+         .setNodesInOrder(pipeline.getMemberOrdersList())
+         .setCreateTimestamp(pipeline.getCreationTimeStamp())
+         .build();
+   }
+ 
    @Override
    public boolean equals(Object o) {
      if (this == o) {
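
The relocated getFromProtobuf above also starts reading per-member replica
indexes, defaulting to 0 for protos that predate the field. The pairing
logic, pulled out here as a hypothetical standalone helper for clarity:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class ReplicaIndexSketch {
      // Hypothetical helper mirroring getFromProtobuf: members beyond the
      // replica-index list length get the default index 0.
      static Map<DatanodeDetails, Integer> readReplicaIndexes(
          HddsProtos.Pipeline pipeline) {
        Map<DatanodeDetails, Integer> nodes = new LinkedHashMap<>();
        int listLength = pipeline.getMemberReplicaIndexesCount();
        int index = 0;
        for (HddsProtos.DatanodeDetailsProto member :
            pipeline.getMembersList()) {
          int repIndex = index < listLength
              ? pipeline.getMemberReplicaIndexes(index) : 0;
          nodes.put(DatanodeDetails.getFromProtoBuf(member), repIndex);
          index++;
        }
        return nodes;
      }
    }
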
diff --cc hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index a85415e,9adf8f7..6e6f7b5
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@@ -27,13 -33,41 +33,40 @@@ import static org.junit.Assert.assertTh
  /**
   * Test replicationConfig.
   */
+ @RunWith(Parameterized.class)
  public class TestReplicationConfig {
  
+   @SuppressWarnings("checkstyle:VisibilityModifier")
+   @Parameterized.Parameter()
+   public String type;
+ 
+   @SuppressWarnings("checkstyle:VisibilityModifier")
+   @Parameterized.Parameter(1)
+   public String factor;
+ 
+   @SuppressWarnings("checkstyle:VisibilityModifier")
+   @Parameterized.Parameter(2)
+   public Class<?> replicationConfigClass;
+ 
+   @Parameterized.Parameters(name = "{0}/{1}")
+   public static Object[][] parameters() {
+     return new Object[][] {
+         {"RATIS", "ONE", RatisReplicationConfig.class },
+         {"RATIS", "THREE", RatisReplicationConfig.class},
+         {"STAND_ALONE", "ONE", StandaloneReplicationConfig.class},
+         {"STAND_ALONE", "THREE", StandaloneReplicationConfig.class}
+     };
+   }
+ 
    @Test
 -  public void testGetDefaultShouldCreateReplicationConfigFromDefaultConf() {
 +  public void testGetDefaultShouldReturnNullIfNotSetClientSide() {
      OzoneConfiguration conf = new OzoneConfiguration();
+ 
      ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
-     Assert.assertNull(replicationConfig);
 -
+     validate(replicationConfig,
+         org.apache.hadoop.hdds.client.ReplicationType.RATIS,
+         org.apache.hadoop.hdds.client.ReplicationFactor.THREE,
+         RatisReplicationConfig.class);
    }
  
    @Test
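
The validate(...) helper called above lies outside this hunk; a plausible
reconstruction, inferred purely from the call site (the committed version
may differ):

    // Hypothetical shape of the elided validate(...) helper.
    private void validate(ReplicationConfig replicationConfig,
        org.apache.hadoop.hdds.client.ReplicationType expectedType,
        org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor,
        Class<?> expectedClass) {
      Assert.assertEquals(expectedClass, replicationConfig.getClass());
      Assert.assertEquals(expectedType.name(),
          replicationConfig.getReplicationType().name());
      Assert.assertEquals(expectedFactor.getValue(),
          replicationConfig.getRequiredNodes());
    }
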
diff --cc hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 521d563,032705d..b0ed3be
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@@ -46,8 -46,6 +46,7 @@@ import org.apache.hadoop.ozone.OzoneCon
  import org.apache.hadoop.ozone.container.common.impl.ContainerData;
  import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
  import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
- import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  import org.yaml.snakeyaml.Yaml;
diff --cc hadoop-hdds/erasurecode/pom.xml
index 40149fe,dbf5df3..ca2e3cb
--- a/hadoop-hdds/erasurecode/pom.xml
+++ b/hadoop-hdds/erasurecode/pom.xml
@@@ -19,14 -19,13 +19,14 @@@ https://maven.apache.org/xsd/maven-4.0.
    <modelVersion>4.0.0</modelVersion>
    <parent>
      <groupId>org.apache.ozone</groupId>
 -    <artifactId>ozone</artifactId>
 +    <artifactId>hdds</artifactId>
-     <version>1.2.0-SNAPSHOT</version>
+     <version>1.3.0-SNAPSHOT</version>
    </parent>
 -  <artifactId>ozone-client</artifactId>
 +  <artifactId>hdds-erasurecode</artifactId>
-   <version>1.2.0-SNAPSHOT</version>
+   <version>1.3.0-SNAPSHOT</version>
 -  <description>Apache Ozone Client</description>
 -  <name>Apache Ozone Client</name>
 +  <description>Apache Ozone Distributed Data Store Erasurecode utils
 +  </description>
 +  <name>Apache Ozone HDDS Erasurecode</name>
    <packaging>jar</packaging>
  
    <dependencies>
diff --cc hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index 62f61f6,43d28f0..6876059
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@@ -80,15 -80,15 +82,16 @@@ public final class BucketArgs 
     * @param sourceBucket
     * @param quotaInBytes Bucket quota in bytes.
     * @param quotaInNamespace Bucket quota in counts.
 -   * @param bucketLayout Bucket Layouts.
 +   * @param bucketLayout bucket layout.
+    * @param owner owner of the bucket.
 +   * @param defaultReplicationConfig default replication config.
     */
    @SuppressWarnings("parameternumber")
    private BucketArgs(Boolean versioning, StorageType storageType,
        List<OzoneAcl> acls, Map<String, String> metadata,
        String bucketEncryptionKey, String sourceVolume, String sourceBucket,
        long quotaInBytes, long quotaInNamespace, BucketLayout bucketLayout,
-       DefaultReplicationConfig defaultReplicationConfig) {
 -      String owner) {
++      String owner, DefaultReplicationConfig defaultReplicationConfig) {
      this.acls = acls;
      this.versioning = versioning;
      this.storageType = storageType;
@@@ -99,7 -99,7 +102,8 @@@
      this.quotaInBytes = quotaInBytes;
      this.quotaInNamespace = quotaInNamespace;
      this.bucketLayout = bucketLayout;
+     this.owner = owner;
 +    this.defaultReplicationConfig = defaultReplicationConfig;
    }
  
    /**
@@@ -205,7 -204,7 +216,8 @@@
      private long quotaInBytes;
      private long quotaInNamespace;
      private BucketLayout bucketLayout;
+     private String owner;
 +    private DefaultReplicationConfig defaultReplicationConfig;
  
      public Builder() {
        metadata = new HashMap<>();
@@@ -263,13 -262,11 +275,17 @@@
        return this;
      }
  
+     public BucketArgs.Builder setOwner(String ownerName) {
+       owner = ownerName;
+       return this;
+     }
+ 
 +    public BucketArgs.Builder setDefaultReplicationConfig(
 +        DefaultReplicationConfig defaultReplConfig) {
 +      defaultReplicationConfig = defaultReplConfig;
 +      return this;
 +    }
 +
- 
      /**
       * Constructs the BucketArgs.
       * @return instance of BucketArgs.
@@@ -277,7 -274,7 +293,7 @@@
      public BucketArgs build() {
        return new BucketArgs(versioning, storageType, acls, metadata,
            bucketEncryptionKey, sourceVolume, sourceBucket, quotaInBytes,
-           quotaInNamespace, bucketLayout, defaultReplicationConfig);
 -          quotaInNamespace, bucketLayout, owner);
++          quotaInNamespace, bucketLayout, owner, defaultReplicationConfig);
      }
    }
  }
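
Putting the new builder methods together, a sketch of creating a bucket
with an owner and an EC default replication (assumes an existing
OzoneVolume handle; bucket and owner names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
    import org.apache.hadoop.hdds.client.ECReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.client.BucketArgs;
    import org.apache.hadoop.ozone.client.OzoneVolume;

    final class CreateBucketSketch {
      static void createEcBucket(OzoneVolume volume) throws IOException {
        BucketArgs args = BucketArgs.newBuilder()
            .setOwner("bucket-owner")
            // rs(3,2) with a 1024-byte EC chunk, as in the tests below.
            .setDefaultReplicationConfig(
                new DefaultReplicationConfig(ReplicationType.EC,
                    new ECReplicationConfig(3, 2,
                        ECReplicationConfig.EcCodec.RS, 1024)))
            .build();
        volume.createBucket("ec-bucket", args);
      }
    }
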
diff --cc hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 37098a9,23cf922..38ee2a7
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@@ -223,39 -225,22 +227,54 @@@ public class OzoneBucket extends WithMe
      this.bucketLayout = bucketLayout;
    }
  
 +  @SuppressWarnings("parameternumber")
 +  public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy,
 +      String volumeName, String bucketName, StorageType storageType,
 +      Boolean versioning, long creationTime, long modificationTime,
 +      Map<String, String> metadata, String encryptionKeyName,
 +      String sourceVolume, String sourceBucket, long usedBytes,
 +      long usedNamespace, long quotaInBytes, long quotaInNamespace,
-       BucketLayout bucketLayout,
++      BucketLayout bucketLayout, String owner,
 +      DefaultReplicationConfig defaultReplicationConfig) {
 +    this(conf, proxy, volumeName, bucketName, storageType, versioning,
 +        creationTime, modificationTime, metadata, encryptionKeyName,
 +        sourceVolume, sourceBucket, usedBytes, usedNamespace, quotaInBytes,
-         quotaInNamespace);
++        quotaInNamespace, bucketLayout, owner);
 +    this.bucketLayout = bucketLayout;
 +    if (defaultReplicationConfig != null) {
 +      this.defaultReplication =
 +          defaultReplicationConfig.getType() == ReplicationType.EC ?
 +              defaultReplicationConfig.getEcReplicationConfig() :
 +              ReplicationConfig
 +                  .fromTypeAndFactor(defaultReplicationConfig.getType(),
 +                      defaultReplicationConfig.getFactor());
 +    } else {
 +      // This can happen when talking to an old server, so fall back to the
 +      // old client-side defaults.
-       this.defaultReplication = ReplicationConfig.fromTypeAndString(
++      this.defaultReplication = ReplicationConfig.parse(
 +          ReplicationType.valueOf(
 +              conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
 +                  OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT)),
 +          conf.get(OzoneConfigKeys.OZONE_REPLICATION,
-               OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
++              OzoneConfigKeys.OZONE_REPLICATION_DEFAULT), conf);
 +    }
 +  }
  
+   @SuppressWarnings("checkstyle:ParameterNumber")
+   public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy,
+        String volumeName, String bucketName, StorageType storageType,
+        Boolean versioning, long creationTime, long modificationTime,
+        Map<String, String> metadata, String encryptionKeyName,
+        String sourceVolume, String sourceBucket, long usedBytes,
+        long usedNamespace, long quotaInBytes, long quotaInNamespace,
+        BucketLayout bucketLayout, String owner) {
+     this(conf, proxy, volumeName, bucketName, storageType, versioning,
+         creationTime, modificationTime, metadata, encryptionKeyName,
+         sourceVolume, sourceBucket, usedBytes, usedNamespace, quotaInBytes,
+         quotaInNamespace, bucketLayout);
+     this.owner = owner;
+   }
+ 
    /**
     * Constructs OzoneBucket instance.
     * @param conf Configuration object.
diff --cc hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 7fdb8fc,9ca8693..8a5ab08
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@@ -510,15 -549,10 +553,16 @@@ public class RpcClient implements Clien
        builder.setBucketEncryptionKey(bek);
      }
  
 +    DefaultReplicationConfig defaultReplicationConfig =
 +        bucketArgs.getDefaultReplicationConfig();
 +    if (defaultReplicationConfig != null) {
 +      builder.setDefaultReplicationConfig(defaultReplicationConfig);
 +    }
 +
-     LOG.info("Creating Bucket: {}/{}, with Versioning {} and " +
-             "Storage Type set to {} and Encryption set to {} ",
-         volumeName, bucketName, isVersionEnabled, storageType, bek != null);
+     LOG.info("Creating Bucket: {}/{}, with {} as owner and Versioning {} and " +
+         "Storage Type set to {} and Encryption set to {} ",
+         volumeName, bucketName, owner, isVersionEnabled,
+         storageType, bek != null);
      ozoneManagerClient.createBucket(builder.build());
    }
  
@@@ -759,7 -778,7 +803,8 @@@
          bucketInfo.getQuotaInBytes(),
          bucketInfo.getQuotaInNamespace(),
          bucketInfo.getBucketLayout(),
 -        bucketInfo.getOwner()
++        bucketInfo.getOwner(),
 +        bucketInfo.getDefaultReplicationConfig()
      );
    }
  
diff --cc hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
index 0a2fc41,17a7f6b..be16e1f
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
@@@ -17,12 -17,6 +17,13 @@@
   */
  package org.apache.hadoop.ozone.client;
  
 +import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 +import org.apache.hadoop.hdds.client.ECReplicationConfig;
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
 +import org.apache.hadoop.hdds.client.ReplicationFactor;
 +import org.apache.hadoop.hdds.client.ReplicationType;
++import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 +import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@@ -255,28 -189,9 +253,26 @@@ public class MockOmTransport implement
    }
  
    private InfoBucketResponse infoBucket(InfoBucketRequest infoBucketRequest) {
 +    BucketInfo bucketInfo = buckets.get(infoBucketRequest.getVolumeName())
 +        .get(infoBucketRequest.getBucketName());
 +    if (!bucketInfo.hasDefaultReplicationConfig()) {
 +      final ReplicationConfig replicationConfig = ReplicationConfig
-           .fromTypeAndString(ReplicationType
-                   .valueOf(OZONE_SERVER_DEFAULT_REPLICATION_TYPE_DEFAULT),
-               OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
++          .getDefault(new OzoneConfiguration());
 +
 +      bucketInfo = bucketInfo.toBuilder().setDefaultReplicationConfig(
 +          new DefaultReplicationConfig(
 +              ReplicationType.fromProto(replicationConfig.getReplicationType()),
 +              replicationConfig
 +                  .getReplicationType() != HddsProtos.ReplicationType.EC ?
 +                  ReplicationFactor
 +                      .valueOf(replicationConfig.getRequiredNodes()) :
 +                  null, replicationConfig
 +              .getReplicationType() == HddsProtos.ReplicationType.EC ?
 +              (ECReplicationConfig) replicationConfig :
 +              null).toProto()).build();
 +    }
      return InfoBucketResponse.newBuilder()
 -        .setBucketInfo(buckets.get(infoBucketRequest.getVolumeName())
 -            .get(infoBucketRequest.getBucketName()))
 +        .setBucketInfo(bucketInfo)
          .build();
    }
  
diff --cc hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index a1da197,786bb74..a843490
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@@ -118,8 -109,8 +120,10 @@@ public final class OmBucketInfo extend
     * @param usedBytes - Bucket Quota Usage in bytes.
     * @param quotaInBytes Bucket quota in bytes.
     * @param quotaInNamespace Bucket quota in counts.
 -   * @param bucketLayout Bucket Layout.
 +   * @param bucketLayout bucket layout.
+    * @param owner owner of the bucket.
 +   * @param defaultReplicationConfig default replication config.
     */
    @SuppressWarnings("checkstyle:ParameterNumber")
    private OmBucketInfo(String volumeName,
@@@ -140,7 -131,7 +144,8 @@@
        long quotaInBytes,
        long quotaInNamespace,
        BucketLayout bucketLayout,
 -      String owner) {
++      String owner,
 +      DefaultReplicationConfig defaultReplicationConfig) {
      this.volumeName = volumeName;
      this.bucketName = bucketName;
      this.acls = acls;
@@@ -159,7 -150,7 +164,8 @@@
      this.quotaInBytes = quotaInBytes;
      this.quotaInNamespace = quotaInNamespace;
      this.bucketLayout = bucketLayout;
+     this.owner = owner;
 +    this.defaultReplicationConfig = defaultReplicationConfig;
    }
  
    /**
@@@ -391,20 -372,7 +401,21 @@@
          .setQuotaInBytes(quotaInBytes)
          .setQuotaInNamespace(quotaInNamespace)
          .setBucketLayout(bucketLayout)
 -        .setOwner(owner);
++        .setOwner(owner)
 +        .setDefaultReplicationConfig(defaultReplicationConfig);
 +  }
 +
 +  public void setDefaultReplicationConfig(ReplicationConfig replicationConfig) {
 +    this.defaultReplicationConfig = new DefaultReplicationConfig(
 +        ReplicationType.fromProto(replicationConfig.getReplicationType()),
 +        replicationConfig
 +            .getReplicationType() == HddsProtos.ReplicationType.EC ?
 +            null :
 +            ReplicationFactor.valueOf(replicationConfig.getRequiredNodes()),
 +        replicationConfig
 +            .getReplicationType() == HddsProtos.ReplicationType.EC ?
 +            ((ECReplicationConfig) replicationConfig) :
 +            null);
    }
  
    /**
@@@ -429,7 -397,7 +440,8 @@@
      private long quotaInBytes;
      private long quotaInNamespace;
      private BucketLayout bucketLayout;
+     private String owner;
 +    private DefaultReplicationConfig defaultReplicationConfig;
  
      public Builder() {
        //Default values
@@@ -553,12 -521,11 +565,17 @@@
        return this;
      }
  
+     public Builder setOwner(String ownerName) {
+       this.owner = ownerName;
+       return this;
+     }
+ 
 +    public Builder setDefaultReplicationConfig(
 +        DefaultReplicationConfig defaultReplConfig) {
 +      this.defaultReplicationConfig = defaultReplConfig;
 +      return this;
 +    }
 +
      /**
       * Constructs the OmBucketInfo.
       * @return instance of OmBucketInfo.
@@@ -572,8 -539,8 +589,8 @@@
        return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled,
            storageType, creationTime, modificationTime, objectID, updateID,
            metadata, bekInfo, sourceVolume, sourceBucket, usedBytes,
-           usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout,
 -          usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout, owner);
++          usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout, owner,
 +          defaultReplicationConfig);
      }
    }
  
@@@ -721,8 -686,8 +744,9 @@@
          Objects.equals(sourceVolume, that.sourceVolume) &&
          Objects.equals(sourceBucket, that.sourceBucket) &&
          Objects.equals(metadata, that.metadata) &&
-         Objects.equals(bekInfo, that.bekInfo) && Objects
-         .equals(defaultReplicationConfig, this.defaultReplicationConfig);
+         Objects.equals(bekInfo, that.bekInfo) &&
 -        Objects.equals(owner, that.owner);
++        Objects.equals(owner, that.owner) &&
++        Objects.equals(defaultReplicationConfig, this.defaultReplicationConfig);
    }
  
    @Override
@@@ -750,7 -715,7 +774,8 @@@
          ", quotaInBytes=" + quotaInBytes +
          ", quotaInNamespace=" + quotaInNamespace +
          ", bucketLayout=" + bucketLayout +
+         ", owner=" + owner +
 +        ", defaultReplicationConfig=" + defaultReplicationConfig +
          '}';
    }
  }
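
The nested ternaries in setDefaultReplicationConfig above are easy to
misread; the same conversion written out as a hypothetical helper (uses
the same hdds.client imports as the file):

    // EC configs carry an ECReplicationConfig and a null factor; replicated
    // (RATIS/STAND_ALONE) configs carry a factor and a null EC config.
    static DefaultReplicationConfig toDefaultReplicationConfig(
        ReplicationConfig config) {
      boolean isEc =
          config.getReplicationType() == HddsProtos.ReplicationType.EC;
      return new DefaultReplicationConfig(
          ReplicationType.fromProto(config.getReplicationType()),
          isEc ? null : ReplicationFactor.valueOf(config.getRequiredNodes()),
          isEc ? (ECReplicationConfig) config : null);
    }
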
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 55715bd,8462caa..4de0f00
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@@ -66,11 -65,10 +66,15 @@@ public class TestOzoneConfigurationFiel
          ".duration"); // Deprecated config
      configurationPropsToSkipCompare
          .add(ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION);
 +    // Replication and type configs have moved to the server side.
 +    configurationPropsToSkipCompare
 +        .add(OzoneConfigKeys.OZONE_REPLICATION);
 +    configurationPropsToSkipCompare
 +        .add(OzoneConfigKeys.OZONE_REPLICATION_TYPE);
+     configurationPropsToSkipCompare
+         .add(OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY);
+     configurationPropsToSkipCompare
+         .add(OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION);
      // This property is tested in TestHttpServer2 instead
      xmlPropsToSkipCompare.add(HttpServer2.HTTP_IDLE_TIMEOUT_MS_KEY);
      addPropertiesNotInXml();
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index 3549532,0000000..73e02a5
mode 100644,000000..100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@@ -1,323 -1,0 +1,325 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership.  The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.hadoop.ozone.client.rpc;
 +
 +import org.apache.hadoop.conf.StorageUnit;
 +import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 +import org.apache.hadoop.hdds.client.ECReplicationConfig;
 +import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 +import org.apache.hadoop.hdds.client.ReplicationType;
 +import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 +import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
++import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 +import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 +import org.apache.hadoop.ozone.MiniOzoneCluster;
 +import org.apache.hadoop.ozone.OzoneConfigKeys;
 +import org.apache.hadoop.ozone.client.BucketArgs;
 +import org.apache.hadoop.ozone.client.ObjectStore;
 +import org.apache.hadoop.ozone.client.OzoneBucket;
 +import org.apache.hadoop.ozone.client.OzoneClient;
 +import org.apache.hadoop.ozone.client.OzoneClientFactory;
 +import org.apache.hadoop.ozone.client.OzoneKey;
 +import org.apache.hadoop.ozone.client.OzoneVolume;
 +import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
 +import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 +import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 +import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 +import org.apache.hadoop.ozone.container.TestHelper;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import java.io.IOException;
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
 +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 +
 +/**
 + * Tests key output stream.
 + */
 +public class TestECKeyOutputStream {
 +  private static MiniOzoneCluster cluster;
 +  private static OzoneConfiguration conf = new OzoneConfiguration();
 +  private static OzoneClient client;
 +  private static ObjectStore objectStore;
 +  private static int chunkSize;
 +  private static int flushSize;
 +  private static int maxFlushSize;
 +  private static int blockSize;
 +  private static String volumeName;
 +  private static String bucketName;
 +  private static String keyString;
 +  private static int dataBlocks = 3;
 +  private static int inputSize = dataBlocks * chunkSize;
 +  private static byte[][] inputChunks = new byte[dataBlocks][chunkSize];
 +
 +  /**
 +   * Create a MiniDFSCluster for testing.
 +   */
 +  @BeforeClass
 +  public static void init() throws Exception {
 +    chunkSize = 1024;
 +    flushSize = 2 * chunkSize;
 +    maxFlushSize = 2 * flushSize;
 +    blockSize = 2 * maxFlushSize;
 +
 +    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
 +    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
 +    clientConfig.setStreamBufferFlushDelay(false);
 +    conf.setFromObject(clientConfig);
 +
 +    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
 +    // If SCM detects a dead node too quickly, the container is moved to the
 +    // closed state and all in-progress writes fail with an exception. To
 +    // avoid that, keep a higher timeout; none of the tests currently depend
 +    // on the dead-node detection timeout.
 +    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 300, TimeUnit.SECONDS);
 +    conf.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300,
 +        TimeUnit.SECONDS);
 +    conf.setTimeDuration(
 +        "hdds.ratis.raft.server.notification.no-leader.timeout", 300,
 +        TimeUnit.SECONDS);
 +    conf.setQuietMode(false);
 +    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
 +        StorageUnit.MB);
 +
 +    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
 +        .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
 +        .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize)
 +        .setStreamBufferMaxSize(maxFlushSize)
 +        .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
 +    cluster.waitForClusterToBeReady();
 +    client = OzoneClientFactory.getRpcClient(conf);
 +    objectStore = client.getObjectStore();
 +    keyString = UUID.randomUUID().toString();
 +    volumeName = "testeckeyoutputstream";
 +    bucketName = volumeName;
 +    objectStore.createVolume(volumeName);
 +    objectStore.getVolume(volumeName).createBucket(bucketName);
 +    initInputChunks();
 +  }
 +
 +  /**
 +   * Shutdown MiniDFSCluster.
 +   */
 +  @AfterClass
 +  public static void shutdown() {
 +    if (cluster != null) {
 +      cluster.shutdown();
 +    }
 +  }
 +
 +  @Test
 +  public void testCreateKeyWithECReplicationConfig() throws Exception {
 +    try (OzoneOutputStream key = TestHelper
 +        .createKey(keyString, new ECReplicationConfig(3, 2,
 +              ECReplicationConfig.EcCodec.RS, chunkSize), inputSize,
 +            objectStore, volumeName, bucketName)) {
 +      Assert.assertTrue(key.getOutputStream() instanceof ECKeyOutputStream);
 +    }
 +  }
 +
 +  @Test
 +  public void testCreateKeyWithOutBucketDefaults() throws Exception {
 +    OzoneVolume volume = objectStore.getVolume(volumeName);
 +    OzoneBucket bucket = volume.getBucket(bucketName);
 +    try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) {
 +      Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
 +      for (int i = 0; i < inputChunks.length; i++) {
 +        out.write(inputChunks[i]);
 +      }
 +    }
 +  }
 +
 +  @Test
 +  public void testCreateKeyWithBucketDefaults() throws Exception {
 +    String myBucket = UUID.randomUUID().toString();
 +    OzoneVolume volume = objectStore.getVolume(volumeName);
 +    final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder();
 +    bucketArgs.setDefaultReplicationConfig(
 +        new DefaultReplicationConfig(ReplicationType.EC,
 +            new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
 +                chunkSize)));
 +
 +    volume.createBucket(myBucket, bucketArgs.build());
 +    OzoneBucket bucket = volume.getBucket(myBucket);
 +
 +    try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) {
 +      Assert.assertTrue(out.getOutputStream() instanceof ECKeyOutputStream);
 +      for (int i = 0; i < inputChunks.length; i++) {
 +        out.write(inputChunks[i]);
 +      }
 +    }
 +    byte[] buf = new byte[chunkSize];
 +    try (OzoneInputStream in = bucket.readKey(keyString)) {
 +      for (int i = 0; i < inputChunks.length; i++) {
 +        int read = in.read(buf, 0, chunkSize);
 +        Assert.assertEquals(chunkSize, read);
 +        Assert.assertTrue(Arrays.equals(buf, inputChunks[i]));
 +      }
 +    }
 +  }
 +
 +  @Test
 +  public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception {
 +    OzoneBucket bucket = getOzoneBucket();
 +    try (OzoneOutputStream out = bucket
 +        .createKey("testCreateRatisKeyAndWithECBucketDefaults", 2000,
-             new RatisReplicationConfig("3"), new HashMap<>())) {
++            new RatisReplicationConfig(HddsProtos.ReplicationFactor.THREE),
++            new HashMap<>())) {
 +      Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
 +      for (int i = 0; i < inputChunks.length; i++) {
 +        out.write(inputChunks[i]);
 +      }
 +    }
 +  }
 +
 +  @Test
 +  public void test13ChunksInSingleWriteOp() throws IOException {
 +    testMultipleChunksInSingleWriteOp(13);
 +  }
 +
 +  @Test
 +  public void test15ChunksInSingleWriteOp() throws IOException {
 +    testMultipleChunksInSingleWriteOp(15);
 +  }
 +
 +  @Test
 +  public void test20ChunksInSingleWriteOp() throws IOException {
 +    testMultipleChunksInSingleWriteOp(20);
 +  }
 +
 +  @Test
 +  public void test21ChunksInSingleWriteOp() throws IOException {
 +    testMultipleChunksInSingleWriteOp(21);
 +  }
 +
 +  public void testMultipleChunksInSingleWriteOp(int numChunks)
 +      throws IOException {
 +    byte[] inputData = getInputBytes(numChunks);
 +    final OzoneBucket bucket = getOzoneBucket();
 +    String keyName = "testMultipleChunksInSingleWriteOp" + numChunks;
 +    try (OzoneOutputStream out = bucket.createKey(keyName, 4096,
 +        new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
 +            chunkSize), new HashMap<>())) {
 +      out.write(inputData);
 +    }
 +
 +    validateContent(inputData, bucket, bucket.getKey(keyName));
 +  }
 +
 +  private void validateContent(byte[] inputData, OzoneBucket bucket,
 +      OzoneKey key) throws IOException {
 +    try (OzoneInputStream is = bucket.readKey(key.getName())) {
 +      byte[] fileContent = new byte[inputData.length];
 +      Assert.assertEquals(inputData.length, is.read(fileContent));
 +      Assert.assertEquals(new String(inputData, UTF_8),
 +          new String(fileContent, UTF_8));
 +    }
 +  }
 +
 +  private OzoneBucket getOzoneBucket() throws IOException {
 +    String myBucket = UUID.randomUUID().toString();
 +    OzoneVolume volume = objectStore.getVolume(volumeName);
 +    final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder();
 +    bucketArgs.setDefaultReplicationConfig(
 +        new DefaultReplicationConfig(ReplicationType.EC,
 +            new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
 +                chunkSize)));
 +
 +    volume.createBucket(myBucket, bucketArgs.build());
 +    return volume.getBucket(myBucket);
 +  }
 +
 +  private static void initInputChunks() {
 +    for (int i = 0; i < dataBlocks; i++) {
 +      inputChunks[i] = getBytesWith(i + 1, chunkSize);
 +    }
 +  }
 +
 +  private static byte[] getBytesWith(int singleDigitNumber, int total) {
 +    // StringBuilder(int) sets the initial capacity, not the content; size
 +    // the buffer for the 'total' digits appended below.
 +    StringBuilder builder = new StringBuilder(total);
 +    for (int i = 1; i <= total; i++) {
 +      builder.append(singleDigitNumber);
 +    }
 +    return builder.toString().getBytes(UTF_8);
 +  }
 +
 +  @Test
 +  public void testWriteShouldSucceedWhenDNKilled() throws Exception {
 +    int numChunks = 3;
 +    byte[] inputData = getInputBytes(numChunks);
 +    final OzoneBucket bucket = getOzoneBucket();
 +    String keyName = "testWriteShouldSucceedWhenDNKilled" + numChunks;
 +    try {
 +      try (OzoneOutputStream out = bucket.createKey(keyName, 1024,
 +          new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
 +              chunkSize), new HashMap<>())) {
 +        out.write(inputData);
 +        // Kill a node from first pipeline
 +        DatanodeDetails nodeToKill =
 +            ((ECKeyOutputStream) out.getOutputStream()).getStreamEntries()
 +                .get(0).getPipeline().getFirstNode();
 +        cluster.shutdownHddsDatanode(nodeToKill);
 +
 +        out.write(inputData);
 +        // Check the second blockGroup pipeline to make sure that the
 +        // failed node is not selected.
 +        Assert.assertFalse(
 +            ((ECKeyOutputStream) out.getOutputStream()).getStreamEntries()
 +                .get(1).getPipeline().getNodes().contains(nodeToKill));
 +      }
 +
 +      try (OzoneInputStream is = bucket.readKey(keyName)) {
 +        // TODO: this skip can be removed once read handles online recovery.
 +        long skip = is.skip(inputData.length);
 +        Assert.assertEquals(inputData.length, skip);
 +        // All nodes are available in the second block group, so let's assert.
 +        byte[] fileContent = new byte[inputData.length];
 +        Assert.assertEquals(inputData.length, is.read(fileContent));
 +        Assert.assertEquals(new String(inputData, UTF_8),
 +            new String(fileContent, UTF_8));
 +      }
 +    } finally {
 +      // TODO: optimize to just start the killed DN back.
 +      resetCluster();
 +    }
 +  }
 +
 +  private void resetCluster() throws Exception {
 +    cluster.shutdown();
 +    init();
 +  }
 +
 +  private byte[] getInputBytes(int numChunks) {
 +    byte[] inputData = new byte[numChunks * chunkSize];
 +    for (int i = 0; i < numChunks; i++) {
 +      int start = i * chunkSize;
 +      // Arrays.fill treats toIndex as exclusive, so fill the whole chunk.
 +      Arrays.fill(inputData, start, start + chunkSize,
 +          String.valueOf(i % 9).getBytes(UTF_8)[0]);
 +    }
 +    return inputData;
 +  }
 +
 +}
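
The tests above exercise the EC write path end to end: create a bucket whose
default replication config is rs-3-2, write a key chunk by chunk, and validate
the content on read-back. As a compact reference, here is a minimal sketch of
that client round trip, using only types and signatures that appear in the
test code (ObjectStore, ECReplicationConfig, OzoneBucket.createKey/readKey);
the volume and bucket are assumed to already exist, and the 1 MB chunk size is
a placeholder for the tests' chunkSize field, whose value is set outside this
hunk.

    import java.util.HashMap;

    import org.apache.hadoop.hdds.client.ECReplicationConfig;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneInputStream;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    final class EcRoundTripSketch {
      static void writeAndRead(ObjectStore store, String volume,
          String bucketName, byte[] data) throws Exception {
        OzoneBucket bucket = store.getVolume(volume).getBucket(bucketName);
        // rs-3-2 with an assumed 1 MB EC chunk size, as in the tests above.
        ECReplicationConfig repl = new ECReplicationConfig(
            3, 2, ECReplicationConfig.EcCodec.RS, 1024 * 1024);
        try (OzoneOutputStream out = bucket.createKey(
            "sketchKey", data.length, repl, new HashMap<>())) {
          out.write(data);
        }
        byte[] back = new byte[data.length];
        try (OzoneInputStream in = bucket.readKey("sketchKey")) {
          int read = in.read(back); // the tests assert this equals data.length
        }
      }
    }
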
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
index e8c0953,5a96d68..4ddefad
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
@@@ -145,10 -144,11 +145,10 @@@ public class TestOmAcls 
      logCapturer.clearOutput();
  
      TestOmAcls.aclAllow = false;
 -
      OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
          () -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
-     assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
-         "permission to access key"));
+     assertTrue(logCapturer.getOutput().contains("doesn't have READ " +
+         "permission to access volume"));
    }
  
    /**
diff --cc hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index e6afa99,1dd922c..7161859
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@@ -545,7 -545,7 +545,8 @@@ message BucketInfo 
      optional int64 quotaInNamespace = 16 [default = -2];
      optional uint64 usedNamespace = 17;
      optional BucketLayoutProto bucketLayout = 18;
-     optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 19;
+     optional string owner = 19;
++    optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 20;
  }
  
  enum StorageTypeProto {
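
The renumbering is the point of this hunk: mainline took field 19 for owner,
so the EC branch's defaultReplicationConfig moves to 20 instead of colliding.
Protobuf identifies fields on the wire by number, not name, so keeping 19 for
both would have made messages from the two branches mis-decode each other. A
small reflection check with the protobuf-java descriptor API makes the final
assignment visible; the generated outer class name is assumed to be
OzoneManagerProtocolProtos.

    import com.google.protobuf.Descriptors;

    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;

    final class BucketInfoFieldCheck {
      public static void main(String[] args) {
        Descriptors.Descriptor d =
            OzoneManagerProtocolProtos.BucketInfo.getDescriptor();
        // Expected after the merge: 19 -> owner, 20 -> defaultReplicationConfig
        System.out.println("19 = " + d.findFieldByNumber(19).getName());
        System.out.println("20 = " + d.findFieldByNumber(20).getName());
      }
    }
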
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 9bde8f9,2107754..ffed22e
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@@ -60,8 -60,6 +60,7 @@@ import org.apache.hadoop.fs.FileSystem
  import org.apache.hadoop.hdds.HddsConfigKeys;
  import org.apache.hadoop.hdds.HddsUtils;
  import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
- import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.hdds.conf.ConfigurationException;
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@@ -3410,15 -3612,6 +3619,10 @@@ public final class OzoneManager extend
          .getTrimmed(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_DEFAULT);
    }
  
 +  public ReplicationConfig getDefaultReplicationConfig() {
-     String replication = configuration.get(OZONE_SERVER_DEFAULT_REPLICATION_KEY,
-         OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
-     String type = configuration.get(OZONE_SERVER_DEFAULT_REPLICATION_TYPE_KEY,
-         OZONE_SERVER_DEFAULT_REPLICATION_TYPE_DEFAULT);
-     return ReplicationConfig
-         .fromTypeAndString(ReplicationType.valueOf(type), replication);
++    return ReplicationConfig.getDefault(configuration);
 +  }
 +
    public String getOMDefaultBucketLayout() {
      return this.defaultBucketLayout;
    }
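
getDefaultReplicationConfig now delegates to
ReplicationConfig.getDefault(configuration) rather than reading and parsing
the two server-default keys inline. A hedged sketch of the call, assuming
getDefault resolves the same ozone.server.default.replication /
ozone.server.default.replication.type pair that the replaced code read through
the OZONE_SERVER_DEFAULT_REPLICATION_* constants:

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    final class DefaultReplicationSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Key names assumed from the constants the replaced code read.
        conf.set("ozone.server.default.replication.type", "RATIS");
        conf.set("ozone.server.default.replication", "THREE");
        ReplicationConfig defaults = ReplicationConfig.getDefault(conf);
        System.out.println(defaults.getReplicationType() + "/"
            + defaults.getRequiredNodes());
      }
    }
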
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 1105166,6c2a862..91d4ed3
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@@ -28,10 -29,9 +29,11 @@@ import java.util.Map
  
  import com.google.common.base.Optional;
  import com.google.common.base.Preconditions;
 +import org.apache.hadoop.hdds.client.ECReplicationConfig;
  import org.apache.hadoop.hdds.client.ReplicationConfig;
 +import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import org.apache.hadoop.ozone.OzoneAcl;
+ import org.apache.hadoop.ozone.om.OzoneManagerUtils;
  import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
  import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
  import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@@ -356,35 -352,22 +358,37 @@@ public class OMDirectoryCreateRequest e
    }
  
    private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName,
 -      KeyArgs keyArgs, long objectId) {
 +      KeyArgs keyArgs, long objectId,
 +      ReplicationConfig serverDefaultReplConfig) {
      String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
  
 -    return new OmKeyInfo.Builder()
 -        .setVolumeName(keyArgs.getVolumeName())
 -        .setBucketName(keyArgs.getBucketName())
 -        .setKeyName(dirName)
 -        .setOmKeyLocationInfos(Collections.singletonList(
 -            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
 -        .setCreationTime(keyArgs.getModificationTime())
 -        .setModificationTime(keyArgs.getModificationTime())
 -        .setDataSize(0)
 -        .setReplicationConfig(ReplicationConfig
 -                .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor()))
 -        .setObjectID(objectId)
 -        .setUpdateID(objectId);
 +    OmKeyInfo.Builder keyInfoBuilder =
-         new OmKeyInfo.Builder().setVolumeName(keyArgs.getVolumeName())
-             .setBucketName(keyArgs.getBucketName()).setKeyName(dirName)
++        new OmKeyInfo.Builder()
++            .setVolumeName(keyArgs.getVolumeName())
++            .setBucketName(keyArgs.getBucketName())
++            .setKeyName(dirName)
 +            .setOmKeyLocationInfos(Collections.singletonList(
 +                new OmKeyLocationInfoGroup(0, new ArrayList<>())))
 +            .setCreationTime(keyArgs.getModificationTime())
-             .setModificationTime(keyArgs.getModificationTime()).setDataSize(0);
++            .setModificationTime(keyArgs.getModificationTime())
++            .setDataSize(0);
 +    if (keyArgs.getFactor() != null
 +        && keyArgs.getFactor() != HddsProtos.ReplicationFactor.ZERO
 +        && keyArgs.getType() != HddsProtos.ReplicationType.EC) {
 +      // Factor available and not an EC replication config.
 +      keyInfoBuilder.setReplicationConfig(ReplicationConfig
-           .fromTypeAndFactor(keyArgs.getType(), keyArgs.getFactor()));
++          .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor()));
 +    } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) {
 +      // Found EC type
 +      keyInfoBuilder.setReplicationConfig(
 +          new ECReplicationConfig(keyArgs.getEcReplicationConfig()));
 +    } else {
 +      // default type
 +      keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig);
 +    }
 +
-     keyInfoBuilder.setObjectID(objectId)
-         .setUpdateID(objectId);
++    keyInfoBuilder.setObjectID(objectId);
 +    return keyInfoBuilder;
    }
  
    static long getMaxNumOfRecursiveDirs() {
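
The branching added to dirKeyInfoBuilderNoACL makes a three-way choice: a
usable Ratis/Standalone factor from the request wins, an EC type in the
request comes next, and otherwise the server default applies. Pulled out as a
standalone helper for clarity (a sketch only: names mirror the hunk above, and
the proto type of getEcReplicationConfig() is assumed to be
HddsProtos.ECReplicationConfig):

    import org.apache.hadoop.hdds.client.ECReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class ReplicationChoiceSketch {
      static ReplicationConfig choose(HddsProtos.ReplicationType type,
          HddsProtos.ReplicationFactor factor,
          HddsProtos.ECReplicationConfig ecProto,
          ReplicationConfig serverDefault) {
        if (factor != null && factor != HddsProtos.ReplicationFactor.ZERO
            && type != HddsProtos.ReplicationType.EC) {
          // Factor available and not an EC replication config.
          return ReplicationConfig.fromProtoTypeAndFactor(type, factor);
        }
        if (type == HddsProtos.ReplicationType.EC) {
          return new ECReplicationConfig(ecProto);
        }
        return serverDefault;
      }
    }
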
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index 295028d,ad18b8f..bc70862
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@@ -25,7 -26,7 +26,8 @@@ import java.util.Map
  
  import com.google.common.base.Optional;
  import com.google.common.base.Preconditions;
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
+ import org.apache.hadoop.ozone.om.OzoneManagerUtils;
  import org.apache.hadoop.ozone.om.helpers.BucketLayout;
  import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
  import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index e2ac0a8,151564c..b128259
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@@ -23,8 -23,8 +23,9 @@@ import com.google.common.base.Precondit
  import org.apache.hadoop.hdds.client.ReplicationConfig;
  import org.apache.hadoop.ozone.audit.OMAction;
  import org.apache.hadoop.ozone.om.OMMetadataManager;
 +import org.apache.hadoop.ozone.om.OzoneConfigUtil;
  import org.apache.hadoop.ozone.om.OzoneManager;
+ import org.apache.hadoop.ozone.om.OzoneManagerUtils;
  import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
  import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
  import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@@ -169,15 -170,10 +171,14 @@@ public class S3InitiateMultipartUploadR
        // care of in the final complete multipart upload. AWS S3 behavior is
        // also like this, even when key exists in a bucket, user can still
        // initiate MPU.
--
 -      final ReplicationConfig replicationConfig =
 -          ReplicationConfig.fromProtoTypeAndFactor(
 -              keyArgs.getType(), keyArgs.getFactor());
 +      final OmBucketInfo bucketInfo = omMetadataManager.getBucketTable()
 +          .get(omMetadataManager.getBucketKey(volumeName, bucketName));
 +      final ReplicationConfig replicationConfig = OzoneConfigUtil
 +          .resolveReplicationConfigPreference(keyArgs.getType(),
 +              keyArgs.getFactor(), keyArgs.getEcReplicationConfig(),
 +              bucketInfo != null ?
 +                  bucketInfo.getDefaultReplicationConfig() :
 +                  null, ozoneManager.getDefaultReplicationConfig());
  
        multipartKeyInfo = new OmMultipartKeyInfo.Builder()
            .setUploadID(keyArgs.getMultipartUploadID())
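
For multipart uploads the choice gains a middle tier: explicit key arguments
first, then the bucket's stored DefaultReplicationConfig, then the server-wide
default. The sketch below models only that fall-through contract of
resolveReplicationConfigPreference; the real method, as the hunk shows, takes
the raw proto type/factor/EC arguments rather than pre-resolved
ReplicationConfig objects.

    import org.apache.hadoop.hdds.client.ReplicationConfig;

    final class PrecedenceSketch {
      // Hedged model of the fall-through order only; the real signature
      // differs as noted above.
      static ReplicationConfig pick(ReplicationConfig fromKeyArgs,
          ReplicationConfig fromBucketDefault,
          ReplicationConfig serverDefault) {
        if (fromKeyArgs != null) {
          return fromKeyArgs;
        }
        if (fromBucketDefault != null) {
          return fromBucketDefault;
        }
        return serverDefault;
      }
    }
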
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index c72aae2,a89a5bd..004ff75
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@@ -22,8 -22,6 +22,7 @@@ package org.apache.hadoop.ozone.om.requ
  import java.io.IOException;
  import java.util.List;
  
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
- import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.ozone.om.helpers.BucketLayout;
  import org.apache.hadoop.ozone.om.request.OMClientRequest;
  import org.junit.After;
@@@ -87,10 -83,6 +84,8 @@@ public class TestS3MultipartRequest 
      when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
      auditLogger = Mockito.mock(AuditLogger.class);
      when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
 +    when(ozoneManager.getDefaultReplicationConfig()).thenReturn(
-         ReplicationConfig.fromTypeAndString(ReplicationType
-                 .valueOf(OZONE_SERVER_DEFAULT_REPLICATION_TYPE_DEFAULT),
-             OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT));
++        ReplicationConfig.getDefault(ozoneConfiguration));
      Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
      when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
          any(OMClientRequest.class)))
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index 65f2d5a,8d98882..0597eea
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@@ -21,8 -21,6 +21,7 @@@ import com.google.common.base.Optional
  import com.google.common.collect.Iterators;
  import org.apache.commons.lang3.tuple.Pair;
  import org.apache.hadoop.hdds.client.BlockID;
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
- import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.StorageType;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@@ -138,10 -134,6 +135,8 @@@ public class TestCleanupTableInfo 
          );
      when(om.getAclsEnabled()).thenReturn(false);
      when(om.getAuditLogger()).thenReturn(mock(AuditLogger.class));
 +    when(om.getDefaultReplicationConfig()).thenReturn(ReplicationConfig
-         .fromTypeAndString(ReplicationType
-                 .valueOf(OZONE_SERVER_DEFAULT_REPLICATION_TYPE_DEFAULT),
-             OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT));
++        .getDefault(new OzoneConfiguration()));
      addVolumeToMetaTable(aVolumeArgs());
      addBucketToMetaTable(aBucketInfo());
    }
diff --cc hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 38b1a06,2c3a465..508e22d
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@@ -209,8 -211,8 +222,7 @@@ public class BasicOzoneClientAdapterImp
  
          ReplicationConfig customReplicationConfig =
              ReplicationConfig.adjustReplication(
-                 replConfig, replication
 -                replicationConfig, replication, config
--            );
++                replConfig, replication, config);
          ozoneOutputStream =
              bucket.createFile(key, 0, customReplicationConfig, overWrite,
                  recursive);
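
Both filesystem adapters (this one and the rooted variant below) now pass the
cluster configuration into ReplicationConfig.adjustReplication, so a
replication factor requested through the FileSystem API is reconciled against
the bucket's resolved config. A hedged usage sketch; the short factor
parameter and the acceptability of OzoneConfiguration for the third argument
are assumptions taken from the adapter context:

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class AdjustReplicationSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        ReplicationConfig bucketDefault =
            new RatisReplicationConfig(HddsProtos.ReplicationFactor.THREE);
        // e.g. a caller asked for replication 1 through the FileSystem API.
        ReplicationConfig adjusted =
            ReplicationConfig.adjustReplication(bucketDefault, (short) 1, conf);
        System.out.println(adjusted.getReplicationType() + "/"
            + adjusted.getRequiredNodes());
      }
    }
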
diff --cc hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 1a619f1,2577105..b314f2e
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@@ -344,7 -336,8 +355,8 @@@ public class BasicRootedOzoneClientAdap
            || replication == ReplicationFactor.THREE.getValue()) {
  
          ozoneOutputStream = bucket.createFile(key, 0,
-             ReplicationConfig.adjustReplication(replConfig, replication),
+             ReplicationConfig.adjustReplication(
 -                replicationConfig, replication, config),
++                replConfig, replication, config),
              overWrite, recursive);
        } else {
          ozoneOutputStream =
diff --cc hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
index cce8948,5c07662..072b907
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
@@@ -18,14 -18,8 +18,15 @@@
  package org.apache.hadoop.ozone.shell.bucket;
  
  import com.google.common.base.Strings;
 +import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 +import org.apache.hadoop.hdds.client.ECReplicationConfig;
  import org.apache.hadoop.hdds.client.OzoneQuota;
 +import org.apache.hadoop.hdds.client.ReplicationConfig;
 +import org.apache.hadoop.hdds.client.ReplicationFactor;
 +import org.apache.hadoop.hdds.client.ReplicationType;
++import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.StorageType;
 +import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import org.apache.hadoop.ozone.OzoneConsts;
  import org.apache.hadoop.ozone.client.BucketArgs;
  import org.apache.hadoop.ozone.client.OzoneBucket;
@@@ -110,26 -103,6 +121,26 @@@ public class CreateBucketHandler extend
        }
      }
  
 +    if (replicationType != null) {
 +      if (replication != null) {
 +        ReplicationConfig replicationConfig = ReplicationConfig
-             .fromTypeAndString(ReplicationType.valueOf(replicationType),
-                 replication);
++            .parse(ReplicationType.valueOf(replicationType),
++                replication, new OzoneConfiguration());
 +        boolean isEC = replicationConfig
 +            .getReplicationType() == HddsProtos.ReplicationType.EC;
 +        bb.setDefaultReplicationConfig(new DefaultReplicationConfig(
 +            ReplicationType.fromProto(replicationConfig.getReplicationType()),
 +            isEC ?
 +                null :
 +                ReplicationFactor.valueOf(replicationConfig.getRequiredNodes()),
 +            isEC ? (ECReplicationConfig) replicationConfig : null));
 +      } else {
 +        throw new IOException(
 +            "Replication can't be null. Replication type passed was : "
 +                + replicationType);
 +      }
 +    }
 +
      if (!Strings.isNullOrEmpty(quotaOptions.getQuotaInBytes())) {
        bb.setQuotaInBytes(OzoneQuota.parseSpaceQuota(
            quotaOptions.getQuotaInBytes()).getQuotaInBytes());
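
The shell handler now routes --type/--replication through
ReplicationConfig.parse, which (unlike the removed fromTypeAndString) takes
the configuration so replication strings can be validated against cluster
settings. A hedged sketch of the EC branch; the rs-3-2-1024k string form is an
assumption based on Ozone's EC notation:

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class ParseReplicationSketch {
      public static void main(String[] args) {
        ReplicationConfig rc = ReplicationConfig.parse(
            ReplicationType.EC, "rs-3-2-1024k", new OzoneConfiguration());
        // For EC the parsed config carries the scheme itself, so the handler
        // stores it (rather than a factor) in DefaultReplicationConfig.
        boolean isEC =
            rc.getReplicationType() == HddsProtos.ReplicationType.EC;
        System.out.println(isEC + " nodes=" + rc.getRequiredNodes());
      }
    }
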
