You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ra...@apache.org on 2021/04/25 07:32:40 UTC
[ozone] 01/01: Merge remote-tracking branch 'origin/master' into
HDDS-2939
This is an automated email from the ASF dual-hosted git repository.
rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git
commit f1c67573915f6e825fe774262a939f362989f62f
Merge: e63d80a 4c313b8
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sun Apr 25 12:59:55 2021 +0530
Merge remote-tracking branch 'origin/master' into HDDS-2939
.github/workflows/cancel-ci.yaml | 9 +-
.github/workflows/post-commit.yml | 180 ++---
.gitignore | 2 +
HISTORY.md | 4 +-
hadoop-hdds/client/pom.xml | 4 +-
.../apache/hadoop/hdds/scm/XceiverClientGrpc.java | 7 +
.../hadoop/hdds/scm/storage/BlockInputStream.java | 7 +-
.../storage/DummyBlockInputStreamWithRetry.java | 7 +-
hadoop-hdds/common/pom.xml | 9 +-
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 3 +-
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 8 -
.../scm/ha/RetriableWithNoFailoverException.java | 25 +-
.../org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 61 +-
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 67 +-
.../hadoop/hdds/security/x509/SecurityConfig.java | 13 +-
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 7 +
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 1 +
.../org/apache/hadoop/ozone/common/Storage.java | 12 +
.../ozone/container/common/helpers/BlockData.java | 2 +-
.../common/src/main/resources/ozone-default.xml | 20 +-
.../hadoop/hdds/scm/pipeline/MockPipeline.java | 12 +-
hadoop-hdds/config/pom.xml | 4 +-
hadoop-hdds/container-service/pom.xml | 12 +-
.../apache/hadoop/ozone/HddsDatanodeService.java | 4 +-
.../ozone/container/keyvalue/KeyValueHandler.java | 58 +-
.../container/keyvalue/helpers/BlockUtils.java | 27 +-
.../container/keyvalue/impl/BlockManagerImpl.java | 16 +-
.../container/metadata/AbstractDatanodeStore.java | 26 +-
.../container/common/helpers/TestBlockData.java | 2 +-
.../common/impl/TestContainerDataYaml.java | 13 +-
.../container/keyvalue/TestKeyValueContainer.java | 43 +-
.../TestKeyValueHandlerWithUnhealthyContainer.java | 18 +-
.../container/ozoneimpl/TestContainerReader.java | 1 +
hadoop-hdds/docs/content/feature/OM-HA.zh.md | 13 +-
hadoop-hdds/docs/content/feature/SCM-HA.md | 2 +-
hadoop-hdds/docs/content/feature/SCM-HA.zh.md | 162 +++++
hadoop-hdds/docs/pom.xml | 4 +-
hadoop-hdds/framework/pom.xml | 4 +-
.../java/org/apache/hadoop/hdds}/ExitManager.java | 2 +-
.../java/org/apache/hadoop/hdds}/NodeDetails.java | 2 +-
.../java/org/apache/hadoop/hdds/package-info.java | 17 +-
.../hadoop/hdds/protocol/SCMSecurityProtocol.java | 15 +
.../SCMSecurityProtocolClientSideTranslatorPB.java | 37 +
.../hadoop/hdds/scm/metadata/SCMMetadataStore.java | 11 +
.../scm/protocol/ScmBlockLocationProtocol.java | 25 +-
...lockLocationProtocolClientSideTranslatorPB.java | 47 +-
...inerLocationProtocolClientSideTranslatorPB.java | 1 +
.../SCMBlockLocationFailoverProxyProvider.java | 17 +-
.../SCMContainerLocationFailoverProxyProvider.java | 28 +-
.../SCMSecurityProtocolFailoverProxyProvider.java | 21 +-
.../CRLInfo.java => certificate/CertInfo.java} | 97 +--
.../certificate/authority/CertificateServer.java | 15 +
.../certificate/authority/CertificateStore.java | 28 +
.../certificate/authority/DefaultApprover.java | 2 +-
.../certificate/authority/DefaultCAServer.java | 28 +-
.../x509/certificate/client/CertificateClient.java | 17 +
.../client/DefaultCertificateClient.java | 38 +-
.../security/x509/certificate/package-info.java | 18 +-
.../hadoop/hdds/security/x509/crl/CRLInfo.java | 24 +-
.../java/org/apache/hadoop/hdds/utils/HAUtils.java | 18 +-
.../apache/hadoop/hdds/utils/HddsServerUtil.java | 67 +-
.../org/apache/hadoop/hdds/utils/db/DBProfile.java | 4 +-
.../x509/certificate/authority/MockCAStore.java | 18 +
.../certificate/authority/TestDefaultCAServer.java | 10 +
hadoop-hdds/hadoop-dependency-client/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-server/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-test/pom.xml | 4 +-
hadoop-hdds/interface-admin/pom.xml | 4 +-
hadoop-hdds/interface-client/pom.xml | 4 +-
.../interface-client/src/main/proto/hdds.proto | 9 +
hadoop-hdds/interface-server/pom.xml | 4 +-
.../src/main/proto/ScmServerSecurityProtocol.proto | 29 +
hadoop-hdds/pom.xml | 10 +-
hadoop-hdds/server-scm/pom.xml | 8 +-
.../apache/hadoop/hdds/scm/block/BlockManager.java | 9 +-
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 45 +-
.../hdds/scm/container/ContainerManagerImpl.java | 18 +-
.../hdds/scm/container/ContainerManagerV2.java | 9 +-
.../hdds/scm/container/ContainerStateManager.java | 31 +-
.../hdds/scm/container/SCMContainerManager.java | 6 +-
.../apache/hadoop/hdds/scm/ha/HASecurityUtils.java | 39 +-
.../hadoop/hdds/scm/ha/InterSCMGrpcClient.java | 14 +-
.../hdds/scm/ha/InterSCMGrpcProtocolService.java | 1 +
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 16 +-
.../hadoop/hdds/scm/ha/SCMHANodeDetails.java | 3 +-
.../apache/hadoop/hdds/scm/ha/SCMNodeDetails.java | 6 +-
.../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 30 -
.../hadoop/hdds/scm/ha/SCMSnapshotDownloader.java | 2 +-
.../hadoop/hdds/scm/ha/SCMSnapshotProvider.java | 28 +-
.../apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 21 +-
.../hadoop/hdds/scm/metadata/CertInfoCodec.java | 58 ++
.../hadoop/hdds/scm/metadata/SCMDBDefinition.java | 18 +-
.../hdds/scm/metadata/SCMMetadataStoreImpl.java | 14 +-
.../hdds/scm/node/NodeDecommissionManager.java | 26 +-
.../hadoop/hdds/scm/node/SCMNodeManager.java | 62 +-
.../scm/pipeline/BackgroundPipelineCreator.java | 41 +-
.../scm/pipeline/BackgroundPipelineCreatorV2.java | 38 +-
.../hadoop/hdds/scm/pipeline/PipelineFactory.java | 24 +-
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 40 +-
.../hdds/scm/pipeline/PipelineManagerV2Impl.java | 74 +-
.../hdds/scm/pipeline/PipelinePlacementPolicy.java | 18 +-
.../hadoop/hdds/scm/pipeline/PipelineProvider.java | 29 +-
.../hdds/scm/pipeline/PipelineReportHandler.java | 12 +-
.../hdds/scm/pipeline/PipelineStateManager.java | 34 +-
.../scm/pipeline/PipelineStateManagerV2Impl.java | 35 +-
.../hadoop/hdds/scm/pipeline/PipelineStateMap.java | 163 +----
.../hdds/scm/pipeline/RatisPipelineProvider.java | 54 +-
.../hdds/scm/pipeline/RatisPipelineUtils.java | 14 +-
.../hdds/scm/pipeline/SCMPipelineManager.java | 74 +-
.../hdds/scm/pipeline/SCMPipelineMetrics.java | 4 +-
.../hdds/scm/pipeline/SimplePipelineProvider.java | 27 +-
.../hadoop/hdds/scm/pipeline/StateManager.java | 42 +-
.../SCMSecurityProtocolServerSideTranslatorPB.java | 82 ++-
...lockLocationProtocolServerSideTranslatorPB.java | 19 +-
.../scm/safemode/HealthyPipelineSafeModeRule.java | 6 +-
.../safemode/OneReplicaPipelineSafeModeRule.java | 20 +-
.../hdds/scm/server/SCMBlockProtocolServer.java | 15 +-
.../hadoop/hdds/scm/server/SCMCertStore.java | 106 ++-
.../hdds/scm/server/SCMClientProtocolServer.java | 12 +-
.../hdds/scm/server/SCMSecurityProtocolServer.java | 13 +-
.../hadoop/hdds/scm/server/SCMStorageConfig.java | 19 +-
.../hdds/scm/server/StorageContainerManager.java | 102 +--
.../scm/TestStorageContainerManagerHttpServer.java | 2 +
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 21 +-
.../hadoop/hdds/scm/block/TestBlockManager.java | 95 +--
.../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 12 +-
.../container/TestCloseContainerEventHandler.java | 10 +-
.../scm/container/TestContainerManagerImpl.java | 18 +-
.../scm/container/TestContainerStateManager.java | 15 +-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 7 +-
.../hdds/scm/node/TestNodeDecommissionManager.java | 34 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 28 +-
.../hdds/scm/pipeline/MockPipelineManager.java | 53 +-
.../scm/pipeline/MockRatisPipelineProvider.java | 21 +-
.../TestPipelineDatanodesIntersection.java | 13 +-
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 83 +--
.../scm/pipeline/TestPipelinePlacementPolicy.java | 10 +-
.../scm/pipeline/TestPipelineStateManager.java | 101 +--
.../scm/pipeline/TestRatisPipelineProvider.java | 100 +--
.../hdds/scm/pipeline/TestSCMPipelineManager.java | 71 +-
...TestSCMStoreImplWithOldPipelineIDKeyFormat.java | 6 +
.../scm/pipeline/TestSimplePipelineProvider.java | 28 +-
.../safemode/TestHealthyPipelineSafeModeRule.java | 26 +-
.../TestOneReplicaPipelineSafeModeRule.java | 13 +-
.../hdds/scm/safemode/TestSCMSafeModeManager.java | 18 +-
.../hadoop/hdds/scm/server/TestSCMCertStore.java | 36 +-
hadoop-hdds/test-utils/pom.xml | 4 +-
hadoop-hdds/tools/pom.xml | 4 +-
.../hdds/scm/cli/datanode/ListInfoSubcommand.java | 6 +-
.../scm/cli/pipeline/CreatePipelineSubcommand.java | 3 +-
.../scm/cli/pipeline/ListPipelinesSubcommand.java | 4 +-
hadoop-ozone/client/pom.xml | 4 +-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 63 +-
hadoop-ozone/common/pom.xml | 4 +-
.../apache/hadoop/ozone/om/helpers/OmKeyInfo.java | 59 +-
.../hadoop/ozone/om/helpers/TestOmKeyInfo.java | 7 +-
hadoop-ozone/csi/pom.xml | 4 +-
hadoop-ozone/datanode/pom.xml | 16 +-
hadoop-ozone/dev-support/checks/_lib.sh | 114 ++++
hadoop-ozone/dev-support/checks/acceptance.sh | 5 +
hadoop-ozone/dev-support/checks/bats.sh | 5 +
hadoop-ozone/dev-support/checks/findbugs.sh | 12 +-
hadoop-ozone/dev-support/checks/kubernetes.sh | 16 +
.../dist/dev-support/bin/dist-layout-stitching | 3 +
hadoop-ozone/dist/pom.xml | 4 +-
.../dist/src/main/assemblies/ozone-src.xml | 7 +
.../compose/ozonesecure-ha/docker-compose.yaml | 26 +
.../src/main/compose/ozonesecure-ha/docker-config | 1 -
.../dist/src/main/compose/ozonesecure-ha/test.sh | 1 +
.../main/compose/ozonesecure/docker-compose.yaml | 1 -
.../dist/src/main/compose/ozonesecure/test.sh | 51 +-
hadoop-ozone/dist/src/main/compose/testlib.sh | 1 -
hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 15 +-
.../src/main/license/bin/licenses/LICENSE-CDDL.txt | 133 ++++
...-jakarta.validation-jakarta.validation-api.txt} | 233 -------
.../LICENSE-jakarta.ws.rs-jakarta.ws.rs-api.md | 637 +++++++++++++++++
.../LICENSE-javax.ws.rs-javax.ws.rs-api.txt | 759 ---------------------
.../NOTICE-jakarta.ws.rs-jakarta.ws.rs-api.md | 61 ++
.../src/main/smoketest/admincli/pipeline.robot | 7 +-
.../dist/src/main/smoketest/s3/commonawslib.robot | 2 +-
.../fault-injection-test/mini-chaos-tests/pom.xml | 4 +-
.../fault-injection-test/network-tests/pom.xml | 2 +-
hadoop-ozone/fault-injection-test/pom.xml | 4 +-
hadoop-ozone/insight/pom.xml | 12 +-
hadoop-ozone/integration-test/pom.xml | 4 +-
.../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java | 1 -
.../hadoop/hdds/scm/TestRatisPipelineLeader.java | 11 +-
.../hadoop/hdds/scm/TestSCMInstallSnapshot.java | 10 +-
.../apache/hadoop/hdds/scm/TestSCMSnapshot.java | 6 +-
.../metrics/TestSCMContainerManagerMetrics.java | 9 +-
.../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 25 +-
.../hdds/scm/pipeline/TestNode2PipelineMap.java | 9 +-
.../hadoop/hdds/scm/pipeline/TestNodeFailure.java | 7 +-
.../hdds/scm/pipeline/TestPipelineClose.java | 10 +-
.../TestRatisPipelineCreateAndDestroy.java | 30 +-
.../hadoop/hdds/scm/pipeline/TestSCMRestart.java | 16 +-
.../safemode/TestSCMSafeModeWithPipelineRules.java | 15 +-
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 10 +-
.../apache/hadoop/ozone/TestDelegationToken.java | 1 -
.../apache/hadoop/ozone/TestMiniOzoneCluster.java | 6 +-
.../hadoop/ozone/TestOzoneConfigurationFields.java | 3 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 6 +-
.../hadoop/ozone/TestStorageContainerManager.java | 6 +
.../ozone/client/CertificateClientTestImpl.java | 13 +
.../TestContainerStateMachineFailureOnRead.java | 7 +-
.../client/rpc/TestDeleteWithSlowFollower.java | 5 +-
.../client/rpc/TestHybridPipelineOnDatanode.java | 2 +-
.../client/rpc/TestOzoneAtRestEncryption.java | 25 +
.../client/rpc/TestOzoneRpcClientAbstract.java | 108 ++-
.../ozone/client/rpc/TestSecureOzoneRpcClient.java | 5 +
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 13 +-
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 2 +-
.../ozone/om/TestOzoneManagerConfiguration.java | 1 -
.../hadoop/ozone/om/TestSecureOzoneManager.java | 4 +-
.../hadoop/ozone/recon/TestReconAsPassiveScm.java | 13 +-
.../apache/hadoop/ozone/recon/TestReconTasks.java | 5 +-
.../TestSCMContainerPlacementPolicyMetrics.java | 7 +-
.../ozone/scm/TestSCMInstallSnapshotWithHA.java | 65 +-
.../org/apache/hadoop/ozone/scm/TestSCMMXBean.java | 9 +-
.../hadoop/ozone/scm/TestXceiverClientGrpc.java | 7 +-
.../ozone/scm/pipeline/TestSCMPipelineMetrics.java | 11 +-
hadoop-ozone/interface-client/pom.xml | 4 +-
hadoop-ozone/interface-storage/pom.xml | 4 +-
hadoop-ozone/ozone-manager/pom.xml | 4 +-
.../java/org/apache/hadoop/ozone/om/OMStorage.java | 18 -
.../org/apache/hadoop/ozone/om/OzoneManager.java | 59 +-
.../hadoop/ozone/om/OzoneManagerStarter.java | 4 -
.../apache/hadoop/ozone/om/ha/OMNodeDetails.java | 2 +-
.../S3MultipartUploadCompleteRequest.java | 2 +-
.../ozone/om/ScmBlockLocationTestingClient.java | 12 +-
.../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 9 +-
.../ozone/om/TestOzoneManagerHttpServer.java | 2 +
.../ozone/om/request/TestOMRequestUtils.java | 54 +-
.../om/request/key/TestOMKeyCommitRequest.java | 78 ++-
.../request/key/TestOMKeyCommitRequestWithFSO.java | 6 +-
.../ozone/om/request/key/TestOMKeyRequest.java | 6 +-
.../om/response/key/TestOMKeyDeleteResponse.java | 4 +-
hadoop-ozone/ozonefs-common/pom.xml | 4 +-
hadoop-ozone/ozonefs-hadoop2/pom.xml | 4 +-
hadoop-ozone/ozonefs-hadoop3/pom.xml | 4 +-
hadoop-ozone/ozonefs-shaded/pom.xml | 4 +-
hadoop-ozone/ozonefs/pom.xml | 4 +-
hadoop-ozone/pom.xml | 4 +-
hadoop-ozone/recon-codegen/pom.xml | 2 +-
hadoop-ozone/recon/pom.xml | 6 +-
.../org/apache/hadoop/ozone/recon/ReconServer.java | 2 -
.../hadoop/ozone/recon/api/NodeEndpoint.java | 6 +-
.../hadoop/ozone/recon/api/PipelineEndpoint.java | 11 +-
.../ozone/recon/scm/ReconPipelineFactory.java | 9 +-
.../recon/scm/ReconPipelineReportHandler.java | 2 +-
.../scm/ReconStorageContainerManagerFacade.java | 15 +-
.../ozone/recon/OMMetadataManagerTestUtils.java | 9 +-
.../hadoop/ozone/recon/api/TestEndpoints.java | 4 +-
.../ozone/recon/scm/TestReconPipelineManager.java | 12 +-
hadoop-ozone/s3gateway/pom.xml | 20 +-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 3 +-
.../apache/hadoop/ozone/s3/util/RFC1123Util.java | 3 +-
hadoop-ozone/tools/pom.xml | 12 +-
.../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 5 +-
.../hadoop/ozone/freon/BaseFreonGenerator.java | 3 +-
.../hadoop/ozone/freon/DatanodeChunkGenerator.java | 3 +-
.../hadoop/ozone/freon/DatanodeChunkValidator.java | 5 +-
.../ozone/freon/LeaderAppendLogEntryGenerator.java | 6 +-
.../ozone/genesis/BenchMarkContainerStateMap.java | 92 +--
.../ozone/genesis/BenchMarkOzoneManager.java | 5 +-
.../apache/hadoop/ozone/genesis/BenchMarkSCM.java | 7 +-
.../apache/hadoop/ozone/genesis/GenesisUtil.java | 5 +-
pom.xml | 42 +-
268 files changed, 4208 insertions(+), 3104 deletions(-)
diff --cc hadoop-hdds/common/src/main/resources/ozone-default.xml
index acd4349,ac8169e..cb42e04
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@@ -2770,35 -2781,4 +2781,36 @@@
</description>
</property>
+ <property>
+ <name>ozone.om.metadata.layout</name>
+ <tag>OZONE, OM</tag>
+ <value>SIMPLE</value>
+ <description>
+ This property is used to define the metadata layout of file system
+ paths. If it is configured as PREFIX in combination with
+ ozone.om.enable.filesystem.paths to true then this allows to perform
+ atomic rename and delete of any directory at any level in the namespace.
+ Defaulting to SIMPLE. Supported values: SIMPLE and PREFIX.
+ </description>
+ </property>
+ <property>
+ <name>ozone.directory.deleting.service.interval</name>
+ <value>1m</value>
+ <tag>OZONE, PERFORMANCE, OM</tag>
+ <description>Time interval of the directory deleting service. It runs on OM
+ periodically and cleanup orphan directory and its sub-tree. For every
+ orphan directory it deletes the sub-path tree structure(dirs/files). It
+ sends sub-files to KeyDeletingService to deletes its blocks. Unit could
+ be defined with postfix (ns,ms,s,m,h,d)
+ </description>
+ </property>
+ <property>
+ <name>ozone.path.deleting.limit.per.task</name>
+ <value>10000</value>
+ <tag>OZONE, PERFORMANCE, OM</tag>
+ <description>A maximum number of paths(dirs/files) to be deleted by
+ directory deleting service per time interval.
+ </description>
+ </property>
++
</configuration>
diff --cc hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index dd67cc1,12aab5a..3a16a8d
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@@ -25,11 -25,11 +25,13 @@@ import java.util.List
import java.util.Map;
import java.util.Objects;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileEncryptionInfo;
+ import org.apache.hadoop.hdds.client.BlockID;
+ import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@@ -42,7 -44,8 +46,8 @@@ import org.slf4j.LoggerFactory
* This is returned from OM to client, and client use class to talk to
* datanode. Also, this is the metadata written to om.db on server side.
*/
-public final class OmKeyInfo extends WithObjectID {
+public final class OmKeyInfo extends WithParentObjectId {
+ private static final Logger LOG = LoggerFactory.getLogger(OmKeyInfo.class);
private final String volumeName;
private final String bucketName;
// name of key client specified
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index fd72fcc,0b646c2..dcd0b7c
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@@ -269,172 -358,8 +269,172 @@@ public class S3MultipartUploadCompleteR
LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}",
multipartUploadCompleteRequest);
}
+ }
+
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
+ KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
+ String multipartKey, OMMetadataManager omMetadataManager,
+ String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+ List<OmKeyLocationInfo> partLocationInfos, long dataSize)
+ throws IOException {
+ HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
+ .getPartKeyInfo().getType();
+ HddsProtos.ReplicationFactor factor =
+ partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+
+ OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
+ omMetadataManager);
+ if (omKeyInfo == null) {
+ // This is a newly added key, it does not have any versions.
+ OmKeyLocationInfoGroup keyLocationInfoGroup = new
+ OmKeyLocationInfoGroup(0, partLocationInfos, true);
+
+ // Get the objectID of the key from OpenKeyTable
+ OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartKey,
+ keyName, omMetadataManager);
+
+ // A newly created key, this is the first version.
+ OmKeyInfo.Builder builder =
+ new OmKeyInfo.Builder().setVolumeName(volumeName)
+ .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
+ .setReplicationFactor(factor).setReplicationType(type)
+ .setCreationTime(keyArgs.getModificationTime())
+ .setModificationTime(keyArgs.getModificationTime())
+ .setDataSize(dataSize)
+ .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
+ .setOmKeyLocationInfos(
+ Collections.singletonList(keyLocationInfoGroup))
+ .setAcls(dbOpenKeyInfo.getAcls());
+ // Check if db entry has ObjectID. This check is required because
+ // it is possible that between multipart key uploads and complete,
+ // we had an upgrade.
+ if (dbOpenKeyInfo.getObjectID() != 0) {
+ builder.setObjectID(dbOpenKeyInfo.getObjectID());
+ }
+ updatePrefixFSOInfo(dbOpenKeyInfo, builder);
+ omKeyInfo = builder.build();
+ } else {
+ // Already a version exists, so we should add it as a new version.
+ // But now as versioning is not supported, just following the commit
+ // key approach. When versioning support comes, then we can uncomment
+ // below code keyInfo.addNewVersion(locations);
- omKeyInfo.updateLocationInfoList(partLocationInfos, true);
++ omKeyInfo.updateLocationInfoList(partLocationInfos, true, true);
+ omKeyInfo.setModificationTime(keyArgs.getModificationTime());
+ omKeyInfo.setDataSize(dataSize);
+ }
+ omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+ return omKeyInfo;
+ }
- return omClientResponse;
+ protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+ OmKeyInfo.Builder builder) {
+ // FSO is disabled. Do nothing.
+ }
+
+ protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
+ String keyName, OMMetadataManager omMetadataManager) throws IOException {
+ return omMetadataManager.getKeyTable().get(dbOzoneKey);
+ }
+
+ protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+ String keyName, OMMetadataManager omMetadataManager) throws IOException {
+ return omMetadataManager.getOpenKeyTable().get(dbMultipartKey);
+ }
+
+ protected int getPartsListSize(String requestedVolume,
+ String requestedBucket, String keyName, String ozoneKey,
+ List<Integer> partNumbers,
+ List<OzoneManagerProtocolProtos.Part> partsList) throws OMException {
+ int prevPartNumber = partsList.get(0).getPartNumber();
+ int partsListSize = partsList.size();
+ partNumbers.add(prevPartNumber);
+ for (int i = 1; i < partsListSize; i++) {
+ int currentPartNumber = partsList.get(i).getPartNumber();
+ if (prevPartNumber >= currentPartNumber) {
+ LOG.error("PartNumber at index {} is {}, and its previous " +
+ "partNumber at index {} is {} for ozonekey is " +
+ "{}", i, currentPartNumber, i - 1, prevPartNumber,
+ ozoneKey);
+ throw new OMException(
+ failureMessage(requestedVolume, requestedBucket, keyName) +
+ " because parts are in Invalid order.",
+ OMException.ResultCodes.INVALID_PART_ORDER);
+ }
+ prevPartNumber = currentPartNumber;
+ partNumbers.add(prevPartNumber);
+ }
+ return partsListSize;
+ }
+
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ protected long getMultipartDataSize(String requestedVolume,
+ String requestedBucket, String keyName, String ozoneKey,
+ TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+ int partsListSize, List<OmKeyLocationInfo> partLocationInfos,
+ List<OzoneManagerProtocolProtos.Part> partsList,
+ OzoneManager ozoneManager) throws OMException {
+ long dataSize = 0;
+ int currentPartCount = 0;
+ // Now do actual logic, and check for any Invalid part during this.
+ for (OzoneManagerProtocolProtos.Part part : partsList) {
+ currentPartCount++;
+ int partNumber = part.getPartNumber();
+ String partName = part.getPartName();
+
+ PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
+
+ String dbPartName = null;
+ if (partKeyInfo != null) {
+ dbPartName = preparePartName(requestedVolume, requestedBucket, keyName,
+ partKeyInfo, ozoneManager.getMetadataManager());
+ }
+ if (!StringUtils.equals(partName, dbPartName)) {
+ String omPartName = partKeyInfo == null ? null : dbPartName;
+ throw new OMException(
+ failureMessage(requestedVolume, requestedBucket, keyName) +
+ ". Provided Part info is { " + partName + ", " + partNumber +
+ "}, whereas OM has partName " + omPartName,
+ OMException.ResultCodes.INVALID_PART);
+ }
+
+ OmKeyInfo currentPartKeyInfo = OmKeyInfo
+ .getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+ // Except for last part all parts should have minimum size.
+ if (currentPartCount != partsListSize) {
+ if (currentPartKeyInfo.getDataSize() <
+ ozoneManager.getMinMultipartUploadPartSize()) {
+ LOG.error("MultipartUpload: {} Part number: {} size {} is less" +
+ " than minimum part size {}", ozoneKey,
+ partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
+ ozoneManager.getMinMultipartUploadPartSize());
+ throw new OMException(
+ failureMessage(requestedVolume, requestedBucket, keyName) +
+ ". Entity too small.",
+ OMException.ResultCodes.ENTITY_TOO_SMALL);
+ }
+ }
+
+ // As all part keys will have only one version.
+ OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
+ .getKeyLocationVersions().get(0);
+
+ // Set partNumber in each block.
+ currentKeyInfoGroup.getLocationList().forEach(
+ omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
+
+ partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
+ dataSize += currentPartKeyInfo.getDataSize();
+ }
+ return dataSize;
+ }
+
+ protected String preparePartName(String requestedVolume,
+ String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
+ OMMetadataManager omMetadataManager) {
+
+ return partKeyInfo.getPartName();
}
private static String failureMessage(String volume, String bucket,
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 7f50f73,5285608..ef41253
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@@ -26,8 -26,8 +26,9 @@@ import java.util.List
import java.util.UUID;
import com.google.common.base.Optional;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.client.BlockID;
+ import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index 09d499e,f864426..229c23a
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@@ -59,17 -54,90 +59,79 @@@ public class TestOMKeyCommitRequest ext
}
@Test
+ public void testValidateAndUpdateCacheWithUnknownBlockId() throws Exception {
+
+ OMRequest modifiedOmRequest =
+ doPreExecute(createCommitKeyRequest());
+
+ OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
++ getOmKeyCommitRequest(modifiedOmRequest);
+
+ // Append 3 blocks locations.
+ List<OmKeyLocationInfo> allocatedLocationList = getKeyLocation(3)
+ .stream().map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager,
- allocatedLocationList);
-
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
++ String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList);
+
+ // Key should not be there in key table, as validateAndUpdateCache is
+ // still not called.
+ OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+ Assert.assertNull(omKeyInfo);
+
+ OMClientResponse omClientResponse =
+ omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
+ 100L, ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+
+ // Entry should be deleted from openKey Table.
+ omKeyInfo = omMetadataManager.getOpenKeyTable().get(ozoneKey);
+ Assert.assertNull(omKeyInfo);
+
+ // Now entry should be created in key Table.
+ omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+ Assert.assertNotNull(omKeyInfo);
+
+ // Check modification time
+
+ CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest();
+ Assert.assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(),
+ omKeyInfo.getModificationTime());
+
+ // Check block location.
+ Assert.assertEquals(allocatedLocationList,
+ omKeyInfo.getLatestVersionLocations().getLocationList());
+
+ }
+
+ @Test
public void testValidateAndUpdateCache() throws Exception {
- OMRequest modifiedOmRequest =
- doPreExecute(createCommitKeyRequest());
+ OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
+ getOmKeyCommitRequest(modifiedOmRequest);
+
+ KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs();
+
+ // Append new blocks
+ List<OmKeyLocationInfo> allocatedLocationList =
+ keyArgs.getKeyLocationsList().stream()
+ .map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
- String ozoneKey = addKeyToOpenKeyTable();
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager,
- allocatedLocationList);
-
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
++ String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList);
// Key should not be there in key table, as validateAndUpdateCache is
// still not called.
@@@ -109,14 -175,9 +171,16 @@@
Assert.assertEquals(locationInfoListFromCommitKeyRequest,
omKeyInfo.getLatestVersionLocations().getLocationList());
+ Assert.assertEquals(allocatedLocationList,
+ omKeyInfo.getLatestVersionLocations().getLocationList());
+ }
+ @Test
+ public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+ parentDir = "dir1/dir2/dir3/";
+ keyName = parentDir + UUID.randomUUID().toString();
+
+ testValidateAndUpdateCache();
}
@Test
@@@ -298,34 -364,4 +362,36 @@@
return keyLocations;
}
+ protected String getParentDir() {
+ return parentDir;
+ }
+
+ @NotNull
+ protected String getOzonePathKey() throws IOException {
+ return omMetadataManager.getOzoneKey(volumeName, bucketName,
+ keyName);
+ }
+
+ @NotNull
- protected String addKeyToOpenKeyTable() throws Exception {
++ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
++ throws Exception {
+ TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager);
++ clientID, replicationType, replicationFactor, omMetadataManager,
++ locationList);
+
+ return getOzonePathKey();
+ }
+
+ @NotNull
+ protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+ return new OMKeyCommitRequest(omRequest);
+ }
+
+ protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+ Assert.assertEquals("Incorrect KeyName", keyName,
+ omKeyInfo.getKeyName());
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ Assert.assertEquals("Incorrect FileName", fileName,
+ omKeyInfo.getFileName());
+ }
}
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
index fde3f7b,0000000..13ad623
mode 100644,000000..100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@@ -1,109 -1,0 +1,113 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
++import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
++import java.util.List;
+
+/**
+ * Class tests OMKeyCommitRequest with prefix layout.
+ */
+public class TestOMKeyCommitRequestWithFSO extends TestOMKeyCommitRequest {
+
+ private long parentID = Long.MIN_VALUE;
+
+ private long getBucketID() throws java.io.IOException {
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ if(omBucketInfo!= null){
+ return omBucketInfo.getObjectID();
+ }
+ // bucket doesn't exist in DB
+ return Long.MIN_VALUE;
+ }
+
+ @Override
+ protected String getOzonePathKey() throws IOException {
+ long bucketID = getBucketID();
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ return omMetadataManager.getOzonePathKey(bucketID, fileName);
+ }
+
+ @Override
- protected String addKeyToOpenKeyTable() throws Exception {
++ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
++ throws Exception {
+ // need to initialize parentID
+ if (getParentDir() == null) {
+ parentID = getBucketID();
+ } else {
+ parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+ bucketName, getParentDir(), omMetadataManager);
+ }
+ long objectId = 100;
+
+ OmKeyInfo omKeyInfoFSO =
+ TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+ Time.now());
++ omKeyInfoFSO.appendNewBlocks(locationList, false);
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ TestOMRequestUtils.addFileToKeyTable(true, false,
+ fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
+
+ return omMetadataManager.getOzonePathKey(parentID, fileName);
+ }
+
+ @NotNull
+ @Override
+ protected OzoneConfiguration getOzoneConfiguration() {
+ OzoneConfiguration config = super.getOzoneConfiguration();
+ // Metadata layout prefix will be set while invoking OzoneManager#start()
+ // and it's not invoked in this test. Hence it is explicitly setting
+ // this configuration to populate prefix tables.
+ OzoneManagerRatisUtils.setBucketFSOptimized(true);
+ return config;
+ }
+
+ @NotNull
+ protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+ return new OMKeyCommitRequestWithFSO(omRequest);
+ }
+
+ protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+ // prefix layout format - stores fileName in the keyName DB field.
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ Assert.assertEquals("Incorrect FileName", fileName,
+ omKeyInfo.getFileName());
+ Assert.assertEquals("Incorrect KeyName", fileName,
+ omKeyInfo.getKeyName());
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org