You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by um...@apache.org on 2022/04/20 06:58:27 UTC
[ozone] 01/01: Merge master to EC branch HDDS-3816-ec
This is an automated email from the ASF dual-hosted git repository.
umamahesh pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git
commit cb0a84b27c964231849e5610c1f7e29aac565102
Merge: 70b547be23 172947815b
Author: Uma Maheswara Rao G <um...@cloudera.com>
AuthorDate: Tue Apr 19 21:22:19 2022 -0700
Merge master to EC branch HDDS-3816-ec
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 7 +
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 2 +
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 2 +
.../org/apache/hadoop/ozone/audit/AuditLogger.java | 55 +++++-
.../apache/hadoop/ozone/audit/AuditLoggerType.java | 3 +-
.../apache/hadoop/ozone/audit/AuditMessage.java | 30 ++-
.../audit/{AuditLoggerType.java => S3GAction.java} | 41 ++--
.../common/src/main/resources/ozone-default.xml | 56 ++++++
.../hadoop/ozone/audit/TestOzoneAuditLogger.java | 54 +++++-
.../common/src/test/resources/auditlog.properties | 2 +-
.../common/statemachine/StateContext.java | 13 ++
.../states/datanode/RunningDatanodeState.java | 8 +-
.../states/endpoint/RegisterEndpointTask.java | 6 +-
hadoop-hdds/docs/content/feature/OM-HA.md | 37 ++++
.../apache/hadoop/hdds/utils/HddsServerUtil.java | 14 ++
.../apache/hadoop/hdds/scm/ha/SCMHAManager.java | 2 +-
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 3 +-
.../hadoop/hdds/scm/ha/SCMHAManagerStub.java | 18 +-
.../apache/hadoop/hdds/scm/ha/SCMRatisServer.java | 2 +
.../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 10 +
.../apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 15 +-
.../scm/pipeline/BackgroundPipelineCreator.java | 6 +-
.../scm/server/OzoneStorageContainerManager.java | 2 +
.../hdds/scm/server/StorageContainerManager.java | 9 +-
.../main/resources/webapps/scm/scm-overview.html | 1 +
.../hdds/scm/ha/TestReplicationAnnotation.java | 5 +
.../TestSCMHAUnfinalizedStateValidationAction.java | 8 +
.../hdds/scm/cli/container/InfoSubcommand.java | 7 +-
.../hdds/scm/cli/container/TestInfoSubCommand.java | 2 +-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 14 +-
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 5 +
.../ozone/om/ha/OMFailoverProxyProvider.java | 50 +++--
.../dist/dev-support/bin/dist-layout-stitching | 1 +
.../src/main/compose/ozonesecure-ha/docker-config | 2 +-
.../dist/src/main/smoketest/s3/bucketlist.robot | 8 +
.../dist/src/shell/conf/dn-audit-log4j2.properties | 2 +-
.../dist/src/shell/conf/om-audit-log4j2.properties | 2 +-
...g4j2.properties => s3g-audit-log4j2.properties} | 8 +-
.../src/shell/conf/scm-audit-log4j2.properties | 2 +-
hadoop-ozone/dist/src/shell/ozone/ozone | 1 +
.../apache/hadoop/ozone/TestDelegationToken.java | 8 +
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 3 +
.../ozone/scm/TestStorageContainerManagerHA.java | 2 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 28 ++-
.../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 41 +++-
.../ozone/om/ratis/OzoneManagerRatisServer.java | 2 +-
.../ozone/om/ratis/OzoneManagerStateMachine.java | 18 +-
.../om/snapshot/OzoneManagerSnapshotProvider.java | 9 +-
.../hadoop/ozone/om/TestKeyDeletingService.java | 7 +
.../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 9 +-
.../apache/hadoop/ozone/om/TestTrashService.java | 2 +
...tOzoneManagerDoubleBufferWithDummyResponse.java | 1 +
...TestOzoneManagerDoubleBufferWithOMResponse.java | 7 +-
.../om/ratis/TestOzoneManagerRatisServer.java | 8 +
.../om/ratis/TestOzoneManagerStateMachine.java | 1 +
.../org.apache.hadoop.security.token.DtFetcher} | 2 +-
...rg.apache.hadoop.security.token.TokenIdentifier | 17 ++
.../org.apache.hadoop.security.token.TokenRenewer | 1 +
.../org.apache.hadoop.security.token.TokenRenewer | 1 +
hadoop-ozone/recon/pom.xml | 2 +-
.../hadoop/ozone/recon/scm/ReconNodeManager.java | 2 +-
.../scm/ReconStorageContainerManagerFacade.java | 7 +
hadoop-ozone/s3gateway/pom.xml | 4 +
.../org/apache/hadoop/ozone/s3/ClientIpFilter.java | 69 +++++++
.../hadoop/ozone/s3/endpoint/BucketEndpoint.java | 213 +++++++++++++++------
.../hadoop/ozone/s3/endpoint/EndpointBase.java | 57 +++++-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 127 +++++++++++-
.../hadoop/ozone/s3/endpoint/RootEndpoint.java | 53 +++--
.../hadoop/ozone/s3/metrics/S3GatewayMetrics.java | 52 ++++-
.../org/apache/hadoop/ozone/s3/util/S3Utils.java | 19 ++
...ewayMetrics.java => TestS3GatewayAuditLog.java} | 83 ++++++--
.../ozone/s3/metrics/TestS3GatewayMetrics.java | 173 ++++++++++++++++-
.../src/test/resources/auditlog.properties | 4 +-
.../ozone/shell/bucket/InfoBucketHandler.java | 6 +
74 files changed, 1310 insertions(+), 243 deletions(-)
diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 4b7fda9040,d5794e5670..699d732649
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@@ -460,13 -455,8 +460,15 @@@ public final class OzoneConfigKeys
public static final String OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_DEFAULT =
OzoneManagerVersion.S3G_PERSISTENT_CONNECTIONS.name();
+ public static final String
+ OZONE_CLIENT_BUCKET_REPLICATION_CONFIG_REFRESH_PERIOD_MS =
+ "ozone.client.bucket.replication.config.refresh.time.ms";
+ public static final long
+ OZONE_CLIENT_BUCKET_REPLICATION_CONFIG_REFRESH_PERIOD_DEFAULT_MS =
+ 300 * 1000;
+
+ public static final String OZONE_AUDIT_LOG_DEBUG_CMD_LIST_OMAUDIT =
+ "ozone.audit.log.debug.cmd.list.omaudit";
/**
* There is no need to instantiate this class.
*/
diff --cc hadoop-hdds/common/src/main/resources/ozone-default.xml
index 133528ca87,703e3c5c7b..aaa78591a9
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@@ -3092,15 -3089,33 +3118,45 @@@
</description>
</property>
+ <property>
+ <name>ozone.client.max.ec.stripe.write.retries</name>
+ <value>10</value>
+ <tag>CLIENT</tag>
+ <description>
+ When an EC stripe write fails, the client will request to allocate a new block group and write the failed stripe into the new
+ block group. If the same stripe failure continues in the newly acquired block group as well, then it will retry by
+ requesting to allocate a new block group again. This configuration is used to limit the number of such retries. By
+ default the number of retries is 10.
+ </description>
+ </property>
++
+ <property>
+ <name>ozone.audit.log.debug.cmd.list.omaudit</name>
+ <value></value>
+ <tag>OM</tag>
+ <description>
+ A comma separated list of OzoneManager commands that are written to the OzoneManager audit logs only if the audit
+ log level is debug. Ex: "ALLOCATE_BLOCK,ALLOCATE_KEY,COMMIT_KEY".
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.audit.log.debug.cmd.list.scmaudit</name>
+ <value></value>
+ <tag>SCM</tag>
+ <description>
+ A comma separated list of SCM commands that are written to the SCM audit logs only if the audit
+ log level is debug. Ex: "GET_VERSION,REGISTER,SEND_HEARTBEAT".
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.audit.log.debug.cmd.list.dnaudit</name>
+ <value></value>
+ <tag>DN</tag>
+ <description>
+ A comma separated list of Datanode commands that are written to the DN audit logs only if the audit
+ log level is debug. Ex: "CREATE_CONTAINER,READ_CONTAINER,UPDATE_CONTAINER".
+ </description>
+ </property>
</configuration>
diff --cc hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
index 10b5758f44,23a1a0b3a6..c6040191b7
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
@@@ -82,22 -81,11 +82,22 @@@ public class TestInfoSubCommand
@Test
public void testReplicasIncludedInOutput() throws Exception {
+ testReplicaIncludedInOutput(false);
+ }
+
+ @Test
+ public void testReplicaIndexInOutput() throws Exception {
+ testReplicaIncludedInOutput(true);
+ }
+
+
+ private void testReplicaIncludedInOutput(boolean includeIndex)
+ throws IOException {
Mockito.when(scmClient.getContainerReplicas(anyLong()))
- .thenReturn(getReplicas());
+ .thenReturn(getReplicas(includeIndex));
cmd = new InfoSubcommand();
CommandLine c = new CommandLine(cmd);
- c.parseArgs("1");
+ c.parseArgs("1", "--replicas");
cmd.execute(scmClient);
// Ensure we have a line for Replicas:
diff --cc hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 01d9e91ef6,3a460deb0b..213868cb6c
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@@ -571,19 -532,19 +571,29 @@@ public class RpcClient implements Clien
Preconditions.checkNotNull(bucketArgs);
verifyCountsQuota(bucketArgs.getQuotaInNamespace());
verifySpaceQuota(bucketArgs.getQuotaInBytes());
+ if (omVersion
+ .compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) {
+ if (bucketArgs.getDefaultReplicationConfig() != null &&
+ bucketArgs.getDefaultReplicationConfig().getType()
+ == ReplicationType.EC) {
+ throw new IOException("Can not set the default replication of the"
+ + " bucket to Erasure Coded replication, as OzoneManager does"
+ + " not support Erasure Coded replication.");
+ }
+ }
- String owner = bucketArgs.getOwner() == null ?
- ugi.getShortUserName() : bucketArgs.getOwner();
+ final String owner;
+ // If S3 auth exists, set owner name to the short user name derived from the
+ // accessId. Similar to RpcClient#getDEK
+ if (getThreadLocalS3Auth() != null) {
+ UserGroupInformation s3gUGI = UserGroupInformation.createRemoteUser(
+ getThreadLocalS3Auth().getAccessID());
+ owner = s3gUGI.getShortUserName();
+ } else {
+ owner = bucketArgs.getOwner() == null ?
+ ugi.getShortUserName() : bucketArgs.getOwner();
+ }
+
Boolean isVersionEnabled = bucketArgs.getVersioning() == null ?
Boolean.FALSE : bucketArgs.getVersioning();
StorageType storageType = bucketArgs.getStorageType() == null ?
diff --cc hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index e02c8887bb,808a8f0e41..d71a99bde5
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@@ -253,10 -297,27 +297,26 @@@ public class BucketEndpoint extends End
OzoneBucket bucket = getBucket(bucketName);
- OzoneMultipartUploadList ozoneMultipartUploadList;
try {
- ozoneMultipartUploadList = bucket.listMultipartUploads(prefix);
+ OzoneMultipartUploadList ozoneMultipartUploadList =
+ bucket.listMultipartUploads(prefix);
+
+ ListMultipartUploadsResult result = new ListMultipartUploadsResult();
+ result.setBucket(bucketName);
+
+ ozoneMultipartUploadList.getUploads().forEach(upload -> result.addUpload(
+ new ListMultipartUploadsResult.Upload(
+ upload.getKeyName(),
+ upload.getUploadId(),
+ upload.getCreationTime(),
- S3StorageType.fromReplicationType(upload.getReplicationType(),
- upload.getReplicationFactor())
++ S3StorageType.fromReplicationConfig(upload.getReplicationConfig())
+ )));
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, auditParams));
+ getMetrics().incListMultipartUploadsSuccess();
+ return Response.ok(result).build();
} catch (OMException exception) {
+ AUDIT.logReadFailure(
+ buildAuditMessageForFailure(s3GAction, auditParams, exception));
getMetrics().incListMultipartUploadsFailure();
if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
throw newError(S3ErrorTable.ACCESS_DENIED, prefix, exception);
diff --cc hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 5b37d1f8b3,a852a3c2ab..8824e98fdd
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@@ -166,29 -163,50 +168,43 @@@ public class ObjectEndpoint extends End
@QueryParam("uploadId") @DefaultValue("") String uploadID,
InputStream body) throws IOException, OS3Exception {
- OzoneOutputStream output = null;
-
- if (uploadID != null && !uploadID.equals("")) {
- // If uploadID is specified, it is a request for upload part
- return createMultipartKey(bucketName, keyPath, length,
- partNumber, uploadID, body);
+ S3GAction s3GAction = S3GAction.CREATE_KEY;
+ boolean auditSuccess = true;
+ Map<String, String> auditParams = genAuditParam(
+ "bucket", bucketName,
+ "path", keyPath,
+ "Content-Length", String.valueOf(length),
+ "partNumber", String.valueOf(partNumber)
+ );
+ if (partNumber != 0) {
+ auditParams.put("uploadId", uploadID);
}
+ OzoneOutputStream output = null;
+
String copyHeader = null, storageType = null;
try {
+ if (uploadID != null && !uploadID.equals("")) {
+ s3GAction = S3GAction.CREATE_MULTIPART_KEY;
+ // If uploadID is specified, it is a request for upload part
+ return createMultipartKey(bucketName, keyPath, length,
+ partNumber, uploadID, body);
+ }
+
copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
+ boolean storageTypeDefault = StringUtils.isEmpty(storageType);
- S3StorageType s3StorageType;
- boolean storageTypeDefault;
- if (storageType == null || storageType.equals("")) {
- s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
- storageTypeDefault = true;
- } else {
- s3StorageType = toS3StorageType(storageType);
- storageTypeDefault = false;
- }
- ReplicationType replicationType = s3StorageType.getType();
- ReplicationFactor replicationFactor = s3StorageType.getFactor();
+ // Normal put object
+ OzoneBucket bucket = getBucket(bucketName);
+ ReplicationConfig replicationConfig =
+ getReplicationConfig(bucket, storageType);
if (copyHeader != null) {
//Copy object, as copy source available.
+ s3GAction = S3GAction.COPY_OBJECT;
CopyObjectResponse copyObjectResponse = copyObject(
- copyHeader, bucketName, keyPath, replicationType,
- replicationFactor, storageTypeDefault);
+ copyHeader, bucket, keyPath, replicationConfig, storageTypeDefault);
return Response.status(Status.OK).entity(copyObjectResponse).header(
"Connection", "close").build();
}
diff --cc hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
index d644162a8e,7a07a89791..5f110e8409
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
@@@ -17,19 -17,15 +17,22 @@@
*/
package org.apache.hadoop.ozone.s3.util;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+ import org.apache.commons.lang3.StringUtils;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
+ import java.util.Map;
+ import java.util.TreeMap;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
/**
* Utilities.
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org