You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by bh...@apache.org on 2021/11/02 19:10:34 UTC
[ozone] branch HDDS-4440-s3-performance updated: HDDS-5781. Enable
ACLs and support for all s3 file operations. (#2739)
This is an automated email from the ASF dual-hosted git repository.
bharat pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-4440-s3-performance by this push:
new 1ffbedd HDDS-5781. Enable ACLs and support for all s3 file operations. (#2739)
1ffbedd is described below
commit 1ffbeddcec9042de8b618daf7865d5d11d2871dd
Author: Neil Joshi <ne...@gmail.com>
AuthorDate: Tue Nov 2 13:10:17 2021 -0600
HDDS-5781. Enable ACLs and support for all s3 file operations. (#2739)
---
.../ozone/om/protocolPB/GrpcOmTransport.java | 13 +++++++-
.../src/main/compose/ozonesecure/docker-config | 1 +
.../dist/src/main/compose/ozonesecure/test.sh | 10 +++----
.../org/apache/hadoop/ozone/om/OzoneManager.java | 2 +-
.../hadoop/ozone/om/request/OMClientRequest.java | 6 +++-
.../hadoop/ozone/s3/endpoint/EndpointBase.java | 35 +++++++++++++++++++++-
6 files changed, 58 insertions(+), 9 deletions(-)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 22d49a9..c4ebbe0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -26,6 +26,7 @@ import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import io.grpc.Status;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
@@ -137,7 +138,17 @@ public class GrpcOmTransport implements OmTransport {
.build();
}
}
- return client.submitRequest(payload);
+ OMResponse resp = null;
+ try {
+ resp = client.submitRequest(payload);
+ } catch (io.grpc.StatusRuntimeException e) {
+ ResultCodes resultCode = ResultCodes.INTERNAL_ERROR;
+ if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+ resultCode = ResultCodes.TIMEOUT;
+ }
+ throw new OMException(e.getCause(), resultCode);
+ }
+ return resp;
}
// stub implementation for interface
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index 103a997..9246337 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -69,6 +69,7 @@ OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
OZONE-SITE.XML_ozone.scm.stale.node.interval=30s
OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
OZONE-SITE.XML_hdds.container.report.interval=60s
+OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM
HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index f3971f5..5477a76 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -33,9 +33,9 @@ execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME}
execute_robot_test scm kinit.robot
-#execute_robot_test scm basic
+execute_robot_test scm basic
-#execute_robot_test scm security
+execute_robot_test scm security
for scheme in ofs o3fs; do
for bucket in link bucket; do
@@ -43,9 +43,9 @@ for scheme in ofs o3fs; do
done
done
-#for bucket in link generated; do
-# execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3
-#done
+for bucket in link generated; do
+ execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3
+done
#expects 4 pipelines, should be run before
#admincli which creates STANDALONE pipeline
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 29688ba..bbef051 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -4038,7 +4038,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
if (isAclEnabled) {
InetAddress remoteIp = Server.getRemoteIp();
resolved = resolveBucketLink(requested, new HashSet<>(),
- Server.getRemoteUser(),
+ getRemoteUser(),
remoteIp,
remoteIp != null ? remoteIp.getHostName() :
omRpcAddress.getHostName());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index bc75291..215e420 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -136,6 +136,11 @@ public abstract class OMClientRequest implements RequestAuditor {
userInfo.setUserName(user.getUserName());
}
+ // for gRPC s3g omRequests that contain user name
+ if (user == null && omRequest.hasUserInfo()) {
+ userInfo.setUserName(omRequest.getUserInfo().getUserName());
+ }
+
if (remoteAddress != null) {
userInfo.setHostName(remoteAddress.getHostName());
userInfo.setRemoteAddress(remoteAddress.getHostAddress()).build();
@@ -273,7 +278,6 @@ public abstract class OMClientRequest implements RequestAuditor {
if (userGroupInformation != null) {
return userGroupInformation;
}
-
if (omRequest.hasUserInfo() &&
!StringUtils.isBlank(omRequest.getUserInfo().getUserName())) {
userGroupInformation = UserGroupInformation.createRemoteUser(
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 137b8ea..183c929 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.security.UserGroupInformation;
/**
* Basic helpers for all the REST endpoints.
@@ -50,6 +51,12 @@ public class EndpointBase {
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+ } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+ throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
+ UserGroupInformation.getCurrentUser().getUserName());
+ } else if (ex.getResult() == ResultCodes.TIMEOUT ||
+ ex.getResult() == ResultCodes.INTERNAL_ERROR) {
+ throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, bucketName);
} else {
throw ex;
}
@@ -66,8 +73,14 @@ public class EndpointBase {
if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND
|| ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+ } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+ throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
+ UserGroupInformation.getCurrentUser().getUserName());
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+ } else if (ex.getResult() == ResultCodes.TIMEOUT ||
+ ex.getResult() == ResultCodes.INTERNAL_ERROR) {
+ throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, bucketName);
} else {
throw ex;
}
@@ -95,6 +108,12 @@ public class EndpointBase {
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+ } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+ throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
+ UserGroupInformation.getCurrentUser().getUserName());
+ } else if (ex.getResult() == ResultCodes.TIMEOUT ||
+ ex.getResult() == ResultCodes.INTERNAL_ERROR) {
+ throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, bucketName);
} else if (ex.getResult() != ResultCodes.BUCKET_ALREADY_EXISTS) {
// S3 does not return error for bucket already exists, it just
// returns the location.
@@ -117,8 +136,15 @@ public class EndpointBase {
if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
s3BucketName);
+ } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+ throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
+ UserGroupInformation.getCurrentUser().getUserName());
+ } else if (ex.getResult() == ResultCodes.TIMEOUT ||
+ ex.getResult() == ResultCodes.INTERNAL_ERROR) {
+ throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, s3BucketName);
+ } else {
+ throw ex;
}
- throw ex;
}
}
@@ -161,6 +187,13 @@ public class EndpointBase {
} else if (e.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
"listBuckets");
+ } else if (e.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+ throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
+ UserGroupInformation.getCurrentUser().getUserName());
+ } else if (e.getResult() == ResultCodes.TIMEOUT ||
+ e.getResult() == ResultCodes.INTERNAL_ERROR) {
+ throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR,
+ "listBuckets");
} else {
throw e;
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org