You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ar...@apache.org on 2020/07/22 16:35:10 UTC
[hadoop-ozone] branch master updated: HDDS-3993. Create volume
required for S3G during OM startup. (#1227)
This is an automated email from the ASF dual-hosted git repository.
arp pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7dac140 HDDS-3993. Create volume required for S3G during OM startup. (#1227)
7dac140 is described below
commit 7dac140024214c2189b72fad0566a0252d63e93c
Author: Bharat Viswanadham <bh...@apache.org>
AuthorDate: Wed Jul 22 09:34:59 2020 -0700
HDDS-3993. Create volume required for S3G during OM startup. (#1227)
---
.../dist/src/main/smoketest/s3/commonawslib.robot | 2 -
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 6 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 19 +++-
.../ozone/client/rpc/TestSecureOzoneRpcClient.java | 3 -
.../org/apache/hadoop/ozone/om/TestOmMetrics.java | 10 +-
.../ozone/om/TestOzoneManagerListVolumes.java | 18 ++--
.../org/apache/hadoop/ozone/om/OzoneManager.java | 105 +++++++++++++++++++++
7 files changed, 144 insertions(+), 19 deletions(-)
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index c263988..74dba38 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -93,8 +93,6 @@ Create bucket with name
Setup s3 tests
Run Keyword Install aws cli
Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers
- ${result} = Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/s3v
- Should not contain ${result} Failed
${BUCKET} = Run Keyword if '${BUCKET}' == 'generated' Create bucket
... ELSE Set Variable ${BUCKET}
Set Suite Variable ${BUCKET}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 3aec3e8..75b3843 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -573,7 +573,8 @@ public class TestRootedOzoneFileSystem {
// listStatus("/")
Path root = new Path(OZONE_URI_DELIMITER);
FileStatus[] fileStatusRoot = ofs.listStatus(root);
- Assert.assertEquals(2, fileStatusRoot.length);
+ // Default volume "s3v" is created by OM during start up.
+ Assert.assertEquals(2 + 1, fileStatusRoot.length);
}
/**
@@ -687,7 +688,8 @@ public class TestRootedOzoneFileSystem {
FileStatus[] fileStatusesOver = customListStatus(new Path("/"),
false, "", 8);
// There are only 5 volumes
- Assert.assertEquals(5, fileStatusesOver.length);
+ // Default volume "s3v" is created during startup.
+ Assert.assertEquals(5 + 1, fileStatusesOver.length);
// numEntries = 5
FileStatus[] fileStatusesExact = customListStatus(new Path("/"),
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 3b90815..ac9faa6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -162,8 +163,6 @@ public abstract class TestOzoneRpcClientAbstract {
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
store = ozClient.getObjectStore();
- String volumeName = HddsClientUtils.getS3VolumeName(conf);
- store.createVolume(volumeName);
storageContainerLocationClient =
cluster.getStorageContainerLocationClient();
ozoneManager = cluster.getOzoneManager();
@@ -237,6 +236,22 @@ public abstract class TestOzoneRpcClientAbstract {
}
@Test
+ public void testDefaultS3GVolumeExists() throws Exception {
+ String s3VolumeName = HddsClientUtils.getS3VolumeName(cluster.getConf());
+ OzoneVolume ozoneVolume = store.getVolume(s3VolumeName);
+ Assert.assertEquals(ozoneVolume.getName(), s3VolumeName);
+ OMMetadataManager omMetadataManager =
+ cluster.getOzoneManager().getMetadataManager();
+ long transactionID = (Long.MAX_VALUE - 1) >> 8;
+ long objectID = transactionID << 8;
+ OmVolumeArgs omVolumeArgs =
+ cluster.getOzoneManager().getMetadataManager().getVolumeTable().get(
+ omMetadataManager.getVolumeKey(s3VolumeName));
+ Assert.assertEquals(objectID, omVolumeArgs.getObjectID());
+ Assert.assertEquals(transactionID, omVolumeArgs.getUpdateID());
+ }
+
+ @Test
public void testVolumeSetOwner() throws IOException {
String volumeName = UUID.randomUUID().toString();
store.createVolume(volumeName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index 60a1a1e..72ce91a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.security.token.BlockTokenVerifier;
@@ -126,8 +125,6 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
store = ozClient.getObjectStore();
- String volumeName = HddsClientUtils.getS3VolumeName(conf);
- store.createVolume(volumeName);
storageContainerLocationClient =
cluster.getStorageContainerLocationClient();
ozoneManager = cluster.getOzoneManager();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index b80e357..a53a758 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -111,7 +111,7 @@ public class TestOmMetrics {
assertCounter("NumVolumeCheckAccesses", 1L, omMetrics);
assertCounter("NumVolumeDeletes", 1L, omMetrics);
assertCounter("NumVolumeLists", 1L, omMetrics);
- assertCounter("NumVolumes", 0L, omMetrics);
+ assertCounter("NumVolumes", 1L, omMetrics);
ozoneManager.createVolume(null);
ozoneManager.createVolume(null);
@@ -119,7 +119,9 @@ public class TestOmMetrics {
ozoneManager.deleteVolume(null);
omMetrics = getMetrics("OMMetrics");
- assertCounter("NumVolumes", 2L, omMetrics);
+
+ // Accounting for the 's3v' volume which is created by default.
+ assertCounter("NumVolumes", 3L, omMetrics);
// inject exception to test for Failure Metrics
@@ -152,10 +154,10 @@ public class TestOmMetrics {
// The last set of volume ops does not increment numVolumes because those
// calls failed.
- assertCounter("NumVolumes", 2L, omMetrics);
+ assertCounter("NumVolumes", 3L, omMetrics);
cluster.restartOzoneManager();
- assertCounter("NumVolumes", 2L, omMetrics);
+ assertCounter("NumVolumes", 3L, omMetrics);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
index a8b1eef..d7aaf37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
@@ -203,14 +203,16 @@ public class TestOzoneManagerListVolumes {
UserGroupInformation.setLoginUser(user1);
checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"),
true);
+
+ // Add "s3v", which is created by default by OM.
checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3",
- "volume4", "volume5"), true);
+ "volume4", "volume5", "s3v"), true);
UserGroupInformation.setLoginUser(user2);
checkUser(cluster, user1, Arrays.asList("volume1", "volume4", "volume5"),
true);
checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3",
- "volume4", "volume5"), true);
+ "volume4", "volume5", "s3v"), true);
stopCluster(cluster);
}
@@ -229,8 +231,9 @@ public class TestOzoneManagerListVolumes {
UserGroupInformation.setLoginUser(user1);
checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"),
false);
+ // Add "s3v", which is created by default by OM.
checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3",
- "volume4", "volume5"), false);
+ "volume4", "volume5", "s3v"), false);
// While admin should be able to list volumes just fine.
UserGroupInformation.setLoginUser(adminUser);
@@ -250,8 +253,10 @@ public class TestOzoneManagerListVolumes {
true);
checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"),
true);
+
+ // Add "s3v", which is created by default by OM.
checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3",
- "volume4", "volume5"), true);
+ "volume4", "volume5", "s3v"), true);
stopCluster(cluster);
}
@@ -268,8 +273,9 @@ public class TestOzoneManagerListVolumes {
checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"),
false);
UserGroupInformation.setLoginUser(adminUser);
- checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3",
- "volume4", "volume5"), true);
+ // Add "s3v", which is created by default by OM.
+ checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2",
+ "volume3", "volume4", "volume5", "s3v"), true);
stopCluster(cluster);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 43ae998..16d83da 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -47,6 +47,7 @@ import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
+import com.google.common.base.Optional;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.key.KeyProvider;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -79,9 +81,12 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
import org.apache.hadoop.hdds.utils.RetriableTask;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client;
@@ -133,6 +138,7 @@ import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo;
import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
@@ -140,6 +146,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
@@ -185,6 +192,7 @@ import static org.apache.hadoop.hdds.security.x509.certificates.utils.Certificat
import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
+import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
@@ -219,6 +227,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService;
+import org.apache.hadoop.util.Time;
import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.ratis.util.ExitUtils;
@@ -426,6 +435,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
}
instantiateServices();
+
+ // Create special volume s3v which is required for S3G.
+ addS3GVolumeToDB();
+
this.omRatisSnapshotInfo = new OMRatisSnapshotInfo();
initializeRatisServer();
if (isRatisEnabled) {
@@ -1146,6 +1159,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
startJVMPauseMonitor();
setStartTime();
omState = State.RUNNING;
+
}
/**
@@ -3502,4 +3516,95 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
return configuration.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS,
OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
}
+
+ /**
+ * Create volume which is required for S3Gateway operations.
+ * @throws IOException
+ */
+ private void addS3GVolumeToDB() throws IOException {
+ String s3VolumeName = HddsClientUtils.getS3VolumeName(configuration);
+ String dbVolumeKey = metadataManager.getVolumeKey(s3VolumeName);
+
+ if (!s3VolumeName.equals(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT)) {
+ LOG.warn("Make sure that all S3Gateway use same volume name." +
+ " Otherwise user need to manually create/configure Volume " +
+ "configured by S3Gateway");
+ }
+ if (!metadataManager.getVolumeTable().isExist(dbVolumeKey)) {
+ long transactionID = (Long.MAX_VALUE - 1) >> 8;
+ long objectID = OMFileRequest.getObjIDFromTxId(transactionID);
+ String userName =
+ UserGroupInformation.getCurrentUser().getShortUserName();
+
+ // Add volume and user info to DB and cache.
+
+ OmVolumeArgs omVolumeArgs = createS3VolumeInfo(s3VolumeName,
+ transactionID, objectID);
+
+ String dbUserKey = metadataManager.getUserKey(userName);
+ UserVolumeInfo userVolumeInfo = UserVolumeInfo.newBuilder()
+ .setObjectID(objectID)
+ .setUpdateID(transactionID)
+ .addVolumeNames(s3VolumeName).build();
+
+
+ // Commit to DB.
+ BatchOperation batchOperation =
+ metadataManager.getStore().initBatchOperation();
+
+ metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey,
+ omVolumeArgs);
+ metadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey,
+ userVolumeInfo);
+
+ metadataManager.getStore().commitBatchOperation(batchOperation);
+
+ // Add to cache.
+ metadataManager.getVolumeTable().addCacheEntry(
+ new CacheKey<>(dbVolumeKey),
+ new CacheValue<>(Optional.of(omVolumeArgs), transactionID));
+ metadataManager.getUserTable().addCacheEntry(
+ new CacheKey<>(dbUserKey),
+ new CacheValue<>(Optional.of(userVolumeInfo), transactionID));
+ LOG.info("Created Volume {} With Owner {} required for S3Gateway " +
+ "operations.", s3VolumeName, userName);
+ }
+ }
+
+ private OmVolumeArgs createS3VolumeInfo(String s3Volume, long transactionID,
+ long objectID) throws IOException {
+ String userName = UserGroupInformation.getCurrentUser().getShortUserName();
+ long time = Time.now();
+
+ OmVolumeArgs.Builder omVolumeArgs = new OmVolumeArgs.Builder()
+ .setVolume(s3Volume)
+ .setUpdateID(transactionID)
+ .setObjectID(objectID)
+ .setCreationTime(time)
+ .setModificationTime(time)
+ .setOwnerName(userName)
+ .setAdminName(userName)
+ .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES);
+
+ // Provide ACLType of ALL which is default acl rights for user and group.
+ List<OzoneAcl> listOfAcls = new ArrayList<>();
+ //User ACL
+ listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
+ userName, ACLType.ALL, ACCESS));
+ //Group ACLs of the User
+ List<String> userGroups = Arrays.asList(UserGroupInformation
+ .createRemoteUser(userName).getGroupNames());
+
+ userGroups.stream().forEach((group) -> listOfAcls.add(
+ new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS)));
+
+ // Add ACLs
+ for (OzoneAcl ozoneAcl : listOfAcls) {
+ omVolumeArgs.addOzoneAcls(OzoneAcl.toProtobuf(ozoneAcl));
+ }
+
+ return omVolumeArgs.build();
+
+ }
+
}
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org