You are viewing a plain text version of this content. The canonical (HTML) version of this message is available in the Apache mailing list archive.
Posted to commits@ozone.apache.org by sa...@apache.org on 2020/09/16 04:02:35 UTC
[hadoop-ozone] branch master updated: HDDS-4053. Volume space: add
quotaUsageInBytes and update it when write and delete key. (#1296)
This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7beb2d0 HDDS-4053. Volume space: add quotaUsageInBytes and update it when write and delete key. (#1296)
7beb2d0 is described below
commit 7beb2d09e34d7a20f1205a05b30112cc40fc25e4
Author: micah zhao <mi...@tencent.com>
AuthorDate: Wed Sep 16 12:02:19 2020 +0800
HDDS-4053. Volume space: add quotaUsageInBytes and update it when write and delete key. (#1296)
---
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 1 +
.../apache/hadoop/ozone/client/OzoneVolume.java | 17 ++
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 6 +-
.../hadoop/ozone/om/helpers/OmVolumeArgs.java | 34 ++-
.../client/rpc/TestOzoneRpcClientAbstract.java | 269 ++++++++++++++++++++-
.../src/main/proto/OmClientProtocol.proto | 2 +
.../ozone/om/request/file/OMFileCreateRequest.java | 37 +--
.../om/request/key/OMAllocateBlockRequest.java | 34 +--
.../ozone/om/request/key/OMKeyCommitRequest.java | 30 ++-
.../ozone/om/request/key/OMKeyCreateRequest.java | 42 ++--
.../ozone/om/request/key/OMKeyDeleteRequest.java | 29 ++-
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 16 ++
.../ozone/om/request/key/OMKeysDeleteRequest.java | 20 +-
.../multipart/S3MultipartUploadAbortRequest.java | 37 ++-
.../S3MultipartUploadCommitPartRequest.java | 17 +-
.../om/response/file/OMFileCreateResponse.java | 8 +-
.../om/response/key/OMAllocateBlockResponse.java | 10 +-
.../ozone/om/response/key/OMKeyCommitResponse.java | 11 +-
.../ozone/om/response/key/OMKeyCreateResponse.java | 12 +-
.../ozone/om/response/key/OMKeyDeleteResponse.java | 11 +-
.../om/response/key/OMKeysDeleteResponse.java | 12 +-
.../multipart/S3MultipartUploadAbortResponse.java | 12 +-
.../S3MultipartUploadCommitPartResponse.java | 10 +-
.../response/key/TestOMAllocateBlockResponse.java | 14 +-
.../om/response/key/TestOMKeyCommitResponse.java | 12 +-
.../om/response/key/TestOMKeyCreateResponse.java | 16 +-
.../om/response/key/TestOMKeyDeleteResponse.java | 17 +-
.../om/response/key/TestOMKeysDeleteResponse.java | 23 +-
.../s3/multipart/TestS3MultipartResponse.java | 5 +-
.../TestS3MultipartUploadAbortResponse.java | 14 +-
30 files changed, 641 insertions(+), 137 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index e48982e..d980065 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -269,6 +269,7 @@ public final class OzoneConsts {
public static final String KEY = "key";
public static final String SRC_KEY = "srcKey";
public static final String DST_KEY = "dstKey";
+ public static final String USED_BYTES = "usedBytes";
public static final String QUOTA_IN_BYTES = "quotaInBytes";
public static final String QUOTA_IN_COUNTS = "quotaInCounts";
public static final String OBJECT_ID = "objectID";
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index fb4677d..ca9235e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -83,6 +83,8 @@ public class OzoneVolume extends WithMetadata {
private int listCacheSize;
+ private long usedBytes;
+
/**
* Constructs OzoneVolume instance.
* @param conf Configuration object.
@@ -134,6 +136,17 @@ public class OzoneVolume extends WithMetadata {
@SuppressWarnings("parameternumber")
public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy,
String name, String admin, String owner, long quotaInBytes,
+ long quotaInCounts, long creationTime, long modificationTime,
+ List<OzoneAcl> acls, Map<String, String> metadata,
+ long usedBytes) {
+ this(conf, proxy, name, admin, owner, quotaInBytes, quotaInCounts,
+ creationTime, acls, metadata);
+ this.usedBytes = usedBytes;
+ }
+
+ @SuppressWarnings("parameternumber")
+ public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy,
+ String name, String admin, String owner, long quotaInBytes,
long quotaInCounts, long creationTime, List<OzoneAcl> acls) {
this(conf, proxy, name, admin, owner, quotaInBytes, quotaInCounts,
creationTime, acls, new HashMap<>());
@@ -254,6 +267,10 @@ public class OzoneVolume extends WithMetadata {
return acls;
}
+ public long getUsedBytes() {
+ return usedBytes;
+ }
+
/**
* Sets/Changes the owner of this Volume.
* @param userName new owner
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 7928805..fdd93fa 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -364,7 +364,8 @@ public class RpcClient implements ClientProtocol {
volume.getModificationTime(),
volume.getAclMap().ozoneAclGetProtobuf().stream().
map(OzoneAcl::fromProtobuf).collect(Collectors.toList()),
- volume.getMetadata());
+ volume.getMetadata(),
+ volume.getUsedBytes().sum());
}
@Override
@@ -420,7 +421,8 @@ public class RpcClient implements ClientProtocol {
volume.getModificationTime(),
volume.getAclMap().ozoneAclGetProtobuf().stream().
map(OzoneAcl::fromProtobuf).collect(Collectors.toList()),
- volume.getMetadata()))
+ volume.getMetadata(),
+ volume.getUsedBytes().sum()))
.collect(Collectors.toList());
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 1d8e954..fa7b697 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -23,6 +23,7 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -46,6 +47,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
private long quotaInBytes;
private long quotaInCounts;
private final OmOzoneAclMap aclMap;
+ private final LongAdder usedBytes = new LongAdder();
/**
* Private constructor, constructed via builder.
@@ -55,6 +57,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
* @param quotaInBytes - Volume Quota in bytes.
* @param quotaInCounts - Volume Quota in counts.
* @param metadata - metadata map for custom key/value data.
+ * @param usedBytes - Volume Quota Usage in bytes.
* @param aclMap - User to access rights map.
* @param creationTime - Volume creation time.
* @param objectID - ID of this object.
@@ -65,14 +68,15 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
"builder."})
private OmVolumeArgs(String adminName, String ownerName, String volume,
long quotaInBytes, long quotaInCounts, Map<String, String> metadata,
- OmOzoneAclMap aclMap, long creationTime, long modificationTime,
- long objectID, long updateID) {
+ long usedBytes, OmOzoneAclMap aclMap, long creationTime,
+ long modificationTime, long objectID, long updateID) {
this.adminName = adminName;
this.ownerName = ownerName;
this.volume = volume;
this.quotaInBytes = quotaInBytes;
this.quotaInCounts = quotaInCounts;
this.metadata = metadata;
+ this.usedBytes.add(usedBytes);
this.aclMap = aclMap;
this.creationTime = creationTime;
this.modificationTime = modificationTime;
@@ -172,6 +176,11 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
public OmOzoneAclMap getAclMap() {
return aclMap;
}
+
+ public LongAdder getUsedBytes() {
+ return usedBytes;
+ }
+
/**
* Returns new builder class that builds a OmVolumeArgs.
*
@@ -195,6 +204,8 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
String.valueOf(this.quotaInCounts));
auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID()));
auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID()));
+ auditMap.put(OzoneConsts.USED_BYTES,
+ String.valueOf(this.usedBytes));
return auditMap;
}
@@ -230,6 +241,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
private OmOzoneAclMap aclMap;
private long objectID;
private long updateID;
+ private long usedBytes;
/**
* Sets the Object ID for this Object.
@@ -307,6 +319,11 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
return this;
}
+ public Builder setUsedBytes(long quotaUsage) {
+ this.usedBytes = quotaUsage;
+ return this;
+ }
+
public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
aclMap.addAcl(acl);
return this;
@@ -321,8 +338,8 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
Preconditions.checkNotNull(ownerName);
Preconditions.checkNotNull(volume);
return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
- quotaInCounts, metadata, aclMap, creationTime, modificationTime,
- objectID, updateID);
+ quotaInCounts, metadata, usedBytes, aclMap, creationTime,
+ modificationTime, objectID, updateID);
}
}
@@ -342,6 +359,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
.setModificationTime(modificationTime)
.setObjectID(objectID)
.setUpdateID(updateID)
+ .setUsedBytes(usedBytes.sum())
.build();
}
@@ -356,6 +374,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
volInfo.getQuotaInBytes(),
volInfo.getQuotaInCounts(),
KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()),
+ volInfo.getUsedBytes(),
aclMap,
volInfo.getCreationTime(),
volInfo.getModificationTime(),
@@ -370,7 +389,8 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
", admin='" + adminName + '\'' +
", owner='" + ownerName + '\'' +
", creationTime='" + creationTime + '\'' +
- ", quota='" + quotaInBytes + '\'' +
+ ", quotaInBytes='" + quotaInBytes + '\'' +
+ ", usedBytes='" + usedBytes.sum() + '\'' +
'}';
}
@@ -386,7 +406,7 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable {
OmOzoneAclMap cloneAclMap = aclMap.copyObject();
return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
- quotaInCounts, cloneMetadata, cloneAclMap, creationTime,
- modificationTime, objectID, updateID);
+ quotaInCounts, cloneMetadata, usedBytes.sum(), cloneAclMap,
+ creationTime, modificationTime, objectID, updateID);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index a92b644..7b6f93e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -35,6 +35,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -106,9 +107,12 @@ import org.apache.commons.lang3.StringUtils;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME;
@@ -704,6 +708,254 @@ public abstract class TestOzoneRpcClientAbstract {
}
@Test
+ public void testVolumeUsedBytes() throws IOException {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ OzoneVolume volume = null;
+
+ int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
+ OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
+
+ // Write data larger than one block size.
+ String value = generateData(blockSize + 100,
+ (byte) RandomUtils.nextLong()).toString();
+
+ int valueLength = value.getBytes().length;
+ long currentQuotaUsage = 0L;
+ store.createVolume(volumeName);
+ volume = store.getVolume(volumeName);
+ // The initial value should be 0
+ Assert.assertEquals(0L, volume.getUsedBytes());
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ //Case1: Test the volumeUsedBytes of ONE replications.
+ String keyName1 = UUID.randomUUID().toString();
+ writeKey(bucket, keyName1, ONE, value, valueLength);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength, volume.getUsedBytes());
+ currentQuotaUsage += valueLength;
+
+ // Case2: Test overwrite the same KeyName under ONE Replicates, the
+ // keyLocationVersions of the Key is 2.
+ String keyName2 = UUID.randomUUID().toString();
+ writeKey(bucket, keyName2, ONE, value, valueLength);
+ // Overwrite the keyName2
+ writeKey(bucket, keyName2, ONE, value, valueLength);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 2 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 2;
+
+ // Case3: Test the volumeUsedBytes of THREE replications.
+ String keyName3 = UUID.randomUUID().toString();
+ writeKey(bucket, keyName3, THREE, value, valueLength);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 3 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 3;
+
+ // Case4: Test overwrite the same KeyName under THREE Replicates, the
+ // keyLocationVersions of the Key is 2.
+ String keyName4 = UUID.randomUUID().toString();
+ writeKey(bucket, keyName4, THREE, value, valueLength);
+ // Overwrite the keyName4
+ writeKey(bucket, keyName4, THREE, value, valueLength);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 3 * 2;
+
+ //Case5: Do not specify the value Length, simulate HDFS api writing.
+ // Test the volumeUsedBytes of ONE replications.
+ String keyName5 = UUID.randomUUID().toString();
+ writeFile(bucket, keyName5, ONE, value, 0);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength;
+
+ // Case6: Do not specify the value Length, simulate HDFS api writing.
+ // Test overwrite the same KeyName under ONE Replicates, the
+ // keyLocationVersions of the Key is 2.
+ String keyName6 = UUID.randomUUID().toString();
+ writeFile(bucket, keyName6, ONE, value, 0);
+ // Overwrite the keyName6
+ writeFile(bucket, keyName6, ONE, value, 0);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 2 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 2;
+
+ // Case7: Do not specify the value Length, simulate HDFS api writing.
+ // Test the volumeUsedBytes of THREE replications.
+ String keyName7 = UUID.randomUUID().toString();
+ writeFile(bucket, keyName7, THREE, value, 0);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 3 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 3;
+
+ // Case8: Do not specify the value Length, simulate HDFS api writing.
+ // Test overwrite the same KeyName under THREE Replicates, the
+ // keyLocationVersions of the Key is 2.
+ String keyName8 = UUID.randomUUID().toString();
+ writeFile(bucket, keyName8, THREE, value, 0);
+ // Overwrite the keyName8
+ writeFile(bucket, keyName8, THREE, value, 0);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage,
+ volume.getUsedBytes());
+ currentQuotaUsage += valueLength * 3 * 2;
+
+ // Case9: Test volumeUsedBytes when delete key of ONE replications.
+ bucket.deleteKey(keyName1);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(currentQuotaUsage - valueLength,
+ volume.getUsedBytes());
+ currentQuotaUsage -= valueLength;
+
+ // Case10: Test volumeUsedBytes when delete key of THREE
+ // replications.
+ bucket.deleteKey(keyName3);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(currentQuotaUsage - valueLength * 3,
+ volume.getUsedBytes());
+ currentQuotaUsage -= valueLength * 3;
+
+ // Case11: Test volumeUsedBytes when Test Delete keys. At this
+ // point all keys are deleted, volumeUsedBytes should be 0
+ List<String> keyList = new ArrayList<>();
+ keyList.add(keyName2);
+ keyList.add(keyName4);
+ keyList.add(keyName5);
+ keyList.add(keyName6);
+ keyList.add(keyName7);
+ keyList.add(keyName8);
+ bucket.deleteKeys(keyList);
+ volume = store.getVolume(volumeName);
+ Assert.assertEquals(0, volume.getUsedBytes());
+ }
+
+ @Test
+ public void testVolumeQuotaWithMultiThread() throws IOException,
+ InterruptedException{
+ String volumeName = UUID.randomUUID().toString();
+
+ int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
+ OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
+ // Write data larger than one block size.
+ String value = generateData(blockSize + 100,
+ (byte) RandomUtils.nextLong()).toString();
+
+ int valueLength = value.getBytes().length;
+ long currentQuotaUsage = 0L;
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ // The initial value should be 0
+ Assert.assertEquals(0L, volume.getUsedBytes());
+
+ CountDownLatch latch = new CountDownLatch(2);
+ AtomicInteger failCount = new AtomicInteger(0);
+
+ // Multiple threads write different buckets and ensure that the volume
+ // quota is correct.
+ Runnable r = () -> {
+ try {
+ for (int i = 0; i < 10; i++) {
+ String keyName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ OzoneOutputStream out = bucket.createKey(keyName, valueLength,
+ STAND_ALONE, ONE, new HashMap<>());
+ out.write(value.getBytes());
+ out.close();
+ }
+ latch.countDown();
+ } catch (IOException ex) {
+ latch.countDown();
+ failCount.incrementAndGet();
+ }
+ };
+
+ Thread thread1 = new Thread(r);
+ Thread thread2 = new Thread(r);
+
+ thread1.start();
+ thread2.start();
+
+ latch.await(6000, TimeUnit.SECONDS);
+
+ if (failCount.get() > 0) {
+ fail("testVolumeQuotaWithMultiThread failed");
+ }
+ currentQuotaUsage += valueLength * 10 * 2;
+ Assert.assertEquals(currentQuotaUsage,
+ store.getVolume(volumeName).getUsedBytes());
+
+ }
+
+ private void writeKey(OzoneBucket bucket, String keyName,
+ ReplicationFactor replication, String value, int valueLength)
+ throws IOException{
+ OzoneOutputStream out = bucket.createKey(keyName, valueLength, STAND_ALONE,
+ replication, new HashMap<>());
+ out.write(value.getBytes());
+ out.close();
+ }
+
+ private void writeFile(OzoneBucket bucket, String keyName,
+ ReplicationFactor replication, String value, int valueLength)
+ throws IOException{
+ OzoneOutputStream out = bucket.createFile(keyName, valueLength, STAND_ALONE,
+ replication, true, true);
+ out.write(value.getBytes());
+ out.close();
+ }
+
+ @Test
+ public void testVolumeQuotaWithUploadPart() throws IOException {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+ int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
+ OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
+ String sampleData = generateData(blockSize + 100,
+ (byte) RandomUtils.nextLong()).toString();
+ int valueLength = sampleData.getBytes().length;
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ // The initial value should be 0
+ Assert.assertEquals(0L, volume.getUsedBytes());
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+ STAND_ALONE, ONE);
+
+ assertNotNull(multipartInfo);
+ String uploadID = multipartInfo.getUploadID();
+ Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assert.assertEquals(keyName, multipartInfo.getKeyName());
+ assertNotNull(multipartInfo.getUploadID());
+
+ OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+ sampleData.length(), 1, uploadID);
+ ozoneOutputStream.write(string2Bytes(sampleData), 0,
+ sampleData.length());
+ ozoneOutputStream.close();
+
+ Assert.assertEquals(valueLength, store.getVolume(volumeName)
+ .getUsedBytes());
+
+ // Abort uploaded partKey and the usedBytes of volume should be 0.
+ bucket.abortMultipartUpload(keyName, uploadID);
+ Assert.assertEquals(0, store.getVolume(volumeName).getUsedBytes());
+ }
+
+ @Test
public void testValidateBlockLengthWithCommitKey() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -787,7 +1039,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes().length, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ THREE, new HashMap<>());
out.write(value.getBytes());
out.close();
OzoneKey key = bucket.getKey(keyName);
@@ -798,7 +1050,7 @@ public abstract class TestOzoneRpcClientAbstract {
is.close();
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
keyName, ReplicationType.RATIS,
- ReplicationFactor.THREE));
+ THREE));
Assert.assertEquals(value, new String(fileContent));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -828,7 +1080,7 @@ public abstract class TestOzoneRpcClientAbstract {
(byte) RandomUtils.nextLong()).toString();
OzoneOutputStream out = bucket.createKey(keyName,
data.getBytes().length, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ THREE, new HashMap<>());
out.write(data.getBytes());
out.close();
OzoneKey key = bucket.getKey(keyName);
@@ -839,7 +1091,7 @@ public abstract class TestOzoneRpcClientAbstract {
is.close();
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
keyName, ReplicationType.RATIS,
- ReplicationFactor.THREE));
+ THREE));
Assert.assertEquals(data, new String(fileContent));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -1121,7 +1373,7 @@ public abstract class TestOzoneRpcClientAbstract {
// Write data into a key
OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes().length, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ THREE, new HashMap<>());
out.write(value.getBytes());
out.close();
@@ -1698,7 +1950,6 @@ public abstract class TestOzoneRpcClientAbstract {
assertNotNull(multipartInfo.getUploadID());
}
-
@Test
public void testUploadPartWithNoOverride() throws IOException {
String volumeName = UUID.randomUUID().toString();
@@ -1800,7 +2051,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- ReplicationType.RATIS, ReplicationFactor.THREE);
+ ReplicationType.RATIS, THREE);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2693,7 +2944,7 @@ public abstract class TestOzoneRpcClientAbstract {
throws Exception {
// Initiate Multipart upload request
String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
- .RATIS, ReplicationFactor.THREE);
+ .RATIS, THREE);
// Upload parts
Map<Integer, String> partsMap = new TreeMap<>();
@@ -2730,7 +2981,7 @@ public abstract class TestOzoneRpcClientAbstract {
Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(),
bucket.getName(), keyName, ReplicationType.RATIS,
- ReplicationFactor.THREE));
+ THREE));
StringBuilder sb = new StringBuilder(length);
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 1fc1b16..fff2968 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -360,6 +360,8 @@ message VolumeInfo {
optional uint64 updateID = 9;
optional uint64 modificationTime = 10;
optional uint64 quotaInCounts = 11;
+ optional uint64 usedBytes = 12;
+
}
/**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 3226f78..f76ac98 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
import org.slf4j.Logger;
@@ -50,18 +51,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CreateFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .Type;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
@@ -69,10 +64,10 @@ import org.apache.hadoop.hdds.utils.UniqueId;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
/**
* Handles create file request.
@@ -191,6 +186,7 @@ public class OMFileCreateRequest extends OMKeyRequest {
boolean acquiredLock = false;
OmKeyInfo omKeyInfo = null;
+ OmVolumeArgs omVolumeArgs = null;
final List<OmKeyLocationInfo> locations = new ArrayList<>();
List<OmKeyInfo> missingParentInfos;
@@ -275,9 +271,10 @@ public class OMFileCreateRequest extends OMKeyRequest {
pathInfo.getMissingParents(), inheritAcls, trxnLogIndex);
// Append new blocks
- omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream()
- .map(OmKeyLocationInfo::getFromProtobuf)
- .collect(Collectors.toList()), false);
+ List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+ .stream().map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+ omKeyInfo.appendNewBlocks(newLocationList, false);
// Add to cache entry can be done outside of lock for this openKey.
// Even if bucket gets deleted, when commitKey we shall identify if
@@ -292,6 +289,12 @@ public class OMFileCreateRequest extends OMKeyRequest {
bucketName, Optional.absent(), Optional.of(missingParentInfos),
trxnLogIndex);
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ // update usedBytes atomically.
+ omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize
+ * omKeyInfo.getFactor().getNumber());
+
// Prepare response
omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
.setKeyInfo(omKeyInfo.getProtobuf())
@@ -299,7 +302,7 @@ public class OMFileCreateRequest extends OMKeyRequest {
.setOpenVersion(openVersion).build())
.setCmdType(Type.CreateFile);
omClientResponse = new OMFileCreateResponse(omResponse.build(),
- omKeyInfo, missingParentInfos, clientID);
+ omKeyInfo, missingParentInfos, clientID, omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index 94d700f..a3239a4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -25,6 +25,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -45,22 +46,15 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
- .KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
/**
* Handles allocate block request.
@@ -171,6 +165,7 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
OmKeyInfo openKeyInfo;
IOException exception = null;
+ OmVolumeArgs omVolumeArgs = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
@@ -196,8 +191,9 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
}
// Append new block
- openKeyInfo.appendNewBlocks(Collections.singletonList(
- OmKeyLocationInfo.getFromProtobuf(blockLocation)), false);
+ List<OmKeyLocationInfo> newLocationList = Collections.singletonList(
+ OmKeyLocationInfo.getFromProtobuf(blockLocation));
+ openKeyInfo.appendNewBlocks(newLocationList, false);
// Set modification time.
openKeyInfo.setModificationTime(keyArgs.getModificationTime());
@@ -210,10 +206,16 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
new CacheKey<>(openKeyName),
new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex));
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ // update usedBytes atomically.
+ omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize
+ * openKeyInfo.getFactor().getNumber());
+
omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
.setKeyLocation(blockLocation).build());
omClientResponse = new OMAllocateBlockResponse(omResponse.build(),
- openKeyInfo, clientID);
+ openKeyInfo, clientID, omVolumeArgs);
LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
volumeName, bucketName, openKeyName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 8ee3f17..c86ea56 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -29,6 +29,7 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -45,16 +46,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyLocation;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -126,6 +122,7 @@ public class OMKeyCommitRequest extends OMKeyRequest {
IOException exception = null;
OmKeyInfo omKeyInfo = null;
+ OmVolumeArgs omVolumeArgs = null;
OMClientResponse omClientResponse = null;
boolean bucketLockAcquired = false;
Result result;
@@ -193,8 +190,19 @@ public class OMKeyCommitRequest extends OMKeyRequest {
new CacheKey<>(dbOzoneKey),
new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ int factor = omKeyInfo.getFactor().getNumber();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ // update usedBytes atomically.
+ // The block was pre-requested and usedBytes was updated during
+ // createKey and allocateBlock. The space occupied by the key shall be
+ // based on the actual key size, and the total block size allocated
+ // earlier should be subtracted.
+ omVolumeArgs.getUsedBytes().add(omKeyInfo.getDataSize() * factor -
+ locationInfoList.size() * scmBlockSize * factor);
+
omClientResponse = new OMKeyCommitResponse(omResponse.build(),
- omKeyInfo, dbOzoneKey, dbOpenKey);
+ omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 1b712fb..0966cc5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -51,18 +52,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CreateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CreateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .Type;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
@@ -202,6 +197,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
OmKeyInfo omKeyInfo = null;
+ OmVolumeArgs omVolumeArgs = null;
final List< OmKeyLocationInfo > locations = new ArrayList<>();
boolean acquireLock = false;
@@ -284,9 +280,10 @@ public class OMKeyCreateRequest extends OMKeyRequest {
bucketName, keyName, clientID);
// Append new blocks
- omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream()
- .map(OmKeyLocationInfo::getFromProtobuf)
- .collect(Collectors.toList()), false);
+ List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+ .stream().map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+ omKeyInfo.appendNewBlocks(newLocationList, false);
// Add to cache entry can be done outside of lock for this openKey.
// Even if bucket gets deleted, when commitKey we shall identify if
@@ -295,6 +292,19 @@ public class OMKeyCreateRequest extends OMKeyRequest {
new CacheKey<>(dbOpenKeyName),
new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+
+ // Here we refer to the implementation of HDFS:
+ // If the key size is 600MB, when createKey, keyLocationInfo in
+ // keyLocationList is 3, and the every pre-allocated block length is
+ // 256MB. If the number of factor is 3, the total pre-allocated block
+ // size is 256MB * 3 * 3. We will allocate more 256MB * 3 * 3 - 600MB * 3
+ // = 504MB in advance, and we will subtract this part when we finally
+ // commitKey.
+ omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize
+ * omKeyInfo.getFactor().getNumber());
+
// Prepare response
omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
.setKeyInfo(omKeyInfo.getProtobuf())
@@ -302,7 +312,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
.setOpenVersion(openVersion).build())
.setCmdType(Type.CreateKey);
omClientResponse = new OMKeyCreateResponse(omResponse.build(),
- omKeyInfo, missingParentInfos, clientID);
+ omKeyInfo, missingParentInfos, clientID, omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 4d8562c..bb820b7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -22,6 +22,9 @@ import java.io.IOException;
import java.util.Map;
import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -40,14 +43,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .DeleteKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -111,6 +110,7 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
Result result = null;
+ OmVolumeArgs omVolumeArgs = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
@@ -143,6 +143,17 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
keyName)),
new CacheValue<>(Optional.absent(), trxnLogIndex));
+ long quotaReleased = 0;
+ int keyFactor = omKeyInfo.getFactor().getNumber();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ OmKeyLocationInfoGroup keyLocationGroup =
+ omKeyInfo.getLatestVersionLocations();
+ for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()){
+ quotaReleased += locationInfo.getLength() * keyFactor;
+ }
+ // update usedBytes atomically.
+ omVolumeArgs.getUsedBytes().add(-quotaReleased);
+
// No need to add cache entries to delete table. As delete table will
// be used by DeleteKeyService only, not used for any client response
// validation, so we don't need to add to cache.
@@ -150,7 +161,7 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
omClientResponse = new OMKeyDeleteResponse(omResponse
.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
- omKeyInfo, ozoneManager.isRatisEnabled());
+ omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 3cefa43..d30eb6b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -31,6 +31,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.PrefixManager;
import org.apache.hadoop.ozone.om.ResolvedBucket;
@@ -41,6 +42,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -549,4 +551,18 @@ public abstract class OMKeyRequest extends OMClientRequest {
}
return false;
}
+
+ /**
+ * Return volume info for the specified volume.
+ * @param omMetadataManager
+ * @param volume
+ * @return OmVolumeArgs
+ * @throws IOException
+ */
+ protected OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager,
+ String volume) {
+ return omMetadataManager.getVolumeTable().getCacheValue(
+ new CacheKey<>(omMetadataManager.getVolumeKey(volume)))
+ .getCacheValue();
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index 68b5a95..cb7edd6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -28,6 +28,9 @@ import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ResolvedBucket;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -151,21 +154,32 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
}
}
+ long quotaReleased = 0;
+ OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+
// Mark all keys which can be deleted, in cache as deleted.
for (OmKeyInfo omKeyInfo : omKeyInfoList) {
omMetadataManager.getKeyTable().addCacheEntry(
new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
omKeyInfo.getKeyName())),
new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+ int keyFactor = omKeyInfo.getFactor().getNumber();
+ OmKeyLocationInfoGroup keyLocationGroup =
+ omKeyInfo.getLatestVersionLocations();
+ for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()){
+ quotaReleased += locationInfo.getLength() * keyFactor;
+ }
}
+ // update usedBytes atomically.
+ omVolumeArgs.getUsedBytes().add(-quotaReleased);
omClientResponse = new OMKeysDeleteResponse(omResponse
.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
.setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys))
.setStatus(deleteStatus ? OK : PARTIAL_DELETE)
- .setSuccess(deleteStatus).build(),
- omKeyInfoList, trxnLogIndex,
- ozoneManager.isRatisEnabled());
+ .setSuccess(deleteStatus).build(), omKeyInfoList, trxnLogIndex,
+ ozoneManager.isRatisEnabled(), omVolumeArgs);
result = Result.SUCCESS;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index c0ef8b3..9c52e39 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -19,9 +19,11 @@
package org.apache.hadoop.ozone.om.request.s3.multipart;
import java.io.IOException;
+import java.util.Iterator;
import java.util.Map;
import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.slf4j.Logger;
@@ -38,16 +40,12 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart
.S3MultipartUploadAbortResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .MultipartUploadAbortResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -107,6 +105,7 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
getOmRequest());
OMClientResponse omClientResponse = null;
Result result = null;
+ OmVolumeArgs omVolumeArgs = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
@@ -124,6 +123,7 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
OmKeyInfo omKeyInfo =
omMetadataManager.getOpenKeyTable().get(multipartKey);
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
// If there is no entry in openKeyTable, then there is no multipart
// upload initiated for this key.
@@ -137,6 +137,20 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
.get(multipartKey);
multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+ // When aborting an uploaded key, we need to subtract the PartKey
+ // length from the volume usedBytes.
+ long quotaReleased = 0;
+ int keyFactor = omKeyInfo.getFactor().getNumber();
+ Iterator iter =
+ multipartKeyInfo.getPartKeyInfoMap().entrySet().iterator();
+ while(iter.hasNext()) {
+ Map.Entry entry = (Map.Entry)iter.next();
+ PartKeyInfo iterPartKeyInfo = (PartKeyInfo)entry.getValue();
+ quotaReleased +=
+ iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor;
+ }
+ omVolumeArgs.getUsedBytes().add(-quotaReleased);
+
// Update cache of openKeyTable and multipartInfo table.
// No need to add the cache entries to delete table, as the entries
// in delete table are not used by any read/write operations.
@@ -150,7 +164,8 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
omClientResponse = new S3MultipartUploadAbortResponse(
omResponse.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder()).build(),
- multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled());
+ multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+ omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 1e29d5f..d50f32d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -114,6 +115,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
String multipartKey = null;
OmMultipartKeyInfo multipartKeyInfo = null;
Result result = null;
+ OmVolumeArgs omVolumeArgs = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
@@ -207,13 +209,24 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
new CacheKey<>(openKey),
new CacheValue<>(Optional.absent(), trxnLogIndex));
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ int factor = omKeyInfo.getFactor().getNumber();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ // update usedBytes atomically.
+ // The block was pre-requested and usedBytes was updated during
+ // createKey and allocateBlock. The space occupied by the key shall be
+ // based on the actual key size, and the total block size allocated
+ // earlier should be subtracted.
+ omVolumeArgs.getUsedBytes().add(omKeyInfo.getDataSize() * factor -
+ keyArgs.getKeyLocationsList().size() * scmBlockSize * factor);
+
omResponse.setCommitMultiPartUploadResponse(
MultipartCommitUploadPartResponse.newBuilder()
.setPartName(partName));
omClientResponse = new S3MultipartUploadCommitPartResponse(
omResponse.build(), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
- ozoneManager.isRatisEnabled());
+ ozoneManager.isRatisEnabled(), omVolumeArgs);
result = Result.SUCCESS;
} catch (IOException ex) {
@@ -222,7 +235,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
omClientResponse = new S3MultipartUploadCommitPartResponse(
createErrorOMResponse(omResponse, exception), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
- ozoneManager.isRatisEnabled());
+ ozoneManager.isRatisEnabled(), omVolumeArgs);
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
omDoubleBufferHelper);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
index e54379b..9d7df23 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.response.file;
import javax.annotation.Nonnull;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
@@ -33,9 +34,10 @@ import java.util.List;
public class OMFileCreateResponse extends OMKeyCreateResponse {
public OMFileCreateResponse(@Nonnull OMResponse omResponse,
- @Nonnull OmKeyInfo omKeyInfo,
- @Nonnull List<OmKeyInfo> parentKeyInfos, long openKeySessionID) {
- super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID);
+ @Nonnull OmKeyInfo omKeyInfo, @Nonnull List<OmKeyInfo> parentKeyInfos,
+ long openKeySessionID, OmVolumeArgs omVolumeArgs) {
+ super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID,
+ omVolumeArgs);
}
/**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index 5ea44a7..cbaef70 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -39,12 +40,14 @@ public class OMAllocateBlockResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
private long clientID;
+ private OmVolumeArgs omVolumeArgs;
public OMAllocateBlockResponse(@Nonnull OMResponse omResponse,
- @Nonnull OmKeyInfo omKeyInfo, long clientID) {
+ @Nonnull OmKeyInfo omKeyInfo, long clientID, OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.clientID = clientID;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -64,5 +67,10 @@ public class OMAllocateBlockResponse extends OMClientResponse {
omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), clientID);
omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey,
omKeyInfo);
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index c0216eb..0d7a6ba 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -40,13 +41,16 @@ public class OMKeyCommitResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
private String ozoneKeyName;
private String openKeyName;
+ private OmVolumeArgs omVolumeArgs;
public OMKeyCommitResponse(@Nonnull OMResponse omResponse,
- @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName) {
+ @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName,
+ OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.ozoneKeyName = ozoneKeyName;
this.openKeyName = openKeyName;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -68,6 +72,11 @@ public class OMKeyCommitResponse extends OMClientResponse {
omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
omKeyInfo);
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 4d0899d..7e48a8f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -24,6 +24,7 @@ import javax.annotation.Nonnull;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -45,14 +46,16 @@ public class OMKeyCreateResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
private long openKeySessionID;
private List<OmKeyInfo> parentKeyInfos;
+ private OmVolumeArgs omVolumeArgs;
public OMKeyCreateResponse(@Nonnull OMResponse omResponse,
- @Nonnull OmKeyInfo omKeyInfo,
- List<OmKeyInfo> parentKeyInfos, long openKeySessionID) {
+ @Nonnull OmKeyInfo omKeyInfo, List<OmKeyInfo> parentKeyInfos,
+ long openKeySessionID, OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.openKeySessionID = openKeySessionID;
this.parentKeyInfos = parentKeyInfos;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -91,6 +94,11 @@ public class OMKeyCreateResponse extends OMClientResponse {
omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), openKeySessionID);
omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation,
openKey, omKeyInfo);
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index f0ba991..8c4b7fd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -44,12 +45,15 @@ public class OMKeyDeleteResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
private boolean isRatisEnabled;
+ private OmVolumeArgs omVolumeArgs;
public OMKeyDeleteResponse(@Nonnull OMResponse omResponse,
- @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled) {
+ @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled,
+ @Nonnull OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.isRatisEnabled = isRatisEnabled;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -89,6 +93,11 @@ public class OMKeyDeleteResponse extends OMClientResponse {
isRatisEnabled);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
ozoneKey, repeatedOmKeyInfo);
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
index 9d2cd53..c98794a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -43,14 +44,16 @@ public class OMKeysDeleteResponse extends OMClientResponse {
private List<OmKeyInfo> omKeyInfoList;
private boolean isRatisEnabled;
private long trxnLogIndex;
+ private OmVolumeArgs omVolumeArgs;
public OMKeysDeleteResponse(@Nonnull OMResponse omResponse,
- @Nonnull List<OmKeyInfo> keyDeleteList,
- long trxnLogIndex, boolean isRatisEnabled) {
+ @Nonnull List<OmKeyInfo> keyDeleteList, long trxnLogIndex,
+ boolean isRatisEnabled, OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.omKeyInfoList = keyDeleteList;
this.isRatisEnabled = isRatisEnabled;
this.trxnLogIndex = trxnLogIndex;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -105,5 +108,10 @@ public class OMKeysDeleteResponse extends OMClientResponse {
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
deleteKey, repeatedOmKeyInfo);
}
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index 47cde08..1b2ed8d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -50,14 +51,16 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
private String multipartKey;
private OmMultipartKeyInfo omMultipartKeyInfo;
private boolean isRatisEnabled;
+ private OmVolumeArgs omVolumeArgs;
public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse,
- String multipartKey,
- @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, boolean isRatisEnabled) {
+ String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
+ boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.multipartKey = multipartKey;
this.omMultipartKeyInfo = omMultipartKeyInfo;
this.isRatisEnabled = isRatisEnabled;
+ this.omVolumeArgs = omVolumeArgs;
}
/**
@@ -96,6 +99,11 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
partKeyInfo.getPartName(), repeatedOmKeyInfo);
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index f68af4a..0cbab3c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -56,6 +57,7 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
private OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo;
private OmKeyInfo openPartKeyInfoToBeDeleted;
private boolean isRatisEnabled;
+ private OmVolumeArgs omVolumeArgs;
/**
* Regular response.
@@ -68,12 +70,13 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
* @param omMultipartKeyInfo
* @param oldPartKeyInfo
*/
+ @SuppressWarnings("checkstyle:ParameterNumber")
public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse,
String multipartKey, String openKey,
@Nullable OmMultipartKeyInfo omMultipartKeyInfo,
@Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
@Nullable OmKeyInfo openPartKeyInfoToBeDeleted,
- boolean isRatisEnabled) {
+ boolean isRatisEnabled, OmVolumeArgs omVolumeArgs) {
super(omResponse);
this.multipartKey = multipartKey;
this.openKey = openKey;
@@ -81,6 +84,7 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
this.oldPartKeyInfo = oldPartKeyInfo;
this.openPartKeyInfoToBeDeleted = openPartKeyInfoToBeDeleted;
this.isRatisEnabled = isRatisEnabled;
+ this.omVolumeArgs = omVolumeArgs;
}
@Override
@@ -143,6 +147,10 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
// safely delete part key info from open key table.
omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
openKey);
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
+ omVolumeArgs);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index d066a08..a483455 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.om.response.key;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -39,6 +41,9 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OMResponse omResponse = OMResponse.newBuilder()
.setAllocateBlockResponse(
@@ -47,7 +52,8 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
.setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
.build();
OMAllocateBlockResponse omAllocateBlockResponse =
- new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID);
+ new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
+ omVolumeArgs);
String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
keyName, clientID);
@@ -66,6 +72,9 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
public void testAddToDBBatchWithErrorResponse() throws Exception {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OMResponse omResponse = OMResponse.newBuilder()
.setAllocateBlockResponse(
@@ -74,7 +83,8 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
.setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
.build();
OMAllocateBlockResponse omAllocateBlockResponse =
- new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID);
+ new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
+ omVolumeArgs);
// Before calling addToDBBatch
String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index a35494e..2169665 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.om.response.key;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -35,6 +37,9 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OzoneManagerProtocolProtos.OMResponse omResponse =
OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -55,7 +60,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
- omResponse, omKeyInfo, ozoneKey, openKey);
+ omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs);
omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -73,6 +78,9 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OzoneManagerProtocolProtos.OMResponse omResponse =
OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -87,7 +95,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
keyName);
OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
- omResponse, omKeyInfo, ozoneKey, openKey);
+ omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs);
// As during commit Key, entry will be already there in openKeyTable.
// Adding it here.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 7aeba72..006d65f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.om.response.key;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -41,6 +43,10 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
+
OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
CreateKeyResponse.getDefaultInstance())
.setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -48,7 +54,8 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
.build();
OMKeyCreateResponse omKeyCreateResponse =
- new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID);
+ new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
+ omVolumeArgs);
String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
keyName, clientID);
@@ -66,6 +73,10 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
+
OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
CreateKeyResponse.getDefaultInstance())
.setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -73,7 +84,8 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
.build();
OMKeyCreateResponse omKeyCreateResponse =
- new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID);
+ new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
+ omVolumeArgs);
// Before calling addToDBBatch
String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index b2626da..bbf22ce 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -42,6 +44,9 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OzoneManagerProtocolProtos.OMResponse omResponse =
OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -51,7 +56,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
.build();
OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
- omResponse, omKeyInfo, true);
+ omResponse, omKeyInfo, true, omVolumeArgs);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
@@ -78,6 +83,9 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
// Add block to key.
List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -113,7 +121,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
.build();
OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
- omResponse, omKeyInfo, true);
+ omResponse, omKeyInfo, true, omVolumeArgs);
Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -133,6 +141,9 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
public void testAddToDBBatchWithErrorResponse() throws Exception {
OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
bucketName, keyName, replicationType, replicationFactor);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
OzoneManagerProtocolProtos.OMResponse omResponse =
OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -142,7 +153,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
.build();
OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
- omResponse, omKeyInfo, true);
+ omResponse, omKeyInfo, true, omVolumeArgs);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
index c5dd96b..de8d95d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
@@ -19,11 +19,13 @@
package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -73,11 +75,16 @@ public class TestOMKeysDeleteResponse extends TestOMKeyResponse {
.setSuccess(true)
.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
.setStatus(true)).build();
- OMClientResponse omKeysDeleteResponse =
- new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true);
- omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
+
+ OMClientResponse omKeysDeleteResponse = new OMKeysDeleteResponse(
+ omResponse, omKeyInfoList, 10L, true,
+ omVolumeArgs);
+ omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
omMetadataManager.getStore().commitBatchOperation(batchOperation);
for (String ozKey : ozoneKeys) {
@@ -90,7 +97,6 @@ public class TestOMKeysDeleteResponse extends TestOMKeyResponse {
Assert.assertEquals(1, repeatedOmKeyInfo.getOmKeyInfoList().size());
Assert.assertEquals(10L,
repeatedOmKeyInfo.getOmKeyInfoList().get(0).getUpdateID());
-
}
}
@@ -105,13 +111,16 @@ public class TestOMKeysDeleteResponse extends TestOMKeyResponse {
.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
.setStatus(false)).build();
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
- OMClientResponse omKeysDeleteResponse =
- new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true);
+ OMClientResponse omKeysDeleteResponse = new OMKeysDeleteResponse(
+ omResponse, omKeyInfoList, 10L, true,
+ omVolumeArgs);
omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
-
for (String ozKey : ozoneKeys) {
Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozKey));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 973783f..6900bbb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.UUID;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
@@ -108,7 +109,7 @@ public class TestS3MultipartResponse {
public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
String multipartKey, long timeStamp,
- OmMultipartKeyInfo omMultipartKeyInfo) {
+ OmMultipartKeyInfo omMultipartKeyInfo, OmVolumeArgs omVolumeArgs) {
OMResponse omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -117,7 +118,7 @@ public class TestS3MultipartResponse {
MultipartUploadAbortResponse.newBuilder().build()).build();
return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
- omMultipartKeyInfo, true);
+ omMultipartKeyInfo, true, omVolumeArgs);
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index 60aacd5..fd53721 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.s3.multipart;
import java.util.UUID;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.junit.Assert;
import org.junit.Test;
@@ -46,7 +47,9 @@ public class TestS3MultipartUploadAbortResponse
String multipartUploadID = UUID.randomUUID().toString();
String multipartKey = omMetadataManager.getMultipartKey(volumeName,
bucketName, keyName, multipartUploadID);
-
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
createS3InitiateMPUResponse(volumeName, bucketName, keyName,
multipartUploadID);
@@ -56,7 +59,8 @@ public class TestS3MultipartUploadAbortResponse
S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
createS3AbortMPUResponse(multipartKey, Time.now(),
- s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo());
+ s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
+ omVolumeArgs);
s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
batchOperation);
@@ -81,6 +85,9 @@ public class TestS3MultipartUploadAbortResponse
String multipartUploadID = UUID.randomUUID().toString();
String multipartKey = omMetadataManager.getMultipartKey(volumeName,
bucketName, keyName, multipartUploadID);
+ OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+ .setOwnerName(keyName).setAdminName(keyName)
+ .setVolume(volumeName).setCreationTime(Time.now()).build();
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
createS3InitiateMPUResponse(volumeName, bucketName, keyName,
@@ -109,7 +116,8 @@ public class TestS3MultipartUploadAbortResponse
long timeStamp = Time.now();
S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
createS3AbortMPUResponse(multipartKey, timeStamp,
- s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo());
+ s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
+ omVolumeArgs);
s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
batchOperation);
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org