Posted to commits@ozone.apache.org by um...@apache.org on 2022/06/16 01:34:31 UTC

[ozone] branch master updated: HDDS-6577. Configurations to reserve HDDS volume space. (#3484)

This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new e4ed9d436c HDDS-6577. Configurations to reserve HDDS volume space. (#3484)
e4ed9d436c is described below

commit e4ed9d436c210206d261d36c5d72649a4766591a
Author: Aswin Shakil Balasubramanian <as...@gmail.com>
AuthorDate: Wed Jun 15 18:34:25 2022 -0700

    HDDS-6577. Configurations to reserve HDDS volume space. (#3484)
---
 .../org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java  |  8 ++++++++
 .../hadoop/ozone/container/common/impl/ContainerData.java   |  7 ++++++-
 .../hadoop/ozone/container/common/volume/StorageVolume.java | 12 ++++++++++++
 .../hadoop/ozone/container/common/volume/VolumeInfo.java    |  8 ++++++++
 .../hadoop/ozone/container/common/volume/VolumeUsage.java   |  8 ++++++++
 .../statemachine/background/BlockDeletingService.java       |  4 +++-
 .../ozone/container/metadata/AbstractDatanodeStore.java     |  8 ++++++++
 .../ozone/container/common/TestKeyValueContainerData.java   |  4 ++++
 .../java/org/apache/hadoop/hdds/utils/db/DBProfile.java     |  2 +-
 .../commandhandler/TestRefreshVolumeUsageHandler.java       | 13 ++++++-------
 10 files changed, 64 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
index 319fefdf4e..7808dccaf5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -89,6 +89,14 @@ public class CachingSpaceUsageSource implements SpaceUsageSource {
     return cachedValue.get();
   }
 
+  public void incrementUsedSpace(long usedSpace) {
+    cachedValue.addAndGet(usedSpace);
+  }
+
+  public void decrementUsedSpace(long reclaimedSpace) {
+    cachedValue.addAndGet(-1 * reclaimedSpace);
+  }
+
   public void start() {
     if (executor != null) {
       long initialDelay = cachedValue.get() > 0 ? refresh.toMillis() : 0;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index bc6147e500..dbbd457447 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -425,7 +425,12 @@ public abstract class ContainerData {
     long unused = getMaxSize() - getBytesUsed();
 
     this.writeBytes.addAndGet(bytes);
-
+    /*
+       Increase the cached used space in VolumeInfo, since it may be
+       stale: DU or DedicatedDiskSpaceUsage only runs periodically to
+       update the used space in VolumeInfo.
+     */
+    this.getVolume().incrementUsedSpace(bytes);
     // only if container size < max size
     if (committedSpace && unused > 0) {
       //with this write, container size might breach max size
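
The comment added above captures the idea behind this change: the used-space figure exposed by a volume is a cached value that DU (or DedicatedDiskSpaceUsage) only recomputes periodically, so block writes and deletes now adjust the cache between refreshes. A minimal, self-contained sketch of that pattern (hypothetical class, not the actual Ozone code):

    import java.util.concurrent.atomic.AtomicLong;

    /**
     * Illustrative sketch of an incrementally-adjusted usage cache:
     * a periodic scan overwrites the value, while writes and deletes
     * adjust it in between so callers see a closer-to-current figure.
     */
    class CachedUsageSketch {
      private final AtomicLong cachedValue = new AtomicLong(0);

      void refresh(long measuredUsedSpace) {        // periodic DU-style scan
        cachedValue.set(measuredUsedSpace);
      }

      void incrementUsedSpace(long usedSpace) {     // on block write
        cachedValue.addAndGet(usedSpace);
      }

      void decrementUsedSpace(long reclaimedSpace) { // on block delete
        cachedValue.addAndGet(-reclaimedSpace);
      }

      long getUsedSpace() {
        return cachedValue.get();
      }
    }
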
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 18892b68a2..561708b852 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -396,6 +396,18 @@ public abstract class StorageVolume
     return this.volumeInfo;
   }
 
+  public void incrementUsedSpace(long usedSpace) {
+    if (this.volumeInfo != null) {
+      this.volumeInfo.incrementUsedSpace(usedSpace);
+    }
+  }
+
+  public void decrementUsedSpace(long reclaimedSpace) {
+    if (this.volumeInfo != null) {
+      this.volumeInfo.decrementUsedSpace(reclaimedSpace);
+    }
+  }
+
   public VolumeSet getVolumeSet() {
     return this.volumeSet;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 621f5f9c16..71063e23fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -206,6 +206,14 @@ public final class VolumeInfo {
     return Math.max(Math.min(avail, usage.getAvailable()), 0);
   }
 
+  public void incrementUsedSpace(long usedSpace) {
+    usage.incrementUsedSpace(usedSpace);
+  }
+
+  public void decrementUsedSpace(long reclaimedSpace) {
+    usage.decrementUsedSpace(reclaimedSpace);
+  }
+
   public void refreshNow() {
     usage.refreshNow();
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 1fcac8327f..fe12d4be9c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -61,6 +61,14 @@ public class VolumeUsage implements SpaceUsageSource {
     return source.getUsedSpace();
   }
 
+  public void incrementUsedSpace(long usedSpace) {
+    source.incrementUsedSpace(usedSpace);
+  }
+
+  public void decrementUsedSpace(long reclaimedSpace) {
+    source.decrementUsedSpace(reclaimedSpace);
+  }
+
   /**
    * Get the space used by others except hdds.
    * DU is refreshed periodically and could be not exact,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 85d6591e30..084493daeb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -379,6 +379,7 @@ public class BlockDeletingService extends BackgroundService {
           containerData.decrPendingDeletionBlocks(deletedBlocksCount);
           containerData.decrBlockCount(deletedBlocksCount);
           containerData.decrBytesUsed(releasedBytes);
+          containerData.getVolume().decrementUsedSpace(releasedBytes);
         }
 
         if (!succeedBlocks.isEmpty()) {
@@ -489,10 +490,11 @@ public class BlockDeletingService extends BackgroundService {
               deletedBlocksCount, releasedBytes);
 
           // update count of pending deletion blocks, block count and used
-          // bytes in in-memory container status.
+          // bytes in in-memory container status and used space in volume.
           containerData.decrPendingDeletionBlocks(deletedBlocksCount);
           containerData.decrBlockCount(deletedBlocksCount);
           containerData.decrBytesUsed(releasedBytes);
+          containerData.getVolume().decrementUsedSpace(releasedBytes);
         }
 
         LOG.debug("Container: {}, deleted blocks: {}, space reclaimed: {}, " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index c43b37cd89..d23e1bd981 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.ozone.container.metadata;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
+import org.apache.hadoop.hdds.utils.db.DBProfile;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -103,6 +105,12 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
       options.setCreateIfMissing(true);
       options.setCreateMissingColumnFamilies(true);
 
+      if (this.dbDef instanceof DatanodeSchemaOneDBDefinition ||
+          this.dbDef instanceof DatanodeSchemaTwoDBDefinition) {
+        long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2));
+        options.setMaxTotalWalSize(maxWalSize);
+      }
+
       String rocksDbStat = config.getTrimmed(
               OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
               OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index 394231260c..86701bb068 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -22,12 +22,14 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.mockito.Mockito;
 
 import static org.junit.Assert.assertEquals;
 
@@ -70,10 +72,12 @@ public class TestKeyValueContainerData {
     AtomicLong val = new AtomicLong(0);
     UUID pipelineId = UUID.randomUUID();
     UUID datanodeId = UUID.randomUUID();
+    HddsVolume vol = Mockito.mock(HddsVolume.class);
 
     KeyValueContainerData kvData = new KeyValueContainerData(containerId,
         layout,
         MAXSIZE, pipelineId.toString(), datanodeId.toString());
+    kvData.setVolume(vol);
 
     assertEquals(containerType, kvData.getContainerType());
     assertEquals(containerId, kvData.getContainerID());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
index 2fa5d22187..55c9397e47 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
@@ -114,7 +114,7 @@ public enum DBProfile {
     }
   };
 
-  private static long toLong(double value) {
+  public static long toLong(double value) {
     BigDecimal temp = BigDecimal.valueOf(value);
     return temp.longValue();
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
index f8a90f41cd..7bf88c01f1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
@@ -104,7 +104,8 @@ public class TestRefreshVolumeUsageHandler {
     key.close();
 
     //a new key is created, but the datanode default REFRESH_PERIOD is 1 hour,
-    //so scm does not get the latest usage info of this datanode for now.
+    //but the cached usage is already updated, so the SCM will eventually
+    //get the new used space from the datanode through a node report.
     Assert.assertTrue(cluster.getStorageContainerManager()
             .getScmNodeManager().getUsageInfo(datanodeDetails)
             .getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
@@ -113,18 +114,16 @@ public class TestRefreshVolumeUsageHandler {
       GenericTestUtils.waitFor(() -> isUsageInfoRefreshed(cluster,
           datanodeDetails, currentScmUsed), 500, 5 * 1000);
     } catch (TimeoutException te) {
-      //no op , this is to show that if we do not trigger refresh volume
-      //usage info command, we can not get the latest usage info within
-      // a refresh period
+      //no op
     } catch (InterruptedException ie) {
       //no op
     }
 
-    //after waiting for several node report , this usage info in scm
-    //is still not updated
+    //after waiting for several node reports, this usage info
+    //in SCM should be updated, as we have updated the DN's cached usage info.
     Assert.assertTrue(cluster.getStorageContainerManager()
         .getScmNodeManager().getUsageInfo(datanodeDetails)
-        .getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
+        .getScmNodeStat().getScmUsed().isGreater(currentScmUsed));
 
     //send refresh volume usage command to datanode
     cluster.getStorageContainerManager()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org