Posted to commits@ozone.apache.org by ca...@apache.org on 2022/10/20 13:09:28 UTC

[ozone] branch ozone-1.3 updated (409022b320 -> 95514764de)

This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a change to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git


    from 409022b320 Update proto.lock files
     new add7b9f874 HDDS-7352. OM log flooded by AWSV4AuthValidator (#3857)
     new 36490993db HDDS-7341. EC: Close pipelines with unregistered nodes (#3850)
     new 4d563bd1c4 HDDS-7305. Fix Hadoop imports (#3822)
     new 95514764de HDDS-7354. SchemaV3 blockData not deleted in table (#3860)

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../background/BlockDeletingService.java           |  4 +-
 .../container/common/TestBlockDeletingService.java | 64 ++++++++++++++++++++--
 hadoop-hdds/framework/pom.xml                      |  8 +++
 hadoop-hdds/hadoop-dependency-server/pom.xml       |  4 ++
 hadoop-hdds/server-scm/pom.xml                     | 10 ++++
 .../hdds/scm/pipeline/PipelineManagerImpl.java     | 42 ++++++++++++--
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java | 41 +++++++++++---
 hadoop-ozone/dist/src/main/license/jar-report.txt  |  1 -
 .../hadoop/ozone/security/AWSV4AuthValidator.java  |  4 +-
 9 files changed, 156 insertions(+), 22 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 02/04: HDDS-7341. EC: Close pipelines with unregistered nodes (#3850)

Posted by ca...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 36490993db7fd123fe29e63848da6d4185aa7cbf
Author: Stephen O'Donnell <st...@gmail.com>
AuthorDate: Wed Oct 19 09:53:30 2022 +0100

    HDDS-7341. EC: Close pipelines with unregistered nodes (#3850)
---
 .../hdds/scm/pipeline/PipelineManagerImpl.java     | 40 ++++++++++++++++++---
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java | 41 +++++++++++++++++-----
 2 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index aaa3088f9a..044cbc0166 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -509,22 +509,52 @@ public class PipelineManagerImpl implements PipelineManager {
       if (p.getPipelineState() == Pipeline.PipelineState.ALLOCATED &&
           (currentTime.toEpochMilli() - p.getCreationTimestamp()
               .toEpochMilli() >= pipelineScrubTimeoutInMills)) {
-        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
-            " since it stays at ALLOCATED stage for " +
+        LOG.info("Scrubbing pipeline: id: {} since it stays at ALLOCATED " +
+            "stage for {} mins.", p.getId(),
             Duration.between(currentTime, p.getCreationTimestamp())
-                .toMinutes() + " mins.");
+                .toMinutes());
         closePipeline(p, false);
       }
       // scrub pipelines who stay CLOSED for too long.
       if (p.getPipelineState() == Pipeline.PipelineState.CLOSED) {
-        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
-            " since it stays at CLOSED stage.");
+        LOG.info("Scrubbing pipeline: id: {} since it stays at CLOSED stage.",
+            p.getId());
         closeContainersForPipeline(p.getId());
         removePipeline(p);
       }
+      // If a datanode is stopped and then SCM is restarted, a pipeline can get
+      // stuck in an open state. For Ratis, provided some other DNs that were
+      // part of the open pipeline register to SCM after the restart, the Ratis
+      // pipeline close will get triggered by the DNs. For EC that will never
+      // happen, as the DNs are not aware of the pipeline. Therefore, we
+      // should close any pipelines in the scrubber if they have nodes which
+      // are not registered.
+      if (isOpenWithUnregisteredNodes(p)) {
+        LOG.info("Scrubbing pipeline: id: {} as it has unregistered nodes",
+            p.getId());
+        closeContainersForPipeline(p.getId());
+        closePipeline(p, true);
+      }
     }
   }
 
+  /**
+   * @param pipeline The pipeline to check
+   * @return True if the pipeline is open and contains unregistered nodes. False
+   *         otherwise.
+   */
+  private boolean isOpenWithUnregisteredNodes(Pipeline pipeline) {
+    if (!pipeline.isOpen()) {
+      return false;
+    }
+    for (DatanodeDetails dn : pipeline.getNodes()) {
+      if (nodeManager.getNodeByUuid(dn.getUuidString()) == null) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Schedules a fixed interval job to create pipelines.
    */
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 8fa45f5842..1ed9b845ac 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm.pipeline;
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,12 +30,14 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBuffer;
 import org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBufferStub;
@@ -125,6 +128,10 @@ public class TestPipelineManagerImpl {
         GenericTestUtils.getRandomizedTempPath());
     scm = HddsTestUtils.getScm(conf);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+    // Mock Node Manager is not able to correctly set up things for the EC
+    // placement policy (Rack Scatter), so just use the random one.
+    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY,
+        SCMContainerPlacementRandom.class.getName());
     dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
     nodeManager = new MockNodeManager(true, 20);
     maxPipelineCount = nodeManager.getNodeCount(
@@ -151,7 +158,7 @@ public class TestPipelineManagerImpl {
       throws IOException {
     return PipelineManagerImpl.newPipelineManager(conf,
         SCMHAManagerStub.getInstance(isLeader),
-        new MockNodeManager(true, 20),
+        nodeManager,
         SCMDBDefinition.PIPELINES.getTable(dbStore),
         new EventQueue(),
         scmContext,
@@ -163,7 +170,7 @@ public class TestPipelineManagerImpl {
       boolean isLeader, SCMHADBTransactionBuffer buffer) throws IOException {
     return PipelineManagerImpl.newPipelineManager(conf,
         SCMHAManagerStub.getInstance(isLeader, buffer),
-        new MockNodeManager(true, 20),
+        nodeManager,
         SCMDBDefinition.PIPELINES.getTable(dbStore),
         new EventQueue(),
         SCMContext.emptyContext(),
@@ -341,7 +348,6 @@ public class TestPipelineManagerImpl {
   @Test
   public void testRemovePipeline() throws Exception {
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     // Create a pipeline
     Pipeline pipeline = pipelineManager.createPipeline(
         RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
@@ -391,7 +397,6 @@ public class TestPipelineManagerImpl {
   @Test
   public void testClosePipelineShouldFailOnFollower() throws Exception {
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     Pipeline pipeline = pipelineManager.createPipeline(
         RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
     Assertions.assertEquals(1, pipelineManager.getPipelines().size());
@@ -413,7 +418,6 @@ public class TestPipelineManagerImpl {
   @Test
   public void testPipelineReport() throws Exception {
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     SCMSafeModeManager scmSafeModeManager =
         new SCMSafeModeManager(conf, new ArrayList<>(), null, pipelineManager,
             new EventQueue(), serviceManager, scmContext);
@@ -571,7 +575,6 @@ public class TestPipelineManagerImpl {
         OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, 50, TimeUnit.SECONDS);
 
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     Pipeline allocatedPipeline = pipelineManager
         .createPipeline(RatisReplicationConfig
             .getInstance(ReplicationFactor.THREE));
@@ -628,13 +631,36 @@ public class TestPipelineManagerImpl {
     pipelineManager.close();
   }
 
+  @Test
+  public void testScrubOpenWithUnregisteredNodes() throws Exception {
+    PipelineManagerImpl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager
+        .createPipeline(new ECReplicationConfig(3, 2));
+    pipelineManager.openPipeline(pipeline.getId());
+
+    // Scrubbing the pipelines should not affect this pipeline
+    pipelineManager.scrubPipelines();
+    pipeline = pipelineManager.getPipeline(pipeline.getId());
+    Assertions.assertEquals(Pipeline.PipelineState.OPEN,
+        pipeline.getPipelineState());
+
+    // Now, "unregister" one of the nodes in the pipeline
+    DatanodeDetails firstDN = nodeManager.getNodeByUuid(
+        pipeline.getNodes().get(0).getUuidString());
+    nodeManager.getClusterNetworkTopologyMap().remove(firstDN);
+
+    pipelineManager.scrubPipelines();
+    pipeline = pipelineManager.getPipeline(pipeline.getId());
+    Assertions.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipeline.getPipelineState());
+  }
+
   @Test
   public void testScrubPipelinesShouldFailOnFollower() throws Exception {
     conf.setTimeDuration(
         OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, 10, TimeUnit.SECONDS);
 
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     Pipeline pipeline = pipelineManager
         .createPipeline(RatisReplicationConfig
             .getInstance(ReplicationFactor.THREE));
@@ -765,7 +791,6 @@ public class TestPipelineManagerImpl {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
             .captureLogs(LoggerFactory.getLogger(PipelineManagerImpl.class));
     PipelineManagerImpl pipelineManager = createPipelineManager(true);
-    pipelineManager.setScmContext(scmContext);
     Pipeline pipeline = pipelineManager.createPipeline(
             RatisReplicationConfig
                 .getInstance(HddsProtos.ReplicationFactor.THREE));
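
For readers skimming the diff, here is a condensed, self-contained toy
model of the predicate this commit adds (plain Java collections, not the
Ozone classes): an OPEN pipeline is scrubbed as soon as any member
datanode is unknown to the node manager, because for EC the datanodes
themselves will never trigger the close.

    import java.util.List;
    import java.util.Set;

    // Toy model of the new scrubber check. The node manager is modeled as
    // the set of datanode UUIDs currently registered with SCM, mirroring
    // the null check against nodeManager.getNodeByUuid() in the real code.
    public class UnregisteredNodeScrubDemo {

      record Pipeline(String id, boolean open, List<String> nodeUuids) { }

      static boolean isOpenWithUnregisteredNodes(Pipeline p,
          Set<String> registeredUuids) {
        if (!p.open()) {
          return false;
        }
        for (String uuid : p.nodeUuids()) {
          if (!registeredUuids.contains(uuid)) {
            return true; // never re-registered after the SCM restart
          }
        }
        return false;
      }

      public static void main(String[] args) {
        Set<String> registered = Set.of("dn-1", "dn-2");
        Pipeline healthy = new Pipeline("p1", true, List.of("dn-1", "dn-2"));
        Pipeline stale = new Pipeline("p2", true, List.of("dn-1", "dn-3"));
        System.out.println(isOpenWithUnregisteredNodes(healthy, registered)); // false
        System.out.println(isOpenWithUnregisteredNodes(stale, registered));   // true
      }
    }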


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 01/04: HDDS-7352. OM log flooded by AWSV4AuthValidator (#3857)

Posted by ca...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit add7b9f874c59bdc105a077044778e36aa37eb90
Author: Christos Bisias <ch...@gmail.com>
AuthorDate: Wed Oct 19 11:28:32 2022 +0300

    HDDS-7352. OM log flooded by AWSV4AuthValidator (#3857)
---
 .../java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java     | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
index 1b526e7873..3374039af3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
@@ -84,7 +84,9 @@ final class AWSV4AuthValidator {
     byte[] kRegion = sign(kDate, regionName);
     byte[] kService = sign(kRegion, serviceName);
     byte[] kSigning = sign(kService, "aws4_request");
-    LOG.info(Hex.encode(kSigning));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(Hex.encode(kSigning));
+    }
     return kSigning;
   }
 

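The essence of the fix is the guarded-logging idiom: SLF4J's parameterized
logging only defers string formatting, so an expensive argument such as
Hex.encode(kSigning) would still be computed on every request. A minimal
sketch (expensiveHex is an illustrative stand-in for Hex.encode, not the
real utility):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class GuardedLoggingDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingDemo.class);

      // Stand-in for Hex.encode(kSigning): work we only want at DEBUG.
      static String expensiveHex(byte[] key) {
        StringBuilder sb = new StringBuilder();
        for (byte b : key) {
          sb.append(String.format("%02x", b));
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        byte[] kSigning = {1, 2, 3};
        // The guard skips the hex encoding entirely unless the logger is
        // at DEBUG, which is what stops the flood at the default INFO level.
        if (LOG.isDebugEnabled()) {
          LOG.debug(expensiveHex(kSigning));
        }
      }
    }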

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 03/04: HDDS-7305. Fix Hadoop imports (#3822)

Posted by ca...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 4d563bd1c499d714b18f3e93e2883bd5abed5d15
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Wed Oct 19 13:28:30 2022 +0200

    HDDS-7305. Fix Hadoop imports (#3822)
---
 hadoop-hdds/framework/pom.xml                                  |  8 ++++++++
 hadoop-hdds/hadoop-dependency-server/pom.xml                   |  4 ++++
 hadoop-hdds/server-scm/pom.xml                                 | 10 ++++++++++
 .../apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java   |  2 +-
 hadoop-ozone/dist/src/main/license/jar-report.txt              |  1 -
 5 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index c602e4c1ac..29e92f77f8 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -59,6 +59,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-reload4j</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-compress</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-configuration2</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.logging.log4j</groupId>
       <artifactId>log4j-core</artifactId>
diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml
index a69902015c..38b7616dd7 100644
--- a/hadoop-hdds/hadoop-dependency-server/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-server/pom.xml
@@ -51,6 +51,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
           <groupId>org.apache.zookeeper</groupId>
           <artifactId>zookeeper</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>org.codehaus.jackson</groupId>
           <artifactId>jackson-mapper-asl</artifactId>
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 413439f621..4a9d031773 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -73,6 +73,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>provided</scope>
     </dependency>
 
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-math3</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-text</artifactId>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs-client</artifactId>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 044cbc0166..d943dc21e2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 664d4bf39f..6147466757 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -28,7 +28,6 @@ share/ozone/lib/commons-io.jar
 share/ozone/lib/commons-lang3.jar
 share/ozone/lib/commons-lang.jar
 share/ozone/lib/commons-logging.jar
-share/ozone/lib/commons-math3.jar
 share/ozone/lib/commons-net.jar
 share/ozone/lib/commons-pool2.jar
 share/ozone/lib/commons-text.jar
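
The Java-side change here is a one-line import swap: PipelineManagerImpl now
uses Guava's Lists directly instead of the copy shaded under
org.apache.hadoop.thirdparty, while the pom changes above explicitly supply
the commons artifacts that previously arrived transitively. A minimal sketch
of the resulting usage (hypothetical demo class, assuming Guava on the
classpath):

    import com.google.common.collect.Lists;
    import java.util.List;

    public class GuavaListsDemo {
      public static void main(String[] args) {
        // Plain Guava import, no Hadoop-internal thirdparty package needed.
        List<String> pipelineIds = Lists.newArrayList("p1", "p2");
        System.out.println(pipelineIds);
      }
    }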


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 04/04: HDDS-7354. SchemaV3 blockData not deleted in table (#3860)

Posted by ca...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 95514764de171d1bb66acfcf0c9e82d5957d4f23
Author: Nibiru <ax...@qq.com>
AuthorDate: Thu Oct 20 09:40:13 2022 +0800

    HDDS-7354. SchemaV3 blockData not deleted in table (#3860)
---
 .../background/BlockDeletingService.java           |  4 +-
 .../container/common/TestBlockDeletingService.java | 64 ++++++++++++++++++++--
 2 files changed, 62 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 68c2bcc6fe..ee57666a0a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -501,8 +501,8 @@ public class BlockDeletingService extends BackgroundService {
           for (DeletedBlocksTransaction delTx : delBlocks) {
             deleter.apply(deleteTxns, batch, delTx.getTxID());
             for (Long blk : delTx.getLocalIDList()) {
-              String bID = blk.toString();
-              meta.getStore().getBlockDataTable().deleteWithBatch(batch, bID);
+              blockDataTable.deleteWithBatch(batch,
+                  containerData.blockKey(blk));
             }
           }
 
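
This one-line fix matters because schema V3 stores every container's
blockData in a single table whose keys carry a container prefix, so deleting
by the bare local block ID never matched a key. A toy illustration (the
"containerId|localId" rendering is hypothetical; the real layout comes from
KeyValueContainerData.blockKey()):

    import java.util.HashMap;
    import java.util.Map;

    public class SchemaV3KeyDemo {

      // Hypothetical container-prefixed key, standing in for
      // KeyValueContainerData.blockKey(localId).
      static String blockKey(long containerId, long localId) {
        return containerId + "|" + localId;
      }

      public static void main(String[] args) {
        Map<String, String> blockDataTable = new HashMap<>();
        blockDataTable.put(blockKey(7, 42), "blockData");

        // Old code deleted by the bare local ID, which matches nothing:
        blockDataTable.remove(Long.toString(42));
        System.out.println(blockDataTable.size()); // 1: record leaked

        // Fixed code deletes by the container-prefixed key:
        blockDataTable.remove(blockKey(7, 42));
        System.out.println(blockDataTable.size()); // 0
      }
    }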
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index fe39959688..357cd7c0c9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.BackgroundService;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -42,6 +43,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
@@ -372,8 +374,8 @@ public class TestBlockDeletingService {
       DatanodeStoreSchemaTwoImpl dnStoreTwoImpl =
           (DatanodeStoreSchemaTwoImpl) ds;
       try (
-          TableIterator<Long, ? extends Table.KeyValue<Long, 
-              StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction>> 
+          TableIterator<Long, ? extends Table.KeyValue<Long,
+              StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction>>
               iter = dnStoreTwoImpl.getDeleteTransactionTable().iterator()) {
         while (iter.hasNext()) {
           StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
@@ -430,16 +432,18 @@ public class TestBlockDeletingService {
     containerSet.listContainer(0L, 1, containerData);
     Assert.assertEquals(1, containerData.size());
     KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
+    KeyPrefixFilter filter = Objects.equals(schemaVersion, SCHEMA_V1) ?
+        data.getDeletingBlockKeyFilter() : data.getUnprefixedKeyFilter();
 
     try (DBHandle meta = BlockUtils.getDB(data, conf)) {
       Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
+      assertBlockDataTableRecordCount(3, meta, filter, data.getContainerID());
       // NOTE: this test assumes that every container is a KeyValueContainer
       // and has a DeleteTransactionId in KeyValueContainerData. If other
       // types are going to be added, this test should be checked.
       long transactionId = ((KeyValueContainerData) containerMap
           .get(containerData.get(0).getContainerID()).getContainerData())
           .getDeleteTransactionId();
-
       long containerSpace = containerData.get(0).getBytesUsed();
       // Number of deleted blocks in container should be equal to 0 before
       // block delete
@@ -488,6 +492,9 @@ public class TestBlockDeletingService {
       Assert.assertEquals(3,
           deletingServiceMetrics.getSuccessCount()
               - deleteSuccessCount);
+
+      // check if blockData gets deleted
+      assertBlockDataTableRecordCount(0, meta, filter, data.getContainerID());
     }
 
     svc.shutdown();
@@ -501,7 +508,6 @@ public class TestBlockDeletingService {
     if (Objects.equals(schemaVersion, SCHEMA_V1)) {
       return;
     }
-
     DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
     dnConf.setBlockDeletionLimit(2);
     this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
@@ -526,6 +532,8 @@ public class TestBlockDeletingService {
     Assert.assertEquals(2, containerData.size());
     KeyValueContainerData ctr1 = (KeyValueContainerData) containerData.get(0);
     KeyValueContainerData ctr2 = (KeyValueContainerData) containerData.get(1);
+    KeyPrefixFilter filter = Objects.equals(schemaVersion, SCHEMA_V1) ?
+        ctr1.getDeletingBlockKeyFilter() : ctr1.getUnprefixedKeyFilter();
 
     try (DBHandle meta = BlockUtils.getDB(ctr1, conf)) {
       // create unrecorded blocks in a new txn and update metadata,
@@ -545,6 +553,8 @@ public class TestBlockDeletingService {
       Assert.assertEquals(7, getUnderDeletionBlocksCount(meta, ctr1));
     }
 
+    assertBlockDataTableRecordCount(3, ctr1, filter);
+    assertBlockDataTableRecordCount(3, ctr2, filter);
     Assert.assertEquals(3, ctr2.getNumPendingDeletionBlocks());
 
     // In total: 2 containers * 3 blocks + 4 unrecorded blocks = 10 blocks
@@ -564,6 +574,9 @@ public class TestBlockDeletingService {
     Assert.assertEquals(0, ctr2.getBlockCount());
     Assert.assertEquals(0, ctr2.getBytesUsed());
 
+    // check if blockData gets deleted
+    assertBlockDataTableRecordCount(0, ctr1, filter);
+    assertBlockDataTableRecordCount(0, ctr2, filter);
     svc.shutdown();
   }
 
@@ -832,4 +845,47 @@ public class TestBlockDeletingService {
       service.shutdown();
     }
   }
+
+  /**
+   * Check the blockData record count of a given container (DBHandle not provided).
+   *
+   * @param expectedCount expected records in the table
+   * @param containerData KeyValueContainerData
+   * @param filter KeyPrefixFilter
+   * @throws IOException
+   */
+  private void assertBlockDataTableRecordCount(int expectedCount,
+      KeyValueContainerData containerData, KeyPrefixFilter filter)
+      throws IOException {
+    try (DBHandle handle = BlockUtils.getDB(containerData, conf)) {
+      long containerID = containerData.getContainerID();
+      assertBlockDataTableRecordCount(expectedCount, handle, filter,
+          containerID);
+    }
+  }
+
+  /**
+   * Check the blockData record count of a given container (DBHandle provided).
+   *
+   * @param expectedCount expected records in the table
+   * @param handle DB handle
+   * @param filter KeyPrefixFilter
+   * @param containerID the container ID to filter results
+   * @throws IOException
+   */
+  private void assertBlockDataTableRecordCount(int expectedCount,
+      DBHandle handle, KeyPrefixFilter filter, long containerID)
+      throws IOException {
+    long count = 0L;
+    BlockIterator<BlockData> iterator = handle.getStore().
+        getBlockIterator(containerID, filter);
+    iterator.seekToFirst();
+    while (iterator.hasNext()) {
+      iterator.nextBlock();
+      count += 1;
+    }
+    Assert.assertEquals("Excepted: " + expectedCount
+        + ", but actual: " + count + " in the blockData table of container: "
+        + containerID + ".", expectedCount, count);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org