You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-issues@hadoop.apache.org by GitBox <gi...@apache.org> on 2019/08/19 21:44:30 UTC

[GitHub] [hadoop] hgadre commented on a change in pull request #1154: [HDDS-1200] Add support for checksum verification in data scrubber

hgadre commented on a change in pull request #1154: [HDDS-1200] Add support for checksum verification in data scrubber
URL: https://github.com/apache/hadoop/pull/1154#discussion_r315425380
 
 

 ##########
 File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
 ##########
 @@ -120,10 +133,70 @@ public TestKeyValueContainerCheck(String metadataImpl) {
     container.close();
 
     // next run checks on a Closed Container
-    valid = kvCheck.fullCheck();
+    valid = kvCheck.fullCheck(new DataTransferThrottler(
+        c.getBandwidthPerVolume()), null);
     assertTrue(valid);
   }
 
+  /**
+   * Sanity test, when there are corruptions induced.
+   * @throws Exception
+   */
+  @Test
+  public void testKeyValueContainerCheckCorruption() throws Exception {
+    long containerID = 102;
+    int deletedBlocks = 1;
+    int normalBlocks = 3;
+    int chunksPerBlock = 4;
+    boolean valid = false;
+    ContainerScrubberConfiguration sc = conf.getObject(
+        ContainerScrubberConfiguration.class);
+
+    // test Closed Container
+    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, 65536,
+        chunksPerBlock);
+    File chunksPath = new File(containerData.getChunksPath());
+    assertTrue(chunksPath.listFiles().length
+        == (deletedBlocks + normalBlocks) * chunksPerBlock);
+
+    container.close();
+
+    KeyValueContainerCheck kvCheck =
+        new KeyValueContainerCheck(containerData.getMetadataPath(), conf,
+            containerID);
+
+    File metaDir = new File(containerData.getMetadataPath());
+    File dbFile = KeyValueContainerLocationUtil
+        .getContainerDBFile(metaDir, containerID);
+    containerData.setDbFile(dbFile);
+    try(ReferenceCountedDB db =
+            BlockUtils.getDB(containerData, conf);
+        KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID,
+            new File(containerData.getContainerPath()))) {
+      BlockData block = kvIter.nextBlock();
+      assertTrue(!block.getChunks().isEmpty());
+      ContainerProtos.ChunkInfo c = block.getChunks().get(0);
+      File chunkFile = ChunkUtils.getChunkFile(containerData,
+          ChunkInfo.getFromProtoBuf(c));
+      long length = chunkFile.length();
+      assertTrue(length > 0);
+      // forcefully truncate the file to induce failure.
+      try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) {
+        file.setLength(length / 2);
+      }
+      assertEquals(length/2, chunkFile.length());
+    }
+
+    // metadata check should pass.
+    valid = kvCheck.fastCheck();
+    assertTrue(valid);
+
+    // checksum validation should fail.
+    valid = kvCheck.fullCheck(new DataTransferThrottler(
+            sc.getBandwidthPerVolume()), null);
+    assertFalse(valid);
+  }
+
   /**
    * Creates a container with normal and deleted blocks.
    * First it will insert normal blocks, and then it will insert
 
 Review comment:
   Not sure I am following you. Can you elaborate on which part you find misleading? This function was present before this patch.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-issues-help@hadoop.apache.org