Posted to common-commits@hadoop.apache.org by ww...@apache.org on 2017/07/18 04:12:58 UTC

hadoop git commit: HDFS-11996. Ozone : add a UT to test partial read of chunks. Contributed by Chen Liang.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 e0687dde4 -> 0981119eb


HDFS-11996. Ozone : add a UT to test partial read of chunks. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0981119e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0981119e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0981119e

Branch: refs/heads/HDFS-7240
Commit: 0981119eb3d9337bb7b72a94dd7903fd0d1f1972
Parents: e0687dd
Author: Weiwei Yang <ww...@apache.org>
Authored: Tue Jul 18 12:12:20 2017 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Tue Jul 18 12:12:20 2017 +0800

----------------------------------------------------------------------
 .../common/impl/TestContainerPersistence.java   | 38 ++++++++++++++++++++
 1 file changed, 38 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0981119e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 7a23b87..b33ee86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -57,6 +57,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -72,6 +73,8 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .setDataChecksum;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
@@ -455,6 +458,41 @@ public class TestContainerPersistence {
   }
 
   /**
+   * Test a partial read within a single chunk.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testPartialRead() throws Exception {
+    final int datalen = 1024;
+    final int start = datalen / 4;
+    final int length = datalen / 2;
+
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner", "bilbo");
+    containerManager.createContainer(pipeline, cData);
+    ChunkInfo info = getChunk(keyName, 0, 0, datalen);
+    byte[] data = getData(datalen);
+    setDataChecksum(info, data);
+    chunkManager.writeChunk(pipeline, keyName, info, data);
+
+    byte[] readData = chunkManager.readChunk(pipeline, keyName, info);
+    assertTrue(Arrays.equals(data, readData));
+
+    ChunkInfo info2 = getChunk(keyName, 0, start, length);
+    byte[] readData2 = chunkManager.readChunk(pipeline, keyName, info2);
+    assertEquals(length, readData2.length);
+    assertTrue(Arrays.equals(
+        Arrays.copyOfRange(data, start, start + length), readData2));
+  }
+
+  /**
    * Writes a single chunk and tries to overwrite that chunk without over write
    * flag then re-tries with overwrite flag.
    *


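For readers skimming the patch: the new test writes a full 1024-byte chunk, reads it back whole, then builds a second ChunkInfo via getChunk(keyName, 0, start, length) covering only the middle half of the data and asserts that readChunk returns exactly that slice. The standalone sketch below (plain JDK only, no Ozone classes; the payload and offsets are illustrative, and the class name is made up for this example) models the slice semantics those assertions encode:

  import java.util.Arrays;
  import java.util.Random;

  public class PartialReadSketch {
    public static void main(String[] args) {
      final int datalen = 1024;
      final int start = datalen / 4;   // 256
      final int length = datalen / 2;  // 512

      // Stand-in for the data handed to writeChunk(): a random payload.
      byte[] data = new byte[datalen];
      new Random().nextBytes(data);

      // What the test expects a partial read of [start, start + length)
      // to return: the matching slice of the originally written bytes.
      byte[] expected = Arrays.copyOfRange(data, start, start + length);

      // In the test this value comes from chunkManager.readChunk(pipeline,
      // keyName, getChunk(keyName, 0, start, length)); modeled here as a copy.
      byte[] readData2 = Arrays.copyOfRange(data, start, start + length);

      System.out.println("length matches: " + (readData2.length == length));
      System.out.println("slice matches:  " + Arrays.equals(expected, readData2));
    }
  }
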
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org