Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/06/18 21:43:24 UTC

[hadoop] branch branch-2.8 updated: HDFS-14101. Random failure of testListCorruptFilesCorruptedBlock. Contributed by Zsolt Venczel, Nikhil Navadia.

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
     new 87b6a88  HDFS-14101. Random failure of testListCorruptFilesCorruptedBlock. Contributed by Zsolt Venczel, Nikhil Navadia.
87b6a88 is described below

commit 87b6a889b1212c63601d814ed6b486fae8e2fac4
Author: Zsolt Venczel <zv...@cloudera.com>
AuthorDate: Tue Jun 18 14:36:19 2019 -0700

    HDFS-14101. Random failure of testListCorruptFilesCorruptedBlock. Contributed by Zsolt Venczel, Nikhil Navadia.
    
    Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
    Co-authored-by: Nikhil Navadia <ni...@cloudera.com>
    (cherry picked from commit 7c00756aff6bf8d5e1d05748ba32b679e397b53f)
    (cherry picked from commit 4ea3b04bf36af56612c6b47d9ff8aa49cbbbcc9e)
    (cherry picked from commit 76faa41f18f1cb20a6469f53b475f52b662f8107)
    (cherry picked from commit a68de439577715e960ed776c5172fd1fbb0506f7)
    (cherry picked from commit 1a5e40f802a932f45b1fca1524c88cbba40f8cd6)
---
 .../server/namenode/TestListCorruptFileBlocks.java | 30 ++++++++++++----------
 1 file changed, 16 insertions(+), 14 deletions(-)
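
[Editorial note, not part of the commit.] In both affected test methods the patch makes the deliberate block-corruption step deterministic: the unseeded Random field is dropped, the magic length 2 becomes a named corruptionLength constant that is also passed to DFSTestUtil.Builder#setMinSize so every generated file is at least corruptionLength bytes long, and the corruption bytes are drawn from a fixed-seed Random(13L) so a failing run can be reproduced byte for byte. A minimal standalone sketch of the corruption step follows the diff below.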

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 1f31bdc..b1039e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -63,7 +63,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
     
     try {
       Configuration conf = new HdfsConfiguration();
@@ -74,10 +73,13 @@ public class TestListCorruptFileBlocks {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -98,14 +100,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
@@ -134,7 +135,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
 
     try {
       Configuration conf = new HdfsConfiguration();
@@ -155,10 +155,13 @@ public class TestListCorruptFileBlocks {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -178,14 +181,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {

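[Editorial note, not part of the commit.] As a reading aid, here is a minimal, self-contained Java sketch of the technique the patch standardizes on: overwrite the last corruptionLength bytes of a file with bytes from a fixed-seed Random. The class and method names (TailCorrupter, corruptTail) are illustrative only, not from the Hadoop source.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class TailCorrupter {
  /**
   * Overwrites the last corruptionLength bytes of the given file with
   * pseudo-random bytes from a fixed seed, mirroring the patched tests:
   * the same seed (13L) yields the same corruption on every run.
   */
  static void corruptTail(String path, int corruptionLength) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(path, "rw");
         FileChannel channel = file.getChannel()) {
      // The caller must guarantee the file is at least corruptionLength
      // bytes long; the patch enforces this via setMinSize(corruptionLength).
      long position = channel.size() - corruptionLength;
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer); // fixed seed => reproducible bytes
      channel.write(ByteBuffer.wrap(buffer), position);
    }
  }

  public static void main(String[] args) throws IOException {
    // Example: corrupt the final two bytes of a file, as the tests do.
    corruptTail(args[0], 2);
  }
}

The seed is the design point worth noting: with an unseeded Random the corrupted bytes differ on every run, so an intermittent failure cannot be replayed; pinning the seed means the corruption is identical each time the test executes.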
