Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/01 22:41:06 UTC

svn commit: r1615223 [2/2] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache...

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Fri Aug  1 20:41:05 2014
@@ -25,6 +25,7 @@ import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -39,7 +40,11 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestFileCorruption;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
@@ -87,36 +92,29 @@ public class TestListCorruptFileBlocks {
       File storageDir = cluster.getInstanceStorageDir(0, 1);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // shorten .meta file
-          //
-          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-          FileChannel channel = file.getChannel();
-          long position = channel.size() - 2;
-          int length = 2;
-          byte[] buffer = new byte[length];
-          random.nextBytes(buffer);
-          channel.write(ByteBuffer.wrap(buffer), position);
-          file.close();
-          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
-              " at offset " + position + " length " + length);
-
-          // read all files to trigger detection of corrupted replica
-          try {
-            util.checkFiles(fs, "/srcdat10");
-          } catch (BlockMissingException e) {
-            System.out.println("Received BlockMissingException as expected.");
-          } catch (IOException e) {
-            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
-                " but received IOException " + e, false);
-          }
-          break;
-        }
+      List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+      assertTrue("Data directory does not contain any blocks or there was an "
+          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      File metaFile = metaFiles.get(0);
+      RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+      FileChannel channel = file.getChannel();
+      long position = channel.size() - 2;
+      int length = 2;
+      byte[] buffer = new byte[length];
+      random.nextBytes(buffer);
+      channel.write(ByteBuffer.wrap(buffer), position);
+      file.close();
+      LOG.info("Deliberately corrupting file " + metaFile.getName() +
+          " at offset " + position + " length " + length);
+
+      // read all files to trigger detection of corrupted replica
+      try {
+        util.checkFiles(fs, "/srcdat10");
+      } catch (BlockMissingException e) {
+        System.out.println("Received BlockMissingException as expected.");
+      } catch (IOException e) {
+        assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
+            " but received IOException " + e, false);
       }
 
       // fetch bad file list from namenode. There should be one file.
@@ -174,38 +172,30 @@ public class TestListCorruptFileBlocks {
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
           cluster.getNamesystem().getBlockPoolId());
       assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
-                 (blocks.length > 0));
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // shorten .meta file
-          //
-          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-          FileChannel channel = file.getChannel();
-          long position = channel.size() - 2;
-          int length = 2;
-          byte[] buffer = new byte[length];
-          random.nextBytes(buffer);
-          channel.write(ByteBuffer.wrap(buffer), position);
-          file.close();
-          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
-              " at offset " + position + " length " + length);
-
-          // read all files to trigger detection of corrupted replica
-          try {
-            util.checkFiles(fs, "/srcdat10");
-          } catch (BlockMissingException e) {
-            System.out.println("Received BlockMissingException as expected.");
-          } catch (IOException e) {
-            assertTrue("Corrupted replicas not handled properly. " +
-                       "Expecting BlockMissingException " +
-                       " but received IOException " + e, false);
-          }
-          break;
-        }
+      List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+      assertTrue("Data directory does not contain any blocks or there was an "
+          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      File metaFile = metaFiles.get(0);
+      RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+      FileChannel channel = file.getChannel();
+      long position = channel.size() - 2;
+      int length = 2;
+      byte[] buffer = new byte[length];
+      random.nextBytes(buffer);
+      channel.write(ByteBuffer.wrap(buffer), position);
+      file.close();
+      LOG.info("Deliberately corrupting file " + metaFile.getName() +
+          " at offset " + position + " length " + length);
+
+      // read all files to trigger detection of corrupted replica
+      try {
+        util.checkFiles(fs, "/srcdat10");
+      } catch (BlockMissingException e) {
+        System.out.println("Received BlockMissingException as expected.");
+      } catch (IOException e) {
+        assertTrue("Corrupted replicas not handled properly. " +
+                   "Expecting BlockMissingException " +
+                   " but received IOException " + e, false);
       }
 
       // fetch bad file list from namenode. There should be one file.
@@ -295,17 +285,18 @@ public class TestListCorruptFileBlocks {
         for (int j = 0; j <= 1; j++) {
           File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-          File[] blocks = data_dir.listFiles();
-          if (blocks == null)
+          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+              data_dir);
+          if (metadataFiles == null)
             continue;
           // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
           // (blocks.length > 0));
-          for (int idx = 0; idx < blocks.length; idx++) {
-            if (!blocks[idx].getName().startsWith("blk_")) {
-              continue;
-            }
-            LOG.info("Deliberately removing file " + blocks[idx].getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
+          for (File metadataFile : metadataFiles) {
+            File blockFile = Block.metaToBlockFile(metadataFile);
+            LOG.info("Deliberately removing file " + blockFile.getName());
+            assertTrue("Cannot remove file.", blockFile.delete());
+            LOG.info("Deliberately removing file " + metadataFile.getName());
+            assertTrue("Cannot remove file.", metadataFile.delete());
             // break;
           }
         }
@@ -405,17 +396,18 @@ public class TestListCorruptFileBlocks {
       for (int i = 0; i < 2; i++) {
         File storageDir = cluster.getInstanceStorageDir(0, i);
         File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
+        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+            data_dir);
+        if (metadataFiles == null)
           continue;
         // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
         // (blocks.length > 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
-            continue;
-          }
-          LOG.info("Deliberately removing file " + blocks[idx].getName());
-          assertTrue("Cannot remove file.", blocks[idx].delete());
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          LOG.info("Deliberately removing file " + blockFile.getName());
+          assertTrue("Cannot remove file.", blockFile.delete());
+          LOG.info("Deliberately removing file " + metadataFile.getName());
+          assertTrue("Cannot remove file.", metadataFile.delete());
           // break;
         }
       }
@@ -482,15 +474,14 @@ public class TestListCorruptFileBlocks {
           File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           LOG.info("Removing files from " + data_dir);
-          File[] blocks = data_dir.listFiles();
-          if (blocks == null)
+          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+              data_dir);
+          if (metadataFiles == null)
             continue;
-  
-          for (int idx = 0; idx < blocks.length; idx++) {
-            if (!blocks[idx].getName().startsWith("blk_")) {
-              continue;
-            }
-            assertTrue("Cannot remove file.", blocks[idx].delete());
+          for (File metadataFile : metadataFiles) {
+            File blockFile = Block.metaToBlockFile(metadataFile);
+            assertTrue("Cannot remove file.", blockFile.delete());
+            assertTrue("Cannot remove file.", metadataFile.delete());
           }
         }
       }
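
A note on the refactor above: the test previously scanned the finalized directory with a flat listFiles() and matched names beginning with "blk_" and ending with ".meta". Under the block ID-based datanode layout this commit moves to (the new hadoop-24-datanode-dir.tgz upgrade fixture below points the same way), finalized replicas sit in nested subdirectories, so a flat listing no longer finds them; the tests switch to MiniDFSCluster.getAllBlockMetadataFiles() plus Block.metaToBlockFile(). The sketch below is a minimal stand-in for the behavior the call sites assume, not the committed implementation.

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    public class BlockMetaScanSketch {
      // Assumed behavior of MiniDFSCluster.getAllBlockMetadataFiles(File):
      // walk dir recursively and collect every "blk_*.meta" file, returning
      // null when the directory cannot be listed (mirroring File.listFiles()).
      public static List<File> getAllBlockMetadataFiles(File dir) {
        File[] entries = dir.listFiles();
        if (entries == null) {
          return null; // directory missing or I/O error
        }
        List<File> metaFiles = new ArrayList<File>();
        for (File f : entries) {
          if (f.isDirectory()) {
            List<File> nested = getAllBlockMetadataFiles(f);
            if (nested != null) {
              metaFiles.addAll(nested);
            }
          } else if (f.getName().startsWith("blk_")
              && f.getName().endsWith(".meta")) {
            metaFiles.add(f);
          }
        }
        return metaFiles;
      }

      // Assumed behavior of Block.metaToBlockFile(File): strip the
      // "_<genstamp>.meta" suffix to recover the data file, e.g.
      // blk_1073741825_1001.meta -> blk_1073741825.
      public static File metaToBlockFile(File metaFile) {
        String name = metaFile.getName();
        return new File(metaFile.getParent(),
            name.substring(0, name.lastIndexOf('_')));
      }
    }

Corrupting metaFiles.get(0) rather than looping until the first match keeps the same test intent (damage exactly one replica's checksum file, then read everything back to force corruption detection), and deleting the block file and metadata file as a pair avoids leaving orphaned .meta files behind in the removal tests.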

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz?rev=1615223&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
------------------------------------------------------------------------------
    svn:mime-type = application/x-gzip

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt?rev=1615223&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt Fri Aug  1 20:41:05 2014
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Similar to hadoop-dfs-dir.txt, except this is used for a datanode layout
+# upgrade test.
+# Uncomment the following line to produce checksum info for a new DFS image.
+#printChecksums
+
+/small 	 2976363016
+overallCRC 	 4099869518
+
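
The reference file above follows the hadoop-dfs-dir.txt convention: one "<path> <crc>" line per file, a trailing overallCRC line, and an optional #printChecksums directive for generating checksums from a new image. As a rough illustration of how such a file can be checked (assumptions: per-file CRC32 over the file bytes and an overall CRC32 fed the visited path names, which matches how TestDFSUpgradeFromImage appears to verify hadoop-dfs-dir.txt; the real test walks the upgraded DFS rather than local files, and this simplified sketch takes path/CRC pairs as arguments):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;

    public class CrcCheckSketch {
      // CRC32 over a file's full contents.
      static long fileCrc(String path) throws IOException {
        CRC32 crc = new CRC32();
        try (InputStream in = new FileInputStream(path)) {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) > 0) {
            crc.update(buf, 0, n);
          }
        }
        return crc.getValue();
      }

      public static void main(String[] args) throws IOException {
        // Expect pairs: <path> <expectedCrc>, e.g. "/small 2976363016".
        CRC32 overall = new CRC32();
        for (int i = 0; i + 1 < args.length; i += 2) {
          String path = args[i];
          long expected = Long.parseLong(args[i + 1]);
          // Assumption: overallCRC accumulates the visited path names.
          overall.update(path.getBytes(StandardCharsets.UTF_8));
          long actual = fileCrc(path);
          System.out.println(path + ": expected=" + expected
              + " actual=" + actual
              + (expected == actual ? " OK" : " MISMATCH"));
        }
        System.out.println("overallCRC " + overall.getValue());
      }
    }

The overall CRC only reproduces the reference value if files are visited in the same (sorted) order as the generating run, which is why the format records it separately from the per-file checksums.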