Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2011/04/21 23:55:19 UTC

svn commit: r1095827 - in /hadoop/hdfs/trunk: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java

Author: eli
Date: Thu Apr 21 21:55:19 2011
New Revision: 1095827

URL: http://svn.apache.org/viewvc?rev=1095827&view=rev
Log:
HDFS-1854. make failure message more useful in DFSTestUtil.waitReplication(). Contributed by Matt Foley


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
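
For context, a test typically uses the helper touched by this patch roughly as
follows. This is an illustrative sketch, not code from this commit: the
MiniDFSCluster setup, the path /testWaitReplication/file1, and the
createFile() arguments are assumptions; only waitReplication(fs, file,
replFactor) corresponds to the method modified in the diff below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitReplicationUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Start a 3-datanode mini cluster (constructor arguments are assumptions
    // for illustration only).
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/testWaitReplication/file1");
      short replFactor = 3;
      // Write a small file with the desired replication factor.
      DFSTestUtil.createFile(fs, file, 1024L, replFactor, 0L);
      // Block until every block of the file reports replFactor replicas; with
      // this patch, any wait prints the block index and replica locations.
      DFSTestUtil.waitReplication(fs, file, replFactor);
    } finally {
      cluster.shutdown();
    }
  }
}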

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1095827&r1=1095826&r2=1095827&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Thu Apr 21 21:55:19 2011
@@ -127,6 +127,9 @@ Trunk (unreleased changes)
     being written are closed for a grace period, and start a new thread when
     new files are opened for write.  (szetszwo)
 
+    HDFS-1854. make failure message more useful in
+    DFSTestUtil.waitReplication(). (Matt Foley via eli)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1095827&r1=1095826&r2=1095827&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Apr 21 21:55:19 2011
@@ -241,10 +241,13 @@ public class DFSTestUtil {
       BlockLocation locs[] = fs.getFileBlockLocations(
         fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
       for (int j = 0; j < locs.length; j++) {
-        String[] loc = locs[j].getHosts();
-        if (loc.length != replFactor) {
-          System.out.println("File " + fileName + " has replication factor " +
-              loc.length);
+        String[] hostnames = locs[j].getNames();
+        if (hostnames.length != replFactor) {
+          String hostNameList = "";
+          for (String h : hostnames) hostNameList += h + " ";
+          System.out.println("Block " + j + " of file " + fileName 
+              + " has replication factor " + hostnames.length + "; locations "
+              + hostNameList);
           good = false;
           try {
             System.out.println("Waiting for replication factor to drain");
@@ -253,6 +256,10 @@ public class DFSTestUtil {
           break;
         }
       }
+      if (good) {
+        System.out.println("All blocks of file " + fileName
+            + " verified to have replication factor " + replFactor);
+      }
     } while(!good);
   }
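
For reference, the patched check reads roughly as follows once the hunks above
are placed back into their surrounding loop. This is a hedged reconstruction,
not the committed file: the do/while scaffolding, the sleep inside the elided
try block, and the class name ReplicationWaitSketch are assumptions filled in
from the diff context.

import java.io.IOException;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationWaitSketch {

  /** Poll until every block of fileName reports replFactor replicas. */
  public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
      throws IOException {
    boolean good;
    do {
      good = true;
      BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
      for (int j = 0; j < locs.length; j++) {
        String[] hostnames = locs[j].getNames();
        if (hostnames.length != replFactor) {
          // New in HDFS-1854: name the offending block and its current
          // replica locations instead of only the bare replica count.
          String hostNameList = "";
          for (String h : hostnames) hostNameList += h + " ";
          System.out.println("Block " + j + " of file " + fileName
              + " has replication factor " + hostnames.length + "; locations "
              + hostNameList);
          good = false;
          try {
            System.out.println("Waiting for replication factor to drain");
            Thread.sleep(100);                 // assumed back-off; elided in the hunk
          } catch (InterruptedException e) {}  // assumed; elided in the hunk
          break;
        }
      }
      if (good) {
        System.out.println("All blocks of file " + fileName
            + " verified to have replication factor " + replFactor);
      }
    } while (!good);
  }
}

Compared with the old message, which printed only a replica count for the
file, the per-block index and location list make it quicker to see which
block is lagging and on which datanodes its replicas currently sit.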