Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/08/11 07:24:56 UTC

svn commit: r1156490 - in /hadoop/common/trunk/hdfs: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

Author: szetszwo
Date: Thu Aug 11 05:24:55 2011
New Revision: 1156490

URL: http://svn.apache.org/viewvc?rev=1156490&view=rev
Log:
HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).

Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1156490&r1=1156489&r2=1156490&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Thu Aug 11 05:24:55 2011
@@ -951,6 +951,9 @@ Trunk (unreleased changes)
     HDFS-2196. Make ant build system work with hadoop-common JAR generated
     by Maven. (Alejandro Abdelnur via tomwhite)
 
+    HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
+    (szetszwo)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1156490&r1=1156489&r2=1156490&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu Aug 11 05:24:55 2011
@@ -1221,12 +1221,13 @@ public class BlockManager {
     final DatanodeDescriptor targets[] = blockplacement.chooseTarget(
         src, numOfReplicas, client, excludedNodes, blocksize);
     if (targets.length < minReplication) {
-      throw new IOException("File " + src + " could only be replicated to " +
-                            targets.length + " nodes, instead of " +
-                            minReplication + ". There are "
-                            + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
-                            + " datanode(s) running but "+excludedNodes.size() +
-                            " node(s) are excluded in this operation.");
+      throw new IOException("File " + src + " could only be replicated to "
+          + targets.length + " nodes instead of minReplication (="
+          + minReplication + ").  There are "
+          + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+          + " datanode(s) running and "
+          + (excludedNodes == null? "no": excludedNodes.size())
+          + " node(s) are excluded in this operation.");
     }
     return targets;
   }
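
For context: the NullPointerException fixed above arose because excludedNodes may legitimately be null when the client excludes no datanodes, yet the old message unconditionally called excludedNodes.size() while building the IOException text. The standalone sketch below illustrates the null-safe pattern adopted in the new message; the class and method names here are illustrative only and are not part of BlockManager.

    import java.util.Map;

    public class ChooseTargetMessageSketch {
      /** Build the "could only be replicated to ..." message without risking a
       *  NullPointerException when excludedNodes is null (no nodes excluded). */
      static String buildMessage(String src, int chosenTargets, int minReplication,
          int liveDatanodes, Map<?, ?> excludedNodes) {
        return "File " + src + " could only be replicated to " + chosenTargets
            + " nodes instead of minReplication (=" + minReplication
            + ").  There are " + liveDatanodes + " datanode(s) running and "
            + (excludedNodes == null ? "no" : excludedNodes.size())
            + " node(s) are excluded in this operation.";
      }

      public static void main(String[] args) {
        // With a null exclude map the message now reads "... and no node(s) are
        // excluded ..." instead of throwing while the IOException is constructed.
        System.out.println(buildMessage("/foo.txt", 0, 1, 0, null));
      }
    }

This mirrors the guard in the committed change: the new test below exercises the same path by passing null for the excluded-node list to addBlock(..) on a cluster with zero datanodes and expecting a clean IOException rather than an NPE.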

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1156490&r1=1156489&r2=1156490&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Thu Aug 11 05:24:55 2011
@@ -404,6 +404,36 @@ public class TestFileCreation extends ju
     }
   }
 
+  /** test addBlock(..) when replication<min and excludeNodes==null. */
+  public void testFileCreationError3() throws IOException {
+    System.out.println("testFileCreationError3 start");
+    Configuration conf = new HdfsConfiguration();
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DFSClient client = dfs.dfs;
+
+      // create a new file.
+      final Path f = new Path("/foo.txt");
+      createFile(dfs, f, 3);
+      try {
+        cluster.getNameNode().addBlock(f.toString(), 
+            client.clientName, null, null);
+        fail();
+      } catch(IOException ioe) {
+        FileSystem.LOG.info("GOOD!", ioe);
+      }
+
+      System.out.println("testFileCreationError3 successful");
+    } finally {
+      IOUtils.closeStream(dfs);
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that file leases are persisted across namenode restarts.
    * This test is currently not triggered because more HDFS work is