Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2009/09/13 00:11:38 UTC

svn commit: r814244 - in /hadoop/hdfs/branches/HDFS-265: ./ lib/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: shv
Date: Sat Sep 12 22:11:37 2009
New Revision: 814244

URL: http://svn.apache.org/viewvc?rev=814244&view=rev
Log:
HDFS-604. Merge -r 813631:814221 from trunk to the append branch.

Modified:
    hadoop/hdfs/branches/HDFS-265/CHANGES.txt
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Sat Sep 12 22:11:37 2009
@@ -54,6 +54,9 @@
     FileNotFoundException from FileSystem::listStatus rather than returning
     null. (Jakob Homan via cdouglas)
 
+    HDFS-602. DistributedFileSystem mkdirs throws FileAlreadyExistsException
+    instead of FileNotFoundException. (Boris Shkolnik via suresh)
+
   NEW FEATURES
 
     HDFS-436. Introduce AspectJ framework for HDFS code and tests.
@@ -269,6 +272,15 @@
     HDFS-605. Do not run fault injection tests in the run-test-hdfs-with-mr
     target.  (Konstantin Boudnik via szetszwo)
 
+    HDFS-606. Fix ConcurrentModificationException in invalidateCorruptReplicas()
+    (shv)
+
+    HDFS-601. TestBlockReport obtains data directories directly from
+    MiniHDFSCluster. (Konstantin Boudnik via shv)
+
+    HDFS-614. TestDatanodeBlockScanner obtains data directories directly from
+    MiniHDFSCluster. (shv)
+
 Release 0.20.1 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Sat Sep 12 22:11:37 2009
@@ -65,6 +65,7 @@
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FSOutputSummer;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -975,7 +976,8 @@
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      NSQuotaExceededException.class,
-                                     DSQuotaExceededException.class);
+                                     DSQuotaExceededException.class,
+                                     FileAlreadyExistsException.class);
     }
   }
 

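The DFSClient hunk above adds FileAlreadyExistsException to the unwrap list so that the server-side exception type survives the RPC boundary instead of reaching callers as a generic RemoteException. A minimal sketch of the idiom, using a hypothetical NameNodeRpc stand-in for the real ClientProtocol proxy:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.ipc.RemoteException;

    class UnwrapSketch {
      // Hypothetical stand-in for the RPC proxy; illustrative only.
      interface NameNodeRpc {
        boolean mkdirs(String src) throws IOException;
      }

      static boolean mkdirs(NameNodeRpc namenode, String src) throws IOException {
        try {
          return namenode.mkdirs(src);
        } catch (RemoteException re) {
          // Exception classes named in the lookup list are re-created and
          // re-thrown as their original type; anything else stays wrapped
          // as a RemoteException.
          throw re.unwrapRemoteException(FileAlreadyExistsException.class);
        }
      }
    }

With the new entry in place, user code can catch FileAlreadyExistsException directly rather than string-matching on the wrapped class name.
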
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Sat Sep 12 22:11:37 2009
@@ -1136,8 +1136,10 @@
     boolean gotException = false;
     if (nodes == null)
       return;
-    for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext(); ) {
-      DatanodeDescriptor node = it.next();
+    // make a copy of the array of nodes in order to avoid
+    // ConcurrentModificationException, when the block is removed from the node
+    DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]);
+    for (DatanodeDescriptor node : nodesCopy) {
       try {
         invalidateBlock(blk, node);
       } catch (IOException e) {

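The copy-before-iterate change above is the HDFS-606 fix. A self-contained analogy of the failure and the fix, with plain strings standing in for DatanodeDescriptors (in the real code it is invalidateBlock() that ends up removing the node from the collection being iterated):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class CmeSketch {
      public static void main(String[] args) {
        List<String> nodes = new ArrayList<String>(Arrays.asList("dn1", "dn2", "dn3"));

        // Broken shape (the old loop): removing elements while an iterator
        // is open makes the next it.next() throw
        // ConcurrentModificationException.
        //
        //   for (Iterator<String> it = nodes.iterator(); it.hasNext(); ) {
        //     nodes.remove(it.next());
        //   }

        // Fixed shape (the new loop): iterate a snapshot array, so mutating
        // the live collection cannot invalidate the iteration.
        for (String node : nodes.toArray(new String[0])) {
          nodes.remove(node);
        }
        System.out.println(nodes); // prints []
      }
    }

Iterator.remove() would not help here because the removal happens deep inside invalidateBlock(), not at the loop itself, which is why the patch snapshots the collection instead.
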
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Sep 12 22:11:37 2009
@@ -17,23 +17,30 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.*;
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.URI;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.
@@ -957,7 +964,7 @@
    */
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean inheritPermission, long now)
-      throws FileNotFoundException, QuotaExceededException {
+      throws FileAlreadyExistsException, QuotaExceededException {
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
@@ -972,7 +979,7 @@
       for(; i < inodes.length && inodes[i] != null; i++) {
         pathbuilder.append(Path.SEPARATOR + names[i]);
         if (!inodes[i].isDirectory()) {
-          throw new FileNotFoundException("Parent path is not a directory: "
+          throw new FileAlreadyExistsException("Parent path is not a directory: "
               + pathbuilder);
         }
       }

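The FSDirectory change adjusts which exception mkdirs() raises when a component of the requested path already exists as a file: the component is present, just of the wrong type, so "already exists" describes the failure where "not found" misled. A sketch of the loop the hunk touches, with a hypothetical Node interface standing in for the real INode:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileAlreadyExistsException;

    class MkdirsSketch {
      // Hypothetical stand-in for o.a.h.hdfs.server.namenode.INode.
      interface Node {
        boolean isDirectory();
      }

      // Walk the already-existing prefix of the path; any component that is
      // a file makes the requested directory tree impossible to create.
      static void checkExistingAncestors(Node[] inodes, String[] names)
          throws IOException {
        StringBuilder pathbuilder = new StringBuilder();
        for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
          pathbuilder.append('/').append(names[i]);
          if (!inodes[i].isDirectory()) {
            throw new FileAlreadyExistsException(
                "Parent path is not a directory: " + pathbuilder);
          }
        }
      }
    }

Together with the DFSClient unwrap entry above, this is what lets the HDFS-602 exception reach user code intact.
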
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java Sat Sep 12 22:11:37 2009
@@ -17,6 +17,16 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -30,18 +40,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
 /**
  * This test simulates a variety of situations when blocks are being intentionally
  * corrupted, unexpectedly modified, and so on before a block report is happening
@@ -155,7 +156,7 @@
         (long)AppendTestUtil.FILE_SIZE, REPL_FACTOR, rand.nextLong());
 
     // mock around with newly created blocks and delete some
-    String testDataDirectory = System.getProperty("test.build.data");
+    String testDataDirectory = cluster.getDataDirectory();
 
     File dataDir = new File(testDataDirectory);
     assertTrue(dataDir.isDirectory());

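The TestBlockReport hunk is the HDFS-601 cleanup: the test now asks the running cluster for its data directory rather than reading the test.build.data property, so it cannot drift out of sync with wherever MiniDFSCluster actually placed its storage. A minimal sketch of the pattern, assuming the 0.21-era MiniDFSCluster constructor:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class DataDirSketch {
      static void printDataDir() throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster(new Configuration(), 1, true, null);
        try {
          // Resolve storage through the live cluster instead of through the
          // system property the build happens to set.
          File dataDir = new File(cluster.getDataDirectory());
          System.out.println(dataDir.getAbsolutePath());
        } finally {
          cluster.shutdown();
        }
      }
    }
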
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=814244&r1=814243&r2=814244&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Sat Sep 12 22:11:37 2009
@@ -131,7 +131,7 @@
 
   public static boolean corruptReplica(String blockName, int replica) throws IOException {
     Random random = new Random();
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     boolean corrupted = false;
     for (int i=replica*2; i<replica*2+2; i++) {
       File blockFile = new File(baseDir, "data" + (i+1) + 
@@ -183,7 +183,7 @@
     assertTrue(blocks.get(0).isCorrupt() == false);
 
     // Corrupt random replica of block 
-    corruptReplica(block, rand);
+    assertTrue(corruptReplica(block, rand));
 
     // Restart the datanode hoping the corrupt block to be reported
     cluster.restartDataNode(rand);
@@ -203,9 +203,9 @@
   
     // Corrupt all replicas. Now, block should be marked as corrupt
     // and we should get all the replicas 
-    corruptReplica(block, 0);
-    corruptReplica(block, 1);
-    corruptReplica(block, 2);
+    assertTrue(corruptReplica(block, 0));
+    assertTrue(corruptReplica(block, 1));
+    assertTrue(corruptReplica(block, 2));
 
     // Read the file to trigger reportBadBlocks by client
     try {
@@ -410,7 +410,7 @@
    * Change the length of a block at datanode dnIndex
    */
   static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException {
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
       File blockFile = new File(baseDir, "data" + (i+1) + 
           MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
@@ -426,7 +426,7 @@
   
   private static void waitForBlockDeleted(String blockName, int dnIndex) 
   throws IOException, InterruptedException {
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) + 
         MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
     File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2) +
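
All three helpers in this file now resolve replica files the same way, and the hunks above also wrap corruptReplica() in assertTrue(), so an attempt that silently finds no replica file fails the test at the call site instead of leaving it vacuously green. A sketch of the path arithmetic the diff relies on, assuming two storage directories per datanode and a blockName of the usual blk_<id> form:

    import java.io.File;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class ReplicaPathSketch {
      // Datanode dnIndex owns storage dirs data<2*dnIndex+1> and
      // data<2*dnIndex+2>; a finalized replica may live in either one.
      static File[] replicaFiles(String blockName, int dnIndex) {
        File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
        return new File[] {
            new File(baseDir, "data" + (2 * dnIndex + 1)
                + MiniDFSCluster.FINALIZED_DIR_NAME + blockName),
            new File(baseDir, "data" + (2 * dnIndex + 2)
                + MiniDFSCluster.FINALIZED_DIR_NAME + blockName)
        };
      }
    }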