Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/10/22 22:43:27 UTC

svn commit: r1401071 [2/2] - in /hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/docs/src/documentation/content/xdocs/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protoc...

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Mon Oct 22 20:43:16 2012
@@ -104,7 +104,7 @@ class INodeFileUnderConstruction extends
       "non-complete blocks! Blocks are: " + blocksAsString();
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
-                                  getReplication(),
+                                  getBlockReplication(),
                                   getModificationTime(),
                                   getModificationTime(),
                                   getPreferredBlockSize());

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Mon Oct 22 20:43:16 2012
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -56,21 +55,6 @@ public interface JournalManager extends 
    */
   void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException;
 
-   /**
-   * Get a list of edit log input streams.  The list will start with the
-   * stream that contains fromTxnId, and continue until the end of the journal
-   * being managed.
-   * 
-   * @param fromTxnId the first transaction id we want to read
-   * @param inProgressOk whether or not in-progress streams should be returned
-   *
-   * @return a list of streams
-   * @throws IOException if the underlying storage has an error or is otherwise
-   * inaccessible
-   */
-  void selectInputStreams(Collection<EditLogInputStream> streams,
-      long fromTxnId, boolean inProgressOk) throws IOException;
-
   /**
    * Set the amount of memory that this stream should use to buffer edits
    */

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java Mon Oct 22 20:43:16 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.Collection;
 
 /**
  * Interface used to abstract over classes which manage edit logs that may need
@@ -33,5 +34,20 @@ interface LogsPurgeable {
    * @throws IOException in the event of error
    */
   public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException;
-
+  
+  /**
+   * Get a list of edit log input streams.  The list will start with the
+   * stream that contains fromTxId, and continue until the end of the journal
+   * being managed.
+   * 
+   * @param fromTxId the first transaction id we want to read
+   * @param inProgressOk whether or not in-progress streams should be returned
+   *
+   * @return a list of streams
+   * @throws IOException if the underlying storage has an error or is otherwise
+   * inaccessible
+   */
+  void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxId, boolean inProgressOk) throws IOException;
+  
 }
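
For illustration only (not part of this commit): a minimal sketch of how a caller in the same package might use the selectInputStreams() contract documented above. The helper class name is hypothetical; LogsPurgeable and EditLogInputStream are the package-private types referenced in the diff.

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical helper, shown only to illustrate the selectInputStreams()
    // contract: the returned streams begin with the segment containing
    // fromTxId and continue to the end of the journal being managed.
    class EditLogStreamLister {
      static List<EditLogInputStream> listFinalizedStreams(
          LogsPurgeable logs, long fromTxId) throws IOException {
        List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
        // inProgressOk = false: only finalized segments are returned.
        logs.selectInputStreams(streams, fromTxId, false);
        return streams;
      }
    }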

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java Mon Oct 22 20:43:16 2012
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.List;
 import java.util.TreeSet;
 
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
@@ -48,6 +51,7 @@ public class NNStorageRetentionManager {
   
   private final int numCheckpointsToRetain;
   private final long numExtraEditsToRetain;
+  private final int maxExtraEditsSegmentsToRetain;
   private static final Log LOG = LogFactory.getLog(
       NNStorageRetentionManager.class);
   private final NNStorage storage;
@@ -65,6 +69,9 @@ public class NNStorageRetentionManager {
     this.numExtraEditsToRetain = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,
         DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT);
+    this.maxExtraEditsSegmentsToRetain = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_KEY,
+        DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_DEFAULT);
     Preconditions.checkArgument(numCheckpointsToRetain > 0,
         "Must retain at least one checkpoint");
     Preconditions.checkArgument(numExtraEditsToRetain >= 0,
@@ -94,7 +101,39 @@ public class NNStorageRetentionManager {
     // provide a "cushion" of older txns that we keep, which is
     // handy for HA, where a remote node may not have as many
     // new images.
-    long purgeLogsFrom = Math.max(0, minImageTxId + 1 - numExtraEditsToRetain);
+    //
+    // First, determine the target number of extra transactions to retain based
+    // on the configured amount.
+    long minimumRequiredTxId = minImageTxId + 1;
+    long purgeLogsFrom = Math.max(0, minimumRequiredTxId - numExtraEditsToRetain);
+    
+    ArrayList<EditLogInputStream> editLogs = new ArrayList<EditLogInputStream>();
+    purgeableLogs.selectInputStreams(editLogs, purgeLogsFrom, false);
+    Collections.sort(editLogs, new Comparator<EditLogInputStream>() {
+      @Override
+      public int compare(EditLogInputStream a, EditLogInputStream b) {
+        return ComparisonChain.start()
+            .compare(a.getFirstTxId(), b.getFirstTxId())
+            .compare(a.getLastTxId(), b.getLastTxId())
+            .result();
+      }
+    });
+    
+    // Next, adjust the number of transactions to retain if doing so would mean
+    // keeping too many segments around.
+    while (editLogs.size() > maxExtraEditsSegmentsToRetain) {
+      purgeLogsFrom = editLogs.get(0).getFirstTxId();
+      editLogs.remove(0);
+    }
+    
+    // Finally, ensure that we're not trying to purge any transactions that we
+    // actually need.
+    if (purgeLogsFrom > minimumRequiredTxId) {
+      throw new AssertionError("Should not purge more edits than required to "
+          + "restore: " + purgeLogsFrom + " should be <= "
+          + minimumRequiredTxId);
+    }
+    
     purgeableLogs.purgeLogsOlderThan(purgeLogsFrom);
   }
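
As a concrete illustration of the selection logic above, with purely hypothetical numbers (the variable names mirror the code in this hunk):

    // Hypothetical walk-through of the purge-point selection.
    long minImageTxId = 500000;            // txid of the oldest retained checkpoint
    long numExtraEditsToRetain = 100000;   // dfs.namenode.num.extra.edits.retained
    long minimumRequiredTxId = minImageTxId + 1;                  // 500001
    long purgeLogsFrom =
        Math.max(0, minimumRequiredTxId - numExtraEditsToRetain); // 400001
    // If retaining txids >= 400001 would keep more segments than
    // dfs.namenode.max.extra.edits.segments.retained allows, purgeLogsFrom is
    // advanced to the first txid of the oldest segment still within the cap,
    // but it is never allowed to move past minimumRequiredTxId.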
   

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Mon Oct 22 20:43:16 2012
@@ -834,7 +834,7 @@ class NamenodeJspHelper {
           doc.endTag();
 
           doc.startTag("replication");
-          doc.pcdata(""+inode.getReplication());
+          doc.pcdata(""+inode.getBlockReplication());
           doc.endTag();
 
           doc.startTag("disk_space_consumed");

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Oct 22 20:43:16 2012
@@ -751,6 +751,24 @@ public class SecondaryNameNode implement
           }
         }
       }
+
+      @Override
+      public void selectInputStreams(Collection<EditLogInputStream> streams,
+          long fromTxId, boolean inProgressOk) {
+        Iterator<StorageDirectory> iter = storage.dirIterator();
+        while (iter.hasNext()) {
+          StorageDirectory dir = iter.next();
+          List<EditLogFile> editFiles;
+          try {
+            editFiles = FileJournalManager.matchEditLogs(
+                dir.getCurrentDir());
+          } catch (IOException ioe) {
+            throw new RuntimeException(ioe);
+          }
+          FileJournalManager.addStreamsToCollectionFromFiles(editFiles, streams,
+              fromTxId, inProgressOk);
+        }
+      }
       
     }
     

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java Mon Oct 22 20:43:16 2012
@@ -112,4 +112,10 @@ public interface FSNamesystemMBean {
    * @return number of dead data nodes
    */
   public int getNumDeadDataNodes();
+  
+  /**
+   * Number of stale data nodes
+   * @return number of stale data nodes
+   */
+  public int getNumStaleDataNodes();
 }
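
A hedged sketch (not from this commit) of reading the new metric over JMX from inside the NameNode JVM. The object name Hadoop:service=NameNode,name=FSNamesystemState is the usual registration for FSNamesystemMBean, and the attribute name NumStaleDataNodes is assumed to follow the standard getter-to-attribute mapping; a remote reader would additionally need a JMX connector.

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    // Hypothetical in-process probe for the new stale-datanode count.
    public class StaleDataNodeProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName fsState =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        // Assumed attribute name derived from getNumStaleDataNodes().
        Object stale = mbs.getAttribute(fsState, "NumStaleDataNodes");
        System.out.println("NumStaleDataNodes = " + stale);
      }
    }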

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Mon Oct 22 20:43:16 2012
@@ -82,9 +82,9 @@ public class DatanodeRegistration extend
   public String toString() {
     return getClass().getSimpleName()
       + "(" + getIpAddr()
-      + ", storageID=" + storageID
-      + ", infoPort=" + infoPort
-      + ", ipcPort=" + ipcPort
+      + ", storageID=" + getStorageID()
+      + ", infoPort=" + getInfoPort()
+      + ", ipcPort=" + getIpcPort()
       + ", storageInfo=" + storageInfo
       + ")";
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Mon Oct 22 20:43:16 2012
@@ -165,7 +165,7 @@ class ImageLoaderCurrent implements Imag
 
       if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
         boolean isCompressed = in.readBoolean();
-        v.visit(ImageElement.IS_COMPRESSED, imageVersion);
+        v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
         if (isCompressed) {
           String codecClassName = Text.readString(in);
           v.visit(ImageElement.COMPRESS_CODEC, codecClassName);

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1397381-1401062

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Mon Oct 22 20:43:16 2012
@@ -54,12 +54,7 @@ message OpReadBlockProto {
 
 
 message ChecksumProto {
-  enum ChecksumType {
-    NULL = 0;
-    CRC32 = 1;
-    CRC32C = 2;
-  }
-  required ChecksumType type = 1;
+  required ChecksumTypeProto type = 1;
   required uint32 bytesPerChecksum = 2;
 }
   

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Oct 22 20:43:16 2012
@@ -358,7 +358,7 @@
 
 <property>
   <name>dfs.blocksize</name>
-  <value>67108864</value>
+  <value>134217728</value>
   <description>
       The default block size for new files, in bytes.
       You can use the following suffix (case insensitive):
@@ -660,6 +660,20 @@
   edits in order to start again.
   Typically each edit is on the order of a few hundred bytes, so the default
   of 1 million edits should be on the order of hundreds of MBs or low GBs.
+
+  NOTE: Fewer extra edits may be retained than the value specified for this setting
+  if doing so would mean that more segments would be retained than the number
+  configured by dfs.namenode.max.extra.edits.segments.retained.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.max.extra.edits.segments.retained</name>
+  <value>10000</value>
+  <description>The maximum number of extra edit log segments which should be retained
+  beyond what is minimally necessary for a NN restart. When used in conjunction with
+  dfs.namenode.num.extra.edits.retained, this configuration property serves to cap
+  the number of extra edits files to a reasonable value.
   </description>
 </property>
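
The two retention settings above work together: dfs.namenode.num.extra.edits.retained sets a target number of extra transactions, and dfs.namenode.max.extra.edits.segments.retained caps how many segments that target may translate into (the interplay implemented in the NNStorageRetentionManager change earlier in this commit). Below is a minimal sketch, outside this commit, of setting both keys programmatically via the DFSConfigKeys constants that appear in the diff; the class name and values are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Hypothetical example values; the defaults already match hdfs-default.xml.
    public class RetentionConfigExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Target: keep ~1 million extra transactions beyond what a restart needs.
        conf.setLong(
            DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 1000000L);
        // Cap: but never keep more than 10000 extra edit log segments.
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_KEY, 10000);
        System.out.println("extra edits retained: " + conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, -1));
      }
    }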
 

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1397381-1401062

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1397381-1401062

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1397381-1401062

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1397381-1401062

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Mon Oct 22 20:43:16 2012
@@ -17,18 +17,16 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.*;
 
 public class TestGlobPaths {
 
@@ -49,26 +47,377 @@ public class TestGlobPaths {
   static private MiniDFSCluster dfsCluster;
   static private FileSystem fs;
   static final private int NUM_OF_PATHS = 4;
-  static final String USER_DIR = "/user/"+System.getProperty("user.name");
+  static private String USER_DIR;
   private Path[] path = new Path[NUM_OF_PATHS];
 
-  @Before
-  public void setUp() throws Exception {
-    try {
-      Configuration conf = new HdfsConfiguration();
-      dfsCluster = new MiniDFSCluster.Builder(conf).build();
-      fs = FileSystem.get(conf);
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
+  @BeforeClass
+  public static void setUp() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    dfsCluster = new MiniDFSCluster.Builder(conf).build();
+    fs = FileSystem.get(conf);
+    USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
   }
   
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
     }
   }
+
+  @Test
+  public void testMultiGlob() throws IOException {
+    FileStatus[] status;
+    /*
+     *  /dir1/subdir1
+     *  /dir1/subdir1/f1
+     *  /dir1/subdir1/f2
+     *  /dir1/subdir2/f1
+     *  /dir2/subdir1
+     *  /dir2/subdir2
+     *  /dir2/subdir2/f1
+     *  /dir3/f1
+     *  /dir3/f1
+     *  /dir3/f2(dir)
+     *  /dir3/subdir2(file)
+     *  /dir3/subdir3
+     *  /dir3/subdir3/f1
+     *  /dir3/subdir3/f1/f1
+     *  /dir3/subdir3/f3
+     *  /dir4
+     */
+
+    Path d1 = new Path(USER_DIR, "dir1");
+    Path d11 = new Path(d1, "subdir1");
+    Path d12 = new Path(d1, "subdir2");
+    
+    Path f111 = new Path(d11, "f1");
+    fs.createNewFile(f111);
+    Path f112 = new Path(d11, "f2");
+    fs.createNewFile(f112);
+    Path f121 = new Path(d12, "f1");
+    fs.createNewFile(f121);
+    
+    Path d2 = new Path(USER_DIR, "dir2");
+    Path d21 = new Path(d2, "subdir1");
+    fs.mkdirs(d21);
+    Path d22 = new Path(d2, "subdir2");
+    Path f221 = new Path(d22, "f1");
+    fs.createNewFile(f221);
+
+    Path d3 = new Path(USER_DIR, "dir3");
+    Path f31 = new Path(d3, "f1");
+    fs.createNewFile(f31);
+    Path d32 = new Path(d3, "f2");
+    fs.mkdirs(d32);
+    Path f32 = new Path(d3, "subdir2"); // fake as a subdir!
+    fs.createNewFile(f32);
+    Path d33 = new Path(d3, "subdir3");
+    Path f333 = new Path(d33, "f3");
+    fs.createNewFile(f333);
+    Path d331 = new Path(d33, "f1");
+    Path f3311 = new Path(d331, "f1");
+    fs.createNewFile(f3311);
+    Path d4 = new Path(USER_DIR, "dir4");
+    fs.mkdirs(d4);
+
+    /*
+     * basic 
+     */
+    Path root = new Path(USER_DIR);
+    status = fs.globStatus(root);
+    checkStatus(status, root);
+    
+    status = fs.globStatus(new Path(USER_DIR, "x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path("x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "x/x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path("x/x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "*"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path("*"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path(USER_DIR, "*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("x/*"));
+    checkStatus(status);
+
+    // make sure full pattern is scanned instead of bailing early with undef
+    status = fs.globStatus(new Path(USER_DIR, "x/x/x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("x/x/x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path("*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    /*
+     * one level deep
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path("dir*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path("dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/f*"));
+    checkStatus(status, f31, d32);
+
+    status = fs.globStatus(new Path("dir*/f*"));
+    checkStatus(status, f31, d32);
+
+    /*
+     * subdir1 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1"));
+    checkStatus(status, d11, d21);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*"));
+    checkStatus(status, f111, f112);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x*"));
+    checkStatus(status);
+
+    /*
+     * subdir2 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2"));
+    checkStatus(status, d12, d22, f32);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*"));
+    checkStatus(status, f121, f221);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*/*"));
+    checkStatus(status);
+
+    /*
+     * subdir3 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3"));
+    checkStatus(status, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*"));
+    checkStatus(status, d331, f333); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*/*"));
+    checkStatus(status);
+
+    /*
+     * file1 single dir globs
+     */    
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1"));
+    checkStatus(status, f111);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*"));
+    checkStatus(status, f111);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*/*"));
+    checkStatus(status);
+
+    /*
+     * file1 multi-dir globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1"));
+    checkStatus(status, f111, f121, f221, d331);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*"));
+    checkStatus(status, f111, f121, f221, d331);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*/*"));
+    checkStatus(status);
+
+    /*
+     *  file glob multiple files
+     */
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*"));
+    checkStatus(status, f111, f112, f121, f221, d331, f333); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*"));
+    checkStatus(status, f111, f112, f121, f221, d331, f333);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/f1"));
+    checkStatus(status, f3311); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/*"));
+    checkStatus(status, f3311); 
+
+
+    // doesn't exist
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x,y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("dir*/{x,y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("dir*/{f1,y}"));
+    checkStatus(status, f31);
+
+    status = fs.globStatus(new Path("{x,y}"));
+    checkStatus(status);
+    
+    status = fs.globStatus(new Path("/{x/x,y/y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x/x,y/y}"));
+    checkStatus(status);
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR));
+    checkStatus(status, new Path(USER_DIR));
+
+    status = fs.globStatus(new Path(USER_DIR+"{/dir1}"));
+    checkStatus(status, d1);
+
+    status = fs.globStatus(new Path(USER_DIR+"{/dir*}"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    /* 
+     * true filter
+     */
+
+    PathFilter trueFilter = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        return true;
+      }
+    };
+
+    status = fs.globStatus(new Path(Path.SEPARATOR), trueFilter);
+    checkStatus(status, new Path(Path.SEPARATOR));
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR), trueFilter);
+    checkStatus(status, new Path(USER_DIR));    
+
+    status = fs.globStatus(d1, trueFilter);
+    checkStatus(status, d1);
+
+    status = fs.globStatus(new Path(USER_DIR), trueFilter);
+    checkStatus(status, new Path(USER_DIR));
+
+    status = fs.globStatus(new Path(USER_DIR, "*"), trueFilter);
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path("/x/*"), trueFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x"), trueFilter);
+    assertNull(status);
+
+    status = fs.globStatus(new Path("/x/x"), trueFilter);
+    assertNull(status);
+    
+    /*
+     * false filter
+     */
+    PathFilter falseFilter = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        return false;
+      }
+    };
+
+    status = fs.globStatus(new Path(Path.SEPARATOR), falseFilter);
+    assertNull(status);
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR), falseFilter);
+    assertNull(status);    
+    
+    status = fs.globStatus(new Path(USER_DIR), falseFilter);
+    assertNull(status);
+    
+    status = fs.globStatus(new Path(USER_DIR, "*"), falseFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x/*"), falseFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x"), falseFilter);
+    assertNull(status);
+
+    status = fs.globStatus(new Path("/x/x"), falseFilter);
+    assertNull(status);
+  }
+  
+  private void checkStatus(FileStatus[] status, Path ... expectedMatches) {
+    assertNotNull(status);
+    String[] paths = new String[status.length];
+    for (int i=0; i < status.length; i++) {
+      paths[i] = getPathFromStatus(status[i]);
+    }
+    String got = StringUtils.join(paths, "\n");
+    String expected = StringUtils.join(expectedMatches, "\n");
+    assertEquals(expected, got);
+  }
+
+  private String getPathFromStatus(FileStatus status) {
+    return status.getPath().toUri().getPath();
+  }
+  
   
   @Test
   public void testPathFilter() throws IOException {
@@ -98,21 +447,7 @@ public class TestGlobPaths {
   }
   
   @Test
-  public void testGlob() throws Exception {
-    //pTestEscape(); // need to wait until HADOOP-1995 is fixed
-    pTestJavaRegexSpecialChars();
-    pTestCurlyBracket();
-    pTestLiteral();
-    pTestAny();
-    pTestClosure();
-    pTestSet();
-    pTestRange();
-    pTestSetExcl();
-    pTestCombination();
-    pTestRelativePath();
-  }
-  
-  private void pTestLiteral() throws IOException {
+  public void pTestLiteral() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a2c", USER_DIR+"/abc.d"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/abc.d", files);
@@ -123,7 +458,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestEscape() throws IOException {
+  @Test
+  public void pTestEscape() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/ab\\[c.d"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
@@ -134,7 +470,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestAny() throws IOException {
+  @Test
+  public void pTestAny() throws IOException {
     try {
       String [] files = new String[] { USER_DIR+"/abc", USER_DIR+"/a2c",
                                        USER_DIR+"/a.c", USER_DIR+"/abcd"};
@@ -148,15 +485,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestClosure() throws IOException {
-    pTestClosure1();
-    pTestClosure2();
-    pTestClosure3();
-    pTestClosure4();
-    pTestClosure5();
-  }
-  
-  private void pTestClosure1() throws IOException {
+  @Test
+  public void pTestClosure1() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a", USER_DIR+"/abc",
                                       USER_DIR+"/abc.p", USER_DIR+"/bacd"};
@@ -170,7 +500,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestClosure2() throws IOException {
+  @Test
+  public void pTestClosure2() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a.", USER_DIR+"/a.txt",
                                      USER_DIR+"/a.old.java", USER_DIR+"/.java"};
@@ -184,7 +515,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestClosure3() throws IOException {
+  @Test
+  public void pTestClosure3() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.txt.x", USER_DIR+"/ax",
                                       USER_DIR+"/ab37x", USER_DIR+"/bacd"};
@@ -198,7 +530,8 @@ public class TestGlobPaths {
     } 
   }
 
-  private void pTestClosure4() throws IOException {
+  @Test
+  public void pTestClosure4() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/dir1/file1", 
                                       USER_DIR+"/dir2/file2", 
@@ -212,7 +545,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestClosure5() throws IOException {
+  @Test
+  public void pTestClosure5() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/dir1/file1", 
                                       USER_DIR+"/file1"};
@@ -224,7 +558,8 @@ public class TestGlobPaths {
     }
   }
 
-  private void pTestSet() throws IOException {
+  @Test
+  public void pTestSet() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.c", USER_DIR+"/a.cpp",
                                       USER_DIR+"/a.hlp", USER_DIR+"/a.hxy"};
@@ -238,7 +573,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestRange() throws IOException {
+  @Test
+  public void pTestRange() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
                                       USER_DIR+"/a.f", USER_DIR+"/a.h"};
@@ -252,7 +588,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestSetExcl() throws IOException {
+  @Test
+  public void pTestSetExcl() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
                                       USER_DIR+"/a.0", USER_DIR+"/a.h"};
@@ -265,7 +602,8 @@ public class TestGlobPaths {
     }
   }
 
-  private void pTestCombination() throws IOException {
+  @Test
+  public void pTestCombination() throws IOException {
     try {    
       String [] files = new String[] {"/user/aa/a.c", "/user/bb/a.cpp",
                                       "/user1/cc/b.hlp", "/user/dd/a.hxy"};
@@ -277,7 +615,8 @@ public class TestGlobPaths {
     }
   }
   
-  private void pTestRelativePath() throws IOException {
+  @Test
+  public void pTestRelativePath() throws IOException {
     try {
       String [] files = new String[] {"a", "abc", "abc.p", "bacd"};
       Path[] matchedPath = prepareTesting("a*", files);
@@ -291,7 +630,8 @@ public class TestGlobPaths {
   }
   
   /* Test {xx,yy} */
-  private void pTestCurlyBracket() throws IOException {
+  @Test
+  public void pTestCurlyBracket() throws IOException {
     Path[] matchedPath;
     String [] files;
     try {
@@ -390,7 +730,8 @@ public class TestGlobPaths {
   }
   
   /* test that a path name can contain Java regex special characters */
-  private void pTestJavaRegexSpecialChars() throws IOException {
+  @Test
+  public void pTestJavaRegexSpecialChars() throws IOException {
     try {
       String[] files = new String[] {USER_DIR+"/($.|+)bc", USER_DIR+"/abc"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/($.|+)*", files);
@@ -401,6 +742,7 @@ public class TestGlobPaths {
     }
 
   }
+  
   private Path[] prepareTesting(String pattern, String[] files)
     throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
@@ -437,8 +779,9 @@ public class TestGlobPaths {
     return globResults;
   }
   
-  private void cleanupDFS() throws IOException {
-    fs.delete(new Path("/user"), true);
+  @After
+  public void cleanupDFS() throws IOException {
+    fs.delete(new Path(USER_DIR), true);
   }
   
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java Mon Oct 22 20:43:16 2012
@@ -291,4 +291,39 @@ public class TestFileAppend4 {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test the update of NeededReplications for the appended block
+   */
+  @Test(timeout = 60000)
+  public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    DistributedFileSystem fileSystem = null;
+    try {
+      // create a file.
+      fileSystem = cluster.getFileSystem();
+      Path f = new Path("/testAppend");
+      FSDataOutputStream create = fileSystem.create(f, (short) 2);
+      create.write("/testAppend".getBytes());
+      create.close();
+
+      // Append to the file.
+      FSDataOutputStream append = fileSystem.append(f);
+      append.write("/testAppend".getBytes());
+      append.close();
+
+      // Start a new datanode
+      cluster.startDataNodes(conf, 1, true, null, null);
+
+      // Check for replications
+      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
+    } finally {
+      if (null != fileSystem) {
+        fileSystem.close();
+      }
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java Mon Oct 22 20:43:16 2012
@@ -20,45 +20,40 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.apache.log4j.Level;
+import org.apache.hadoop.metrics2.util.Quantile;
+import org.apache.hadoop.metrics2.util.SampleQuantiles;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
+import com.google.common.base.Stopwatch;
+
 /**
  * This class tests hflushing concurrently from many threads.
  */
 public class TestMultiThreadedHflush {
   static final int blockSize = 1024*1024;
-  static final int numBlocks = 10;
-  static final int fileSize = numBlocks * blockSize + 1;
 
   private static final int NUM_THREADS = 10;
   private static final int WRITE_SIZE = 517;
   private static final int NUM_WRITES_PER_THREAD = 1000;
   
   private byte[] toWrite = null;
-
-  {
-    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
-  }
+  
+  private final SampleQuantiles quantiles = new SampleQuantiles(
+      new Quantile[] {
+        new Quantile(0.50, 0.050),
+        new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
+        new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) });
 
   /*
    * creates a file but does not close it
@@ -104,8 +99,11 @@ public class TestMultiThreadedHflush {
     }
 
     private void doAWrite() throws IOException {
+      Stopwatch sw = new Stopwatch().start();
       stm.write(toWrite);
       stm.hflush();
+      long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+      quantiles.insert(micros);
     }
   }
 
@@ -115,14 +113,28 @@ public class TestMultiThreadedHflush {
    * They all finish before the file is closed.
    */
   @Test
-  public void testMultipleHflushers() throws Exception {
+  public void testMultipleHflushersRepl1() throws Exception {
+    doTestMultipleHflushers(1);
+  }
+  
+  @Test
+  public void testMultipleHflushersRepl3() throws Exception {
+    doTestMultipleHflushers(3);
+  }
+  
+  private void doTestMultipleHflushers(int repl) throws Exception {
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(repl)
+        .build();
 
     FileSystem fs = cluster.getFileSystem();
     Path p = new Path("/multiple-hflushers.dat");
     try {
-      doMultithreadedWrites(conf, p, NUM_THREADS, WRITE_SIZE, NUM_WRITES_PER_THREAD);
+      doMultithreadedWrites(conf, p, NUM_THREADS, WRITE_SIZE,
+          NUM_WRITES_PER_THREAD, repl);
+      System.out.println("Latency quantiles (in microseconds):\n" +
+          quantiles);
     } finally {
       fs.close();
       cluster.shutdown();
@@ -200,13 +212,13 @@ public class TestMultiThreadedHflush {
   }
 
   public void doMultithreadedWrites(
-    Configuration conf, Path p, int numThreads, int bufferSize, int numWrites)
-    throws Exception {
+      Configuration conf, Path p, int numThreads, int bufferSize, int numWrites,
+      int replication) throws Exception {
     initBuffer(bufferSize);
 
     // create a new file.
     FileSystem fs = p.getFileSystem(conf);
-    FSDataOutputStream stm = createFile(fs, p, 1);
+    FSDataOutputStream stm = createFile(fs, p, replication);
     System.out.println("Created file simpleFlush.dat");
 
     // There have been a couple issues with flushing empty buffers, so do
@@ -240,20 +252,41 @@ public class TestMultiThreadedHflush {
   }
 
   public static void main(String args[]) throws Exception {
-    if (args.length != 1) {
-      System.err.println(
-        "usage: " + TestMultiThreadedHflush.class.getSimpleName() +
-        " <path to test file> ");
-      System.exit(1);
+    System.exit(ToolRunner.run(new CLIBenchmark(), args));
+  }
+  
+  private static class CLIBenchmark extends Configured implements Tool {
+    public int run(String args[]) throws Exception {
+      if (args.length != 1) {
+        System.err.println(
+          "usage: " + TestMultiThreadedHflush.class.getSimpleName() +
+          " <path to test file> ");
+        System.err.println(
+            "Configurations settable by -D options:\n" +
+            "  num.threads [default 10] - how many threads to run\n" +
+            "  write.size [default 511] - bytes per write\n" +
+            "  num.writes [default 50000] - how many writes to perform");
+        System.exit(1);
+      }
+      TestMultiThreadedHflush test = new TestMultiThreadedHflush();
+      Configuration conf = getConf();
+      Path p = new Path(args[0]);
+      
+      int numThreads = conf.getInt("num.threads", 10);
+      int writeSize = conf.getInt("write.size", 511);
+      int numWrites = conf.getInt("num.writes", 50000);
+      int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      
+      Stopwatch sw = new Stopwatch().start();
+      test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
+          replication);
+      sw.stop();
+  
+      System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+      System.out.println("Latency quantiles (in microseconds):\n" +
+          test.quantiles);
+      return 0;
     }
-    TestMultiThreadedHflush test = new TestMultiThreadedHflush();
-    Configuration conf = new Configuration();
-    Path p = new Path(args[0]);
-    long st = System.nanoTime();
-    test.doMultithreadedWrites(conf, p, 10, 511, 50000);
-    long et = System.nanoTime();
-
-    System.out.println("Finished in " + ((et - st) / 1000000) + "ms");
   }
-
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Mon Oct 22 20:43:16 2012
@@ -372,7 +372,7 @@ public class TestBalancer {
    * Test parse method in Balancer#Cli class with threshold value out of
    * boundaries.
    */
-  @Test
+  @Test(timeout=100000)
   public void testBalancerCliParseWithThresholdOutOfBoundaries() {
     String parameters[] = new String[] { "-threshold", "0" };
     String reason = "IllegalArgumentException is expected when threshold value"
@@ -394,7 +394,7 @@ public class TestBalancer {
   
   /** Test a cluster with even distribution, 
    * then a new empty node is added to the cluster*/
-  @Test
+  @Test(timeout=100000)
   public void testBalancer0() throws Exception {
     testBalancer0Internal(new HdfsConfiguration());
   }
@@ -406,7 +406,7 @@ public class TestBalancer {
   }
 
   /** Test unevenly distributed cluster */
-  @Test
+  @Test(timeout=100000)
   public void testBalancer1() throws Exception {
     testBalancer1Internal(new HdfsConfiguration());
   }
@@ -419,7 +419,7 @@ public class TestBalancer {
         new String[] {RACK0, RACK1});
   }
   
-  @Test
+  @Test(timeout=100000)
   public void testBalancer2() throws Exception {
     testBalancer2Internal(new HdfsConfiguration());
   }
@@ -467,8 +467,7 @@ public class TestBalancer {
   /**
    * Test parse method in Balancer#Cli class with wrong number of params
    */
-
-  @Test
+  @Test(timeout=100000)
   public void testBalancerCliParseWithWrongParams() {
     String parameters[] = new String[] { "-threshold" };
     String reason =

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Mon Oct 22 20:43:16 2012
@@ -191,4 +191,12 @@ public class BlockManagerTestUtil {
         "Must use default policy, got %s", bpp.getClass());
     ((BlockPlacementPolicyDefault)bpp).setPreferLocalNode(prefer);
   }
+  
+  /**
+   * Call heartbeat check function of HeartbeatManager
+   * @param bm the BlockManager to manipulate
+   */
+  public static void checkHeartbeat(BlockManager bm) {
+    bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Mon Oct 22 20:43:16 2012
@@ -379,7 +379,7 @@ public class TestBlockManager {
   
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getReplication();
+    Mockito.doReturn((short)3).when(bc).getBlockReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java Mon Oct 22 20:43:16 2012
@@ -20,14 +20,30 @@ package org.apache.hadoop.hdfs.server.bl
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.Test;
 
 /**
- * This class tests the internals of PendingReplicationBlocks.java
+ * This class tests the internals of PendingReplicationBlocks.java,
+ * as well as how PendingReplicationBlocks acts in BlockManager
  */
 public class TestPendingReplication {
   final static int TIMEOUT = 3;     // 3 seconds
+  private static final int DFS_REPLICATION_INTERVAL = 1;
+  // Number of datanodes in the cluster
+  private static final int DATANODE_COUNT = 5;
 
   @Test
   public void testPendingReplication() {
@@ -40,7 +56,7 @@ public class TestPendingReplication {
     //
     for (int i = 0; i < 10; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.add(block, i);
+      pendingReplications.increment(block, i);
     }
     assertEquals("Size of pendingReplications ",
                  10, pendingReplications.size());
@@ -50,15 +66,15 @@ public class TestPendingReplication {
     // remove one item and reinsert it
     //
     Block blk = new Block(8, 8, 0);
-    pendingReplications.remove(blk);             // removes one replica
+    pendingReplications.decrement(blk);             // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
                  7, pendingReplications.getNumReplicas(blk));
 
     for (int i = 0; i < 7; i++) {
-      pendingReplications.remove(blk);           // removes all replicas
+      pendingReplications.decrement(blk);           // removes all replicas
     }
     assertTrue(pendingReplications.size() == 9);
-    pendingReplications.add(blk, 8);
+    pendingReplications.increment(blk, 8);
     assertTrue(pendingReplications.size() == 10);
 
     //
@@ -86,7 +102,7 @@ public class TestPendingReplication {
 
     for (int i = 10; i < 15; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.add(block, i);
+      pendingReplications.increment(block, i);
     }
     assertTrue(pendingReplications.size() == 15);
 
@@ -116,4 +132,70 @@ public class TestPendingReplication {
     }
     pendingReplications.stop();
   }
+  
+  /**
+   * Test that the BlockManager correctly removes the corresponding pending
+   * replication records when a file is deleted
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testPendingAndInvalidate() throws Exception {
+    final Configuration CONF = new HdfsConfiguration();
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFS_REPLICATION_INTERVAL);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
+        DFS_REPLICATION_INTERVAL);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
+        DATANODE_COUNT).build();
+    cluster.waitActive();
+    
+    FSNamesystem namesystem = cluster.getNamesystem();
+    BlockManager bm = namesystem.getBlockManager();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      // 1. create a file
+      Path filePath = new Path("/tmp.txt");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
+      
+      // 2. disable the heartbeats
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      }
+      
+      // 3. mark a couple of blocks as corrupt
+      LocatedBlock block = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
+      cluster.getNamesystem().writeLock();
+      try {
+        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
+            "TEST");
+        bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
+            "TEST");
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+      BlockManagerTestUtil.computeAllPendingWork(bm);
+      BlockManagerTestUtil.updateState(bm);
+      assertEquals(1L, bm.getPendingReplicationBlocksCount());
+      assertEquals(2, bm.pendingReplications.getNumReplicas(block.getBlock()
+          .getLocalBlock()));
+      
+      // 4. delete the file
+      fs.delete(filePath, true);
+      // Retry at most 10 times, sleeping 1s between attempts. Note that 10s
+      // is much less than the default pending-record timeout (5~10 min).
+      int retries = 10; 
+      long pendingNum = bm.getPendingReplicationBlocksCount();
+      while (pendingNum != 0 && retries-- > 0) {
+        Thread.sleep(1000);  // let NN do the deletion
+        BlockManagerTestUtil.updateState(bm);
+        pendingNum = bm.getPendingReplicationBlocksCount();
+      }
+      assertEquals(0L, pendingNum);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
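
For reference, a minimal self-contained sketch (not part of the commit) of the
bounded-retry pattern testPendingAndInvalidate uses to wait for the NameNode to
clear the pending-replication count. The Callable-based metric and every class
and method name below are hypothetical stand-ins for
bm.getPendingReplicationBlocksCount(); only the poll-sleep-recheck shape mirrors
the test above.

import java.util.concurrent.Callable;

public class WaitForValueSketch {
  /** Polls the metric once per second, at most maxRetries times. */
  static boolean waitForValue(Callable<Long> metric, long expected,
      int maxRetries) throws Exception {
    long current = metric.call();
    while (current != expected && maxRetries-- > 0) {
      Thread.sleep(1000);          // give the system under test time to act
      current = metric.call();     // re-read the metric after the pause
    }
    return current == expected;
  }

  public static void main(String[] args) throws Exception {
    // Toy metric that counts down to zero over successive reads.
    final long[] reads = { 3, 2, 1, 0 };
    final int[] next = { 0 };
    Callable<Long> metric = new Callable<Long>() {
      @Override
      public Long call() {
        return reads[Math.min(next[0]++, reads.length - 1)];
      }
    };
    System.out.println(waitForValue(metric, 0L, 10));  // prints "true"
  }
}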

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java Mon Oct 22 20:43:16 2012
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.junit.Test;
 
 public class TestUnderReplicatedBlocks {
-  @Test
+  @Test(timeout=300000) // 5 min timeout
   public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
     Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = 2;
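
For context on the timeout added above: JUnit 4's @Test annotation takes a
timeout element in milliseconds and fails the test if it runs longer, which
keeps a hung MiniDFSCluster from stalling the whole suite. A minimal sketch
(the class and test names here are illustrative, not from the commit):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TimeoutSketch {
  // Fails automatically if the body runs longer than 300,000 ms (5 minutes),
  // the same budget given to testSetrepIncWithUnderReplicatedBlocks above.
  @Test(timeout = 300000)
  public void completesWellWithinTheBudget() {
    assertEquals(4, 2 + 2);
  }
}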

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Mon Oct 22 20:43:16 2012
@@ -74,7 +74,7 @@ public class TestMultipleNNDataBlockScan
     }
   }
   
-  @Test
+  @Test(timeout=120000)
   public void testDataBlockScanner() throws IOException, InterruptedException {
     setUp();
     try {
@@ -97,7 +97,7 @@ public class TestMultipleNNDataBlockScan
     }
   }
   
-  @Test
+  @Test(timeout=120000)
   public void testBlockScannerAfterRefresh() throws IOException,
       InterruptedException {
     setUp();
@@ -149,7 +149,7 @@ public class TestMultipleNNDataBlockScan
     }
   }
   
-  @Test
+  @Test(timeout=120000)
   public void testBlockScannerAfterRestart() throws IOException,
       InterruptedException {
     setUp();
@@ -176,7 +176,7 @@ public class TestMultipleNNDataBlockScan
     }
   }
   
-  @Test
+  @Test(timeout=120000)
   public void test2NNBlockRescanInterval() throws IOException {
     ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
     Configuration conf = new HdfsConfiguration();
@@ -206,7 +206,7 @@ public class TestMultipleNNDataBlockScan
    * 
    * @throws Exception
    */
-  @Test
+  @Test(timeout=120000)
   public void testBlockRescanInterval() throws IOException {
     ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Mon Oct 22 20:43:16 2012
@@ -117,7 +117,7 @@ public class TestAuditLogs {
     int val = istream.read();
     istream.close();
     verifyAuditLogs(true);
-    assertTrue("failed to read from file", val > 0);
+    assertTrue("failed to read from file", val >= 0);
   }
 
   /** test that allowed stat puts proper entry in audit log */
@@ -168,7 +168,7 @@ public class TestAuditLogs {
     istream.close();
 
     verifyAuditLogsRepeat(true, 3);
-    assertTrue("failed to read from file", val > 0);
+    assertTrue("failed to read from file", val >= 0);
   }
 
   /** test that stat via webhdfs puts proper entry in audit log */
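
For reference on the "val > 0" to "val >= 0" change above: InputStream.read()
returns the byte read as an int in the range 0 to 255, or -1 at end of stream,
so successfully reading a zero byte yields 0 and only -1 means no data was
returned. A small stand-alone illustration (the class name is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadReturnValueSketch {
  public static void main(String[] args) throws IOException {
    // A one-byte stream whose only byte happens to be zero.
    InputStream istream = new ByteArrayInputStream(new byte[] { 0 });
    int val = istream.read();
    istream.close();
    // val is 0: the read succeeded, so the right success check is val >= 0;
    // only -1 signals end-of-stream.
    System.out.println("read() returned " + val + ", at EOF: " + (val == -1));
  }
}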

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Oct 22 20:43:16 2012
@@ -48,7 +48,7 @@ public class TestINodeFile {
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getReplication());
+                 inf.getBlockReplication());
   }
 
   /**

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java Mon Oct 22 20:43:16 2012
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.ser
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -196,6 +197,35 @@ public class TestNNStorageRetentionManag
     runTest(tc);
   }
   
+  @Test
+  public void testRetainExtraLogsLimitedSegments() throws IOException {
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,
+        150);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_KEY, 2);
+    TestCaseDescription tc = new TestCaseDescription();
+    tc.addRoot("/foo1", NameNodeDirType.IMAGE);
+    tc.addRoot("/foo2", NameNodeDirType.EDITS);
+    tc.addImage("/foo1/current/" + getImageFileName(100), true);
+    tc.addImage("/foo1/current/" + getImageFileName(200), true);
+    tc.addImage("/foo1/current/" + getImageFileName(300), false);
+    tc.addImage("/foo1/current/" + getImageFileName(400), false);
+
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true);
+    // Without lowering the max segments to retain, we'd retain all segments
+    // going back to txid 150 (300 - 150).
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true);
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true);
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true);
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true);
+    // Only retain 2 extra segments. The 301-400 segment is considered required,
+    // not extra.
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false);
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false);
+    tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 400), false);
+    tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
+    runTest(tc);
+  }
+  
   private void runTest(TestCaseDescription tc) throws IOException {
     StoragePurger mockPurger =
       Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
@@ -287,8 +317,10 @@ public class TestNNStorageRetentionManag
       return mockStorageForDirs(sds.toArray(new StorageDirectory[0]));
     }
     
+    @SuppressWarnings("unchecked")
     public FSEditLog mockEditLog(StoragePurger purger) {
       final List<JournalManager> jms = Lists.newArrayList();
+      final JournalSet journalSet = new JournalSet(0);
       for (FakeRoot root : dirRoots.values()) {
         if (!root.type.isOfType(NameNodeDirType.EDITS)) continue;
         
@@ -297,6 +329,7 @@ public class TestNNStorageRetentionManag
             root.mockStorageDir(), null);
         fjm.purger = purger;
         jms.add(fjm);
+        journalSet.add(fjm, false);
       }
 
       FSEditLog mockLog = Mockito.mock(FSEditLog.class);
@@ -314,6 +347,18 @@ public class TestNNStorageRetentionManag
           return null;
         }
       }).when(mockLog).purgeLogsOlderThan(Mockito.anyLong());
+      
+      Mockito.doAnswer(new Answer<Void>() {
+        
+        @Override
+        public Void answer(InvocationOnMock invocation) throws Throwable {
+          Object[] args = invocation.getArguments();
+          journalSet.selectInputStreams((Collection<EditLogInputStream>)args[0],
+              (long)((Long)args[1]), (boolean)((Boolean)args[2]));
+          return null;
+        }
+      }).when(mockLog).selectInputStreams(Mockito.anyCollection(),
+          Mockito.anyLong(), Mockito.anyBoolean());
       return mockLog;
     }
   }
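
For context, the stubbing added above uses Mockito's doAnswer to forward
selectInputStreams calls on the FSEditLog mock to a real JournalSet, so the
retention tests exercise real stream selection. A self-contained sketch of that
delegation pattern, using a hypothetical Journal interface in place of the
HDFS types:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class DelegatingMockSketch {
  /** Hypothetical stand-in for the selectInputStreams() contract. */
  interface Journal {
    void selectInputStreams(Collection<String> streams, long fromTxId,
        boolean inProgressOk);
  }

  /** A trivial "real" implementation the mock delegates to. */
  static class RealJournal implements Journal {
    @Override
    public void selectInputStreams(Collection<String> streams, long fromTxId,
        boolean inProgressOk) {
      streams.add("edits_" + fromTxId + (inProgressOk ? "_inprogress" : ""));
    }
  }

  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    final Journal real = new RealJournal();
    Journal mock = Mockito.mock(Journal.class);

    // Forward the mocked call, arguments and all, to the real object.
    Mockito.doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        Object[] callArgs = invocation.getArguments();
        real.selectInputStreams((Collection<String>) callArgs[0],
            (Long) callArgs[1], (Boolean) callArgs[2]);
        return null;
      }
    }).when(mock).selectInputStreams(Mockito.anyCollection(),
        Mockito.anyLong(), Mockito.anyBoolean());

    List<String> streams = new ArrayList<String>();
    mock.selectInputStreams(streams, 101L, false);
    System.out.println(streams);  // prints [edits_101]
  }
}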

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Mon Oct 22 20:43:16 2012
@@ -41,10 +41,14 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -77,7 +81,8 @@ public class TestNameNodeMetrics {
         DFS_REPLICATION_INTERVAL);
     CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, 
         "" + PERCENTILES_INTERVAL);
-
+    // Enable checking for stale DataNodes
+    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
     ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
       .getLogger().setLevel(Level.DEBUG);
   }
@@ -119,6 +124,40 @@ public class TestNameNodeMetrics {
     stm.close();
   }
   
+  /** Test metrics indicating the number of stale DataNodes */
+  @Test
+  public void testStaleNodes() throws Exception {
+    // Set two datanodes as stale
+    for (int i = 0; i < 2; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      long staleInterval = CONF.getLong(
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().getDatanode(dn.getDatanodeId())
+          .setLastUpdate(Time.now() - staleInterval - 1);
+    }
+    // Let the HeartbeatManager check heartbeats
+    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
+        .getBlockManager());
+    assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
+    
+    // Reset stale datanodes
+    for (int i = 0; i < 2; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().getDatanode(dn.getDatanodeId())
+          .setLastUpdate(Time.now());
+    }
+    
+    // Let the HeartbeatManager refresh its view of the DataNodes
+    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
+        .getBlockManager());
+    assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
+  }
+  
   /** Test metrics associated with addition of a file */
   @Test
   public void testFileAdd() throws Exception {