Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/08/13 23:19:59 UTC

svn commit: r1513658 - in /hadoop/common/branches/HDFS-4949/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/tomcat/ROOT/ hadoop-hdfs-nfs/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/docs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main...

Author: cmccabe
Date: Tue Aug 13 21:19:53 2013
New Revision: 1513658

URL: http://svn.apache.org/r1513658
Log:
merge trunk into HDFS-4949 branch

Modified:
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/package.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/overview.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1509426-1512447

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml Tue Aug 13 21:19:53 2013
@@ -19,9 +19,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project-dist</artifactId>
+    <artifactId>hadoop-project</artifactId>
     <version>3.0.0-SNAPSHOT</version>
-    <relativePath>../../hadoop-project-dist</relativePath>
+    <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-nfs</artifactId>

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Aug 13 21:19:53 2013
@@ -10,12 +10,6 @@ Trunk (Unreleased)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
-    HDFS-4659 Support setting execution bit for regular files (Brandon Li via sanjay)
-
-    HDFS-4762 Provide HDFS based NFSv3 and Mountd implementation (brandonli)
-
-    HDFS-4962 Use enum for nfs constants (Nicholas SZE via jing9)
-
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -241,10 +235,8 @@ Trunk (Unreleased)
     HDFS-3934. duplicative dfs_hosts entries handled wrong. (Colin Patrick
     McCabe)
 
-    HDFS-4948. mvn site for hadoop-hdfs-nfs fails. (brandonli)
-
-    HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
-    instead of 0 to avoid confusing applications. (brandonli)
+    HDFS-4366. Block Replication Policy Implementation May Skip Higher-Priority
+    Blocks for Lower-Priority Blocks (Derek Dagit via kihwal)
 
 Release 2.3.0 - UNRELEASED
 
@@ -279,15 +271,39 @@ Release 2.1.1-beta - UNRELEASED
 
   NEW FEATURES
 
+    HDFS-4962 Use enum for nfs constants (Nicholas SZE via jing9)
+
+    HDFS-5071 Change hdfs-nfs parent project to hadoop-project (brandonli)
+
+    HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON responses may
     contain additional properties.  (szetszwo)
 
+    HDFS-5061. Make FSNameSystem#auditLoggers an unmodifiable list. 
+    (Arpit Agarwal via suresh)
+
+    HDFS-4905. Add appendToFile command to "hdfs dfs". (Arpit Agarwal via
+    cnauroth)
+
+    HDFS-4926. Namenode webserver's page has a tooltip that is inconsistent 
+    with the datanode HTML link. (Vivek Ganesan via jing9)
+
+    HDFS-5047. Suppress logging of full stack trace of quota and lease
+    exceptions. (Robert Parker via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HDFS-5028. LeaseRenewer throws ConcurrentModificationException when timeout.
+    (zhaoyunjiong via szetszwo)
+
+    HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
+    instead of 0 to avoid confusing applications. (brandonli)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES
@@ -339,6 +355,10 @@ Release 2.1.0-beta - 2013-08-06
     HDFS-3495. Update Balancer to support new NetworkTopology with NodeGroup.
     (Junping Du via szetszwo)
 
+    HDFS-4659 Support setting execution bit for regular files (Brandon Li via sanjay)
+
+    HDFS-4762 Provide HDFS based NFSv3 and Mountd implementation (brandonli)
+
     HDFS-4372. Track NameNode startup progress. (cnauroth)
 
     HDFS-4373. Add HTTP API for querying NameNode startup progress. (cnauroth)
@@ -719,6 +739,8 @@ Release 2.1.0-beta - 2013-08-06
     HDFS-4943. WebHdfsFileSystem does not work when original file path has
     encoded chars.  (Jerry He via szetszwo)
 
+    HDFS-4948. mvn site for hadoop-hdfs-nfs fails. (brandonli)
+
     HDFS-4887. TestNNThroughputBenchmark exits abruptly. (kihwal)
 
     HDFS-4980. Incorrect logging.properties file for hadoop-httpfs.

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Tue Aug 13 21:19:53 2013
@@ -57,6 +57,8 @@ function print_usage(){
   echo "                       current directory contents with a snapshot"
   echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
   echo "						Use -help to see options"
+  echo "  portmap              run a portmap service"
+  echo "  nfs3                 run an NFS version 3 gateway"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -149,6 +151,10 @@ elif [ "$COMMAND" = "snapshotDiff" ] ; t
   CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
 elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+elif [ "$COMMAND" = "portmap" ] ; then
+  CLASS=org.apache.hadoop.portmap.Portmap
+elif [ "$COMMAND" = "nfs3" ] ; then
+  CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
 else
   CLASS="$COMMAND"
 fi

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1509426-1512447

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Tue Aug 13 21:19:53 2013
@@ -170,12 +170,11 @@ public class DistributedFileSystem exten
   }
 
   /**
-   * Checks that the passed URI belongs to this filesystem, resolves the path
-   * component against the current working directory if relative, and finally
-   * returns the absolute path component.
+   * Checks that the passed URI belongs to this filesystem and returns
+   * just the path component. Expects a URI with an absolute path.
    * 
-   * @param file URI to check and resolve
-   * @return resolved absolute path component of {file}
+   * @param file URI with absolute path
+   * @return path component of {file}
    * @throws IllegalArgumentException if URI does not belong to this DFS
    */
   private String getPathName(Path file) {
@@ -514,15 +513,10 @@ public class DistributedFileSystem exten
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     statistics.incrementWriteOps(1);
-    // Both Paths have to belong to this DFS
+
     final Path absSrc = fixRelativePart(src);
     final Path absDst = fixRelativePart(dst);
-    FileSystem srcFS = getFSofPath(absSrc, getConf());
-    FileSystem dstFS = getFSofPath(absDst, getConf());
-    if (!srcFS.getUri().equals(getUri()) ||
-        !dstFS.getUri().equals(getUri())) {
-      throw new IOException("Renames across FileSystems not supported");
-    }
+
     // Try the rename without resolving first
     try {
       return dfs.rename(getPathName(absSrc), getPathName(absDst));
@@ -539,7 +533,8 @@ public class DistributedFileSystem exten
         @Override
         public Boolean next(final FileSystem fs, final Path p)
             throws IOException {
-          return fs.rename(source, p);
+          // Should just throw an error in FileSystem#checkPath
+          return doCall(p);
         }
       }.resolve(this, absDst);
     }
@@ -553,15 +548,8 @@ public class DistributedFileSystem exten
   public void rename(Path src, Path dst, final Options.Rename... options)
       throws IOException {
     statistics.incrementWriteOps(1);
-    // Both Paths have to belong to this DFS
     final Path absSrc = fixRelativePart(src);
     final Path absDst = fixRelativePart(dst);
-    FileSystem srcFS = getFSofPath(absSrc, getConf());
-    FileSystem dstFS = getFSofPath(absDst, getConf());
-    if (!srcFS.getUri().equals(getUri()) ||
-        !dstFS.getUri().equals(getUri())) {
-      throw new IOException("Renames across FileSystems not supported");
-    }
     // Try the rename without resolving first
     try {
       dfs.rename(getPathName(absSrc), getPathName(absDst), options);
@@ -579,7 +567,7 @@ public class DistributedFileSystem exten
         @Override
         public Void next(final FileSystem fs, final Path p)
             throws IOException {
-          // Since we know it's this DFS for both, can just call doCall again
+          // Should just throw an error in FileSystem#checkPath
           return doCall(p);
         }
       }.resolve(this, absDst);
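
The net effect of the two rename() hunks above: the explicit cross-filesystem guard is gone, and a src or dst that does not belong to this DFS is expected to fail inside the path check instead (per the getPathName() javadoc, an IllegalArgumentException). A minimal sketch of the caller-visible behavior; the helper name renameOrReport is illustrative, not part of the patch, and fs is assumed to be a DistributedFileSystem obtained elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class RenameSketch {
      // Illustrative helper; 'fs' is assumed to be a DistributedFileSystem
      // obtained via FileSystem.get(conf) against an hdfs:// URI.
      static void renameOrReport(FileSystem fs, Path src, Path dst)
          throws IOException {
        try {
          fs.rename(src, dst);
        } catch (IllegalArgumentException e) {
          // After this change, a path on a foreign filesystem (e.g.
          // file:///tmp/x) is rejected by the path check rather than by a
          // dedicated "Renames across FileSystems not supported" guard.
          System.err.println("path not on this filesystem: " + e);
        }
      }
    }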

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java Tue Aug 13 21:19:53 2013
@@ -449,8 +449,8 @@ class LeaseRenewer {
           LOG.warn("Failed to renew lease for " + clientsString() + " for "
               + (elapsed/1000) + " seconds.  Aborting ...", ie);
           synchronized (this) {
-            for(DFSClient c : dfsclients) {
-              c.abort();
+            while (!dfsclients.isEmpty()) {
+              dfsclients.get(0).abort();
             }
           }
           break;
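
The hunk above is the HDFS-5028 fix: aborting a client ends up removing it from dfsclients as a side effect, so a for-each over that same list throws ConcurrentModificationException; draining from the head never iterates a list that is being mutated. A standalone sketch of the pattern (the names here are illustrative, not the LeaseRenewer code):

    import java.util.ArrayList;
    import java.util.List;

    class DrainSketch {
      static final List<String> clients = new ArrayList<String>();

      // Stand-in for DFSClient#abort(), which removes the client from the
      // renewer's list as a side effect.
      static void abort(String c) {
        clients.remove(c);
      }

      public static void main(String[] args) {
        clients.add("c1");
        clients.add("c2");

        // for (String c : clients) { abort(c); }
        //   would throw ConcurrentModificationException

        // The drain pattern used by the fix: always take the head until empty.
        while (!clients.isEmpty()) {
          abort(clients.get(0));
        }
      }
    }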

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/package.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/overview.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Aug 13 21:19:53 2013
@@ -1208,7 +1208,6 @@ public class BlockManager {
             // abandoned block or block reopened for append
             if(bc == null || bc instanceof MutableBlockCollection) {
               neededReplications.remove(block, priority); // remove from neededReplications
-              neededReplications.decrementReplicationIndex(priority);
               continue;
             }
 
@@ -1235,7 +1234,6 @@ public class BlockManager {
               if ( (pendingReplications.getNumReplicas(block) > 0) ||
                    (blockHasEnoughRacks(block)) ) {
                 neededReplications.remove(block, priority); // remove from neededReplications
-                neededReplications.decrementReplicationIndex(priority);
                 blockLog.info("BLOCK* Removing " + block
                     + " from neededReplications as it has enough replicas");
                 continue;
@@ -1295,7 +1293,6 @@ public class BlockManager {
           if(bc == null || bc instanceof MutableBlockCollection) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
-            neededReplications.decrementReplicationIndex(priority);
             continue;
           }
           requiredReplication = bc.getBlockReplication();
@@ -1309,7 +1306,6 @@ public class BlockManager {
             if ( (pendingReplications.getNumReplicas(block) > 0) ||
                  (blockHasEnoughRacks(block)) ) {
               neededReplications.remove(block, priority); // remove from neededReplications
-              neededReplications.decrementReplicationIndex(priority);
               rw.targets = null;
               blockLog.info("BLOCK* Removing " + block
                   + " from neededReplications as it has enough replicas");
@@ -1346,7 +1342,6 @@ public class BlockManager {
           // remove from neededReplications
           if(numEffectiveReplicas + targets.length >= requiredReplication) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            neededReplications.decrementReplicationIndex(priority);
           }
         }
       }

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java Tue Aug 13 21:19:53 2013
@@ -18,11 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -82,16 +79,12 @@ class UnderReplicatedBlocks implements I
   static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
   /** the queues themselves */
   private List<LightWeightLinkedSet<Block>> priorityQueues
-      = new ArrayList<LightWeightLinkedSet<Block>>();
+      = new ArrayList<LightWeightLinkedSet<Block>>(LEVEL);
 
-  /** Stores the replication index for each priority */
-  private Map<Integer, Integer> priorityToReplIdx = new HashMap<Integer, Integer>(LEVEL);
-  
   /** Create an object. */
   UnderReplicatedBlocks() {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.add(new LightWeightLinkedSet<Block>());
-      priorityToReplIdx.put(i, 0);
     }
   }
 
@@ -310,13 +303,15 @@ class UnderReplicatedBlocks implements I
   
   /**
    * Get a list of block lists to be replicated. The index of block lists
-   * represents its replication priority. Replication index will be tracked for
-   * each priority list separately in priorityToReplIdx map. Iterates through
-   * all priority lists and find the elements after replication index. Once the
-   * last priority lists reaches to end, all replication indexes will be set to
-   * 0 and start from 1st priority list to fulfill the blockToProces count.
-   * 
-   * @param blocksToProcess - number of blocks to fetch from underReplicated blocks.
+   * represents its replication priority. Iterates each block list in priority
+   * order beginning with the highest priority list. Iterators use a bookmark to
+   * resume where the previous iteration stopped. Returns when the block count
+   * is met or iteration reaches the end of the lowest priority list, in which
+   * case bookmarks for each block list are reset to the heads of their
+   * respective lists.
+   *
+   * @param blocksToProcess - number of blocks to fetch from underReplicated
+   *          blocks.
    * @return Return a list of block lists to be replicated. The block list index
    *         represents its replication priority.
    */
@@ -336,12 +331,8 @@ class UnderReplicatedBlocks implements I
     for (int priority = 0; priority < LEVEL; priority++) { 
       // Go through all blocks that need replications with current priority.
       BlockIterator neededReplicationsIterator = iterator(priority);
-      Integer replIndex = priorityToReplIdx.get(priority);
-      
-      // skip to the first unprocessed block, which is at replIndex
-      for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
-        neededReplicationsIterator.next();
-      }
+      // Set the iterator to the first unprocessed block at this priority level.
+      neededReplicationsIterator.setToBookmark();
 
       blocksToProcess = Math.min(blocksToProcess, size());
       
@@ -354,20 +345,18 @@ class UnderReplicatedBlocks implements I
           && neededReplicationsIterator.hasNext()) {
         Block block = neededReplicationsIterator.next();
         blocksToReplicate.get(priority).add(block);
-        replIndex++;
         blockCount++;
       }
       
       if (!neededReplicationsIterator.hasNext()
           && neededReplicationsIterator.getPriority() == LEVEL - 1) {
-        // reset all priorities replication index to 0 because there is no
-        // recently added blocks in any list.
+        // Reset all priorities' bookmarks to the beginning because there were
+        // no recently added blocks in any list.
         for (int i = 0; i < LEVEL; i++) {
-          priorityToReplIdx.put(i, 0);
+          this.priorityQueues.get(i).resetBookmark();
         }
         break;
       }
-      priorityToReplIdx.put(priority, replIndex); 
     }
     return blocksToReplicate;
   }
@@ -450,15 +439,19 @@ class UnderReplicatedBlocks implements I
     int getPriority() {
       return level;
     }
-  }
 
-  /**
-   * This method is to decrement the replication index for the given priority
-   * 
-   * @param priority  - int priority level
-   */
-  public void decrementReplicationIndex(int priority) {
-    Integer replIdx = priorityToReplIdx.get(priority);
-    priorityToReplIdx.put(priority, --replIdx); 
+    /**
+     * Sets iterator(s) to bookmarked elements.
+     */
+    private synchronized void setToBookmark() {
+      if (this.isIteratorForLevel) {
+        this.iterators.set(0, priorityQueues.get(this.level)
+            .getBookmark());
+      } else {
+        for(int i=0; i<LEVEL; i++) {
+          this.iterators.set(i, priorityQueues.get(i).getBookmark());
+        }
+      }
+    }
   }
 }
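
The bookmark scheme that replaces priorityToReplIdx can be pictured as one resumable cursor per list: each scan picks up where the previous one stopped, and all cursors reset once the lowest-priority list is exhausted. A toy illustration of the idea only, not the patched class:

    import java.util.ArrayList;
    import java.util.List;

    // Illustration: a cursor ("bookmark") into a list lets successive scans
    // resume where the previous scan stopped, the idea the patch applies to
    // each priority queue.
    class BookmarkedList<T> {
      private final List<T> items = new ArrayList<T>();
      private int bookmark = 0;

      void add(T t) { items.add(t); }

      // Take up to n items starting at the bookmark, advancing it.
      List<T> takeFromBookmark(int n) {
        List<T> out = new ArrayList<T>();
        while (out.size() < n && bookmark < items.size()) {
          out.add(items.get(bookmark++));
        }
        return out;
      }

      // Once the lowest-priority list is exhausted, start over from the head.
      void resetBookmark() { bookmark = 0; }

      public static void main(String[] args) {
        BookmarkedList<Integer> q = new BookmarkedList<Integer>();
        for (int i = 0; i < 5; i++) q.add(i);
        System.out.println(q.takeFromBookmark(2)); // [0, 1]
        System.out.println(q.takeFromBookmark(2)); // [2, 3], resumed
        q.resetBookmark();
        System.out.println(q.takeFromBookmark(2)); // [0, 1] again
      }
    }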

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Aug 13 21:19:53 2013
@@ -774,7 +774,7 @@ public class FSNamesystem implements Nam
     if (auditLoggers.isEmpty()) {
       auditLoggers.add(new DefaultAuditLogger());
     }
-    return auditLoggers;
+    return Collections.unmodifiableList(auditLoggers);
   }
 
   void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled)
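
The one-line HDFS-5061 change above wraps the logger list so callers of the returning method cannot mutate it. A small standalone demonstration of the Collections.unmodifiableList semantics it relies on:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class UnmodifiableSketch {
      public static void main(String[] args) {
        List<String> loggers = new ArrayList<String>();
        loggers.add("default");
        List<String> view = Collections.unmodifiableList(loggers);
        try {
          view.add("rogue");  // any mutation of the view fails fast
        } catch (UnsupportedOperationException e) {
          System.out.println("mutation rejected");
        }
      }
    }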

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Tue Aug 13 21:19:53 2013
@@ -70,6 +70,8 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -307,7 +309,10 @@ class NameNodeRpcServer implements Namen
         QuotaExceededException.class,
         RecoveryInProgressException.class,
         AccessControlException.class,
-        InvalidToken.class);
+        InvalidToken.class,
+        LeaseExpiredException.class,
+        NSQuotaExceededException.class,
+        DSQuotaExceededException.class);
  }
   
   /**
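
The added classes extend the server's terse-exception list (HDFS-5047), so expected failures such as quota and lease errors log a single line instead of a full stack trace. A generic sketch of the mechanism, independent of Hadoop's RPC server implementation:

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Sketch of the "terse exception" idea: expected failure types log one
    // line; anything else gets the full stack trace.
    class TerseLogSketch {
      private static final Set<Class<?>> TERSE =
          new HashSet<Class<?>>(Arrays.<Class<?>>asList(IOException.class));

      static void logException(Throwable t) {
        if (TERSE.contains(t.getClass())) {
          System.err.println(t.getClass().getSimpleName() + ": "
              + t.getMessage());
        } else {
          t.printStackTrace();
        }
      }

      public static void main(String[] args) {
        logException(new IOException("quota exceeded"));   // one line
        logException(new RuntimeException("unexpected"));  // full trace
      }
    }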

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Tue Aug 13 21:19:53 2013
@@ -709,10 +709,10 @@ class NamenodeJspHelper {
       int idx = (suffix != null && name.endsWith(suffix)) ? name
           .indexOf(suffix) : -1;
 
-      out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getXferAddr()
+      out.print(rowTxt() + "<td class=\"name\"> <a title=\"" + url
           + "\" href=\"" + url + "\">"
           + ((idx > 0) ? name.substring(0, idx) : name) + "</a>"
-          + ((alive) ? "" : "\n"));
+          + ((alive) ? "" : "\n") + "<td class=\"address\">" + d.getXferAddr());
     }
 
     void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d,
@@ -746,10 +746,10 @@ class NamenodeJspHelper {
       /*
        * Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
        * 1) d.getHostName():d.getPort() to display. Domain and port are stripped
-       *    if they are common across the nodes. i.e. "dn1"
-       * 2) d.getHost():d.Port() for "title". i.e. "192.168.0.5:50010"
-       * 3) d.getHostName():d.getInfoPort() for url.
+       *    if they are common across the nodes. i.e. "dn1" 
+       * 2) d.getHostName():d.getInfoPort() for url and title.
        *    i.e. "http://dn1.hadoop.apache.org:50075/..."
+       * 3) d.getXferAddr() for "Transferring Address". i.e. "192.168.0.5:50010"
        * Note that "d.getHost():d.getPort()" is what DFS clients use to
        * interact with datanodes.
        */
@@ -880,7 +880,9 @@ class NamenodeJspHelper {
             }
 
             out.print("<tr class=\"headerRow\"> <th " + nodeHeaderStr("name")
-                + "> Node <th " + nodeHeaderStr("lastcontact")
+                + "> Node <th " + nodeHeaderStr("address")
+                + "> Transferring<br>Address <th "
+                + nodeHeaderStr("lastcontact")
                 + "> Last <br>Contact <th " + nodeHeaderStr("adminstate")
                 + "> Admin State <th " + nodeHeaderStr("capacity")
                 + "> Configured <br>Capacity (" + diskByteStr + ") <th "
@@ -896,8 +898,8 @@ class NamenodeJspHelper {
                 + nodeHeaderStr("bpused") + "> Block Pool<br>Used (" 
                 + diskByteStr + ") <th "
                 + nodeHeaderStr("pcbpused")
-                + "> Block Pool<br>Used (%)"
-                + "> Blocks <th " + nodeHeaderStr("volfails")
+                + "> Block Pool<br>Used (%)" + " <th "
+                + nodeHeaderStr("volfails")
                 +"> Failed Volumes\n");
 
             JspHelper.sortNodeList(live, sorterField, sorterOrder);
@@ -915,7 +917,9 @@ class NamenodeJspHelper {
           if (dead.size() > 0) {
             out.print("<table border=1 cellspacing=0> <tr id=\"row1\"> "
                 + "<th " + nodeHeaderStr("node")
-                + "> Node <th " + nodeHeaderStr("decommissioned")
+                + "> Node <th " + nodeHeaderStr("address")
+                + "> Transferring<br>Address <th "
+                + nodeHeaderStr("decommissioned")
                 + "> Decommissioned\n");
 
             JspHelper.sortNodeList(dead, sorterField, sorterOrder);
@@ -935,7 +939,9 @@ class NamenodeJspHelper {
           if (decommissioning.size() > 0) {
             out.print("<table border=1 cellspacing=0> <tr class=\"headRow\"> "
                 + "<th " + nodeHeaderStr("name") 
-                + "> Node <th " + nodeHeaderStr("lastcontact")
+                + "> Node <th " + nodeHeaderStr("address")
+                + "> Transferring<br>Address <th "
+                + nodeHeaderStr("lastcontact")
                 + "> Last <br>Contact <th "
                 + nodeHeaderStr("underreplicatedblocks")
                 + "> Under Replicated Blocks <th "

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java Tue Aug 13 21:19:53 2013
@@ -56,6 +56,8 @@ public class LightWeightLinkedSet<T> ext
   private DoubleLinkedElement<T> head;
   private DoubleLinkedElement<T> tail;
 
+  private LinkedSetIterator bookmark;
+
   /**
    * @param initCapacity
    *          Recommended size of the internal array.
@@ -69,6 +71,7 @@ public class LightWeightLinkedSet<T> ext
     super(initCapacity, maxLoadFactor, minLoadFactor);
     head = null;
     tail = null;
+    bookmark = new LinkedSetIterator();
   }
 
   public LightWeightLinkedSet() {
@@ -111,6 +114,12 @@ public class LightWeightLinkedSet<T> ext
     tail = le;
     if (head == null) {
       head = le;
+      bookmark.next = head;
+    }
+
+    // Update bookmark, if necessary.
+    if (bookmark.next == null) {
+      bookmark.next = le;
     }
     return true;
   }
@@ -141,6 +150,11 @@ public class LightWeightLinkedSet<T> ext
     if (tail == found) {
       tail = tail.before;
     }
+
+    // Update bookmark, if necessary.
+    if (found == this.bookmark.next) {
+      this.bookmark.next = found.after;
+    }
     return found;
   }
 
@@ -262,5 +276,25 @@ public class LightWeightLinkedSet<T> ext
     super.clear();
     this.head = null;
     this.tail = null;
+    this.resetBookmark();
+  }
+
+  /**
+   * Returns a new iterator starting at the bookmarked element.
+   *
+   * @return the iterator to the bookmarked element.
+   */
+  public Iterator<T> getBookmark() {
+    LinkedSetIterator toRet = new LinkedSetIterator();
+    toRet.next = this.bookmark.next;
+    this.bookmark = toRet;
+    return toRet;
+  }
+
+  /**
+   * Resets the bookmark to the beginning of the list.
+   */
+  public void resetBookmark() {
+    this.bookmark.next = this.head;
   }
 }
\ No newline at end of file
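
A hypothetical usage of the bookmark API added above, assuming the patched LightWeightLinkedSet is on the classpath. Note that getBookmark() hands back an iterator that becomes the new stored bookmark, so advancing it also advances the bookmark:

    import java.util.Iterator;
    import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;

    class BookmarkUsage {
      public static void main(String[] args) {
        LightWeightLinkedSet<String> set = new LightWeightLinkedSet<String>();
        set.add("a");
        set.add("b");
        set.add("c");

        Iterator<String> it = set.getBookmark(); // first call starts at the head
        it.next(); // "a": the returned iterator is the new bookmark,
        it.next(); // "b": so advancing it advances the bookmark too

        Iterator<String> resumed = set.getBookmark();
        resumed.next(); // "c": resumes where the previous scan stopped

        set.resetBookmark(); // next getBookmark() starts from "a" again
      }
    }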

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1509426-1512447

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1509426-1512447

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1509426-1512447

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css Tue Aug 13 21:19:53 2013
@@ -47,7 +47,6 @@ div#dfsnodetable a#title {
 }
 
 div#dfsnodetable td, th {
-	border-bottom-style : none;
         padding-bottom : 4px;
         padding-top : 4px;       
 }
@@ -103,6 +102,7 @@ table.nodes td {
 div#dfsnodetable td, div#dfsnodetable th, div.dfstable td {
 	padding-left : 10px;
 	padding-right : 10px;
+	border:1px solid black;
 }
 
 td.perc_filled {

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Tue Aug 13 21:19:53 2013
@@ -20,14 +20,18 @@ package org.apache.hadoop.fs;
 import static org.junit.Assert.*;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.*;
 
+import com.google.common.base.Joiner;
+
 public class TestGlobPaths {
 
   static class RegexPathFilter implements PathFilter {
@@ -784,4 +788,265 @@ public class TestGlobPaths {
     fs.delete(new Path(USER_DIR), true);
   }
   
+  /**
+   * A glob test that can be run on either FileContext or FileSystem.
+   */
+  private static interface FSTestWrapperGlobTest {
+    void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception;
+  }
+
+  /**
+   * Run a glob test on FileSystem.
+   */
+  private static void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileSystem fs = FileSystem.get(conf);
+      test.run(new FileSystemTestWrapper(fs), fs, null);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Run a glob test on FileContext.
+   */
+  private static void testOnFileContext(FSTestWrapperGlobTest test) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileContext fc = FileContext.getFileContext(conf);
+      test.run(new FileContextTestWrapper(fc), null, fc);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Accept all paths.
+   */
+  private static class AcceptAllPathFilter implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      return true;
+    }
+  }
+
+  /**
+   * Accept only paths ending in Z.
+   */
+  private static class AcceptPathsEndingInZ implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      String stringPath = path.toUri().getPath();
+      return stringPath.endsWith("z");
+    }
+  }
+
+  /**
+   * Test globbing through symlinks.
+   */
+  private static class TestGlobWithSymlinks implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a directory yields a path
+      // containing that symlink.
+      wrap.mkdir(new Path("/alpha"),
+          FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      wrap.mkdir(new Path("/alphaLink/beta"),
+          FsPermission.getDirDefault(), false);
+      // Test simple glob
+      FileStatus[] statuses =
+          wrap.globStatus(new Path("/alpha/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/beta",
+          statuses[0].getPath().toUri().getPath());
+      // Test glob through symlink
+      statuses =
+          wrap.globStatus(new Path("/alphaLink/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLink/beta",
+          statuses[0].getPath().toUri().getPath());
+      // If the terminal path component in a globbed path is a symlink,
+      // we don't dereference that link.
+      wrap.createSymlink(new Path("beta"), new Path("/alphaLink/betaLink"),
+          false);
+      statuses = wrap.globStatus(new Path("/alpha/betaLi*"),
+          new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/betaLink",
+          statuses[0].getPath().toUri().getPath());
+      // todo: test symlink-to-symlink-to-dir, etc.
+    }
+  }
+
+  @Test
+  public void testGlobWithSymlinksOnFS() throws Exception {
+    testOnFileSystem(new TestGlobWithSymlinks());
+  }
+
+  @Test
+  public void testGlobWithSymlinksOnFC() throws Exception {
+    testOnFileContext(new TestGlobWithSymlinks());
+  }
+
+  /**
+   * Test globbing symlinks to symlinks.
+   *
+   * Also test globbing dangling symlinks.  It should NOT throw any exceptions!
+   */
+  private static class TestGlobWithSymlinksToSymlinks
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a symlink to a directory
+      // fully resolves
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      wrap.createSymlink(new Path("/alphaLink"),
+          new Path("/alphaLinkLink"), false);
+      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
+      // Test glob through symlink to a symlink to a directory
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alphaLinkLink"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkLink",
+          statuses[0].getPath().toUri().getPath());
+      statuses =
+          wrap.globStatus(new Path("/alphaLinkLink/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkLink/beta",
+          statuses[0].getPath().toUri().getPath());
+      // Test glob of dangling symlink (theta does not actually exist)
+      wrap.createSymlink(new Path("theta"), new Path("/alpha/kappa"), false);
+      statuses = wrap.globStatus(new Path("/alpha/kappa/kappa"),
+              new AcceptAllPathFilter());
+      Assert.assertNull(statuses);
+      // Test glob of symlinks
+      wrap.createFile("/alpha/beta/gamma");
+      wrap.createSymlink(new Path("gamma"),
+          new Path("/alpha/beta/gammaLink"), false);
+      wrap.createSymlink(new Path("gammaLink"),
+          new Path("/alpha/beta/gammaLinkLink"), false);
+      wrap.createSymlink(new Path("gammaLinkLink"),
+          new Path("/alpha/beta/gammaLinkLinkLink"), false);
+      statuses = wrap.globStatus(new Path("/alpha/*/gammaLinkLinkLink"),
+              new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/beta/gammaLinkLinkLink",
+          statuses[0].getPath().toUri().getPath());
+      statuses = wrap.globStatus(new Path("/alpha/beta/*"),
+              new AcceptAllPathFilter());
+      Assert.assertEquals("/alpha/beta/gamma;/alpha/beta/gammaLink;" +
+          "/alpha/beta/gammaLinkLink;/alpha/beta/gammaLinkLinkLink",
+          TestPath.mergeStatuses(statuses));
+      // Let's create two symlinks that point to each other, and glob on them.
+      wrap.createSymlink(new Path("tweedledee"),
+          new Path("/tweedledum"), false);
+      wrap.createSymlink(new Path("tweedledum"),
+          new Path("/tweedledee"), false);
+      statuses = wrap.globStatus(new Path("/tweedledee/unobtainium"),
+              new AcceptAllPathFilter());
+      Assert.assertNull(statuses);
+    }
+  }
+
+  @Test
+  public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
+    testOnFileSystem(new TestGlobWithSymlinksToSymlinks());
+  }
+
+  @Test
+  public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
+    testOnFileContext(new TestGlobWithSymlinksToSymlinks());
+  }
+
+  /**
+   * Test globbing symlinks with a custom PathFilter
+   */
+  private static class TestGlobSymlinksWithCustomPathFilter
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a symlink to a directory
+      // fully resolves
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLinkz"), false);
+      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
+      wrap.mkdir(new Path("/alpha/betaz"), FsPermission.getDirDefault(), false);
+      // Test glob through symlink to a symlink to a directory, with a PathFilter
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alpha/beta"), new AcceptPathsEndingInZ());
+      Assert.assertNull(statuses);
+      statuses =
+          wrap.globStatus(new Path("/alphaLinkz/betaz"), new AcceptPathsEndingInZ());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkz/betaz",
+          statuses[0].getPath().toUri().getPath());
+      statuses =
+          wrap.globStatus(new Path("/*/*"), new AcceptPathsEndingInZ());
+      Assert.assertEquals("/alpha/betaz;/alphaLinkz/betaz",
+          TestPath.mergeStatuses(statuses));
+      statuses =
+          wrap.globStatus(new Path("/*/*"), new AcceptAllPathFilter());
+      Assert.assertEquals("/alpha/beta;/alpha/betaz;" +
+          "/alphaLinkz/beta;/alphaLinkz/betaz",
+          TestPath.mergeStatuses(statuses));
+    }
+  }
+
+  @Test
+  public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
+    testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter());
+  }
+
+  @Test
+  public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
+    testOnFileContext(new TestGlobSymlinksWithCustomPathFilter());
+  }
+
+  /**
+   * Test that globStatus fills in the scheme even when it is not provided.
+   */
+  private static class TestGlobFillsInScheme
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) 
+        throws Exception {
+      // Verify that the default scheme is hdfs, when we don't supply one.
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alphaLink"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Path path = statuses[0].getPath();
+      Assert.assertEquals("/alphaLink", path.toUri().getPath());
+      Assert.assertEquals("hdfs", path.toUri().getScheme());
+      if (fc != null) {
+        // If we're using FileContext, then we can list a file:/// URI.
+        // Since everyone should have the root directory, we list that.
+        statuses =
+            wrap.globStatus(new Path("file:///"), new AcceptAllPathFilter());
+        Assert.assertEquals(1, statuses.length);
+        Path filePath = statuses[0].getPath();
+        Assert.assertEquals("file", filePath.toUri().getScheme());
+        Assert.assertEquals("/", filePath.toUri().getPath());
+      } else {
+        // The FileSystem we passed in should have scheme 'hdfs'
+        Assert.assertEquals("hdfs", fs.getScheme());
+      }
+    }
+  }
+
+  @Test
+  public void testGlobFillsInSchemeOnFS() throws Exception {
+    testOnFileSystem(new TestGlobFillsInScheme());
+  }
+
+  @Test
+  public void testGlobFillsInSchemeOnFC() throws Exception {
+    testOnFileContext(new TestGlobFillsInScheme());
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Tue Aug 13 21:19:53 2013
@@ -24,8 +24,10 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,7 +71,11 @@ public class TestViewFileSystemHdfs exte
     
     fHdfs = cluster.getFileSystem(0);
     fHdfs2 = cluster.getFileSystem(1);
-    
+    fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+    fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+
     defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + 

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Tue Aug 13 21:19:53 2013
@@ -17,17 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.PrintWriter;
+import java.io.*;
 import java.security.Permission;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -42,10 +32,7 @@ import java.util.zip.GZIPOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSInputChecker;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -63,6 +50,9 @@ import org.apache.hadoop.util.ToolRunner
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.*;
 
 /**
  * This class tests commands from DFSShell.
@@ -101,6 +91,18 @@ public class TestDFSShell {
     return f;
   }
 
+  static File createLocalFileWithRandomData(int fileLength, File f)
+      throws IOException {
+    assertTrue(!f.exists());
+    f.createNewFile();
+    FileOutputStream out = new FileOutputStream(f.toString());
+    byte[] buffer = new byte[fileLength];
+    out.write(buffer);
+    out.flush();
+    out.close();
+    return f;
+  }
+
   static void show(String s) {
     System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
   }
@@ -1748,6 +1750,85 @@ public class TestDFSShell {
     }
   }
 
+
+  @Test (timeout = 300000)
+  public void testAppendToFile() throws Exception {
+    final int inputFileLength = 1024 * 1024;
+    File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
+    testRoot.mkdirs();
+
+    File file1 = new File(testRoot, "file1");
+    File file2 = new File(testRoot, "file2");
+    createLocalFileWithRandomData(inputFileLength, file1);
+    createLocalFileWithRandomData(inputFileLength, file2);
+
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try {
+      FileSystem dfs = cluster.getFileSystem();
+      assertTrue("Not a HDFS: " + dfs.getUri(),
+                 dfs instanceof DistributedFileSystem);
+
+      // Run appendToFile once, make sure that the target file is
+      // created and is of the right size.
+      Path remoteFile = new Path("/remoteFile");
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      String[] argv = new String[] {
+          "-appendToFile", file1.toString(), file2.toString(), remoteFile.toString() };
+      int res = ToolRunner.run(shell, argv);
+      assertThat(res, is(0));
+      assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 2));
+
+      // Run the command once again and make sure that the target file
+      // size has been doubled.
+      res = ToolRunner.run(shell, argv);
+      assertThat(res, is(0));
+      assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 4));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test (timeout = 300000)
+  public void testAppendToFileBadArgs() throws Exception {
+    final int inputFileLength = 1024 * 1024;
+    File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
+    testRoot.mkdirs();
+
+    File file1 = new File(testRoot, "file1");
+    createLocalFileWithRandomData(inputFileLength, file1);
+
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try {
+      FileSystem dfs = cluster.getFileSystem();
+      assertTrue("Not a HDFS: " + dfs.getUri(),
+                 dfs instanceof DistributedFileSystem);
+
+      // Run appendToFile with insufficient arguments.
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      String[] argv = new String[] {
+          "-appendToFile", file1.toString() };
+      int res = ToolRunner.run(shell, argv);
+      assertThat(res, not(0));
+
+      // Mix stdin with other input files. Must fail.
+      Path remoteFile = new Path("/remoteFile");
+      argv = new String[] {
+          "-appendToFile", file1.toString(), "-", remoteFile.toString() };
+      res = ToolRunner.run(shell, argv);
+      assertThat(res, not(0));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that the server trash configuration is respected when
    * the client configuration is not set.

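The two tests above pin down the observable contract of the -appendToFile
shell command: one or more local source files are appended to the destination
path, which is created on first use; repeated invocations keep appending; and
"-" (stdin) cannot be mixed with file arguments. Below is a minimal sketch of
driving the command programmatically, the same way the tests do via
ToolRunner; the local file names and the target path are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class AppendToFileSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the loaded configuration names a running cluster.
        Configuration conf = new HdfsConfiguration();
        FsShell shell = new FsShell();
        shell.setConf(conf);
        // Append two local files to /target, creating it if it does not exist.
        String[] argv = { "-appendToFile", "local1.txt", "local2.txt", "/target" };
        int res = ToolRunner.run(shell, argv);
        // res == 0 on success; non-zero signals a usage error (such as mixing
        // "-" with file arguments) or an I/O failure.
        System.exit(res);
      }
    }
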
Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Tue Aug 13 21:19:53 2013
@@ -21,8 +21,11 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -31,6 +34,7 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -40,9 +44,13 @@ import org.apache.hadoop.hdfs.LogVerific
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Time;
@@ -1004,4 +1012,185 @@ public class TestReplicationPolicy {
     exception.expect(IllegalArgumentException.class);
     blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
   }
-}
+
+  @Test(timeout = 60000)
+  public void testUpdateDoesNotCauseSkippedReplication() {
+    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+    Block block3 = new Block(random.nextLong());
+
+    // Adding QUEUE_VERY_UNDER_REPLICATED block
+    final int block1CurReplicas = 2;
+    final int block1ExpectedReplicas = 7;
+    underReplicatedBlocks.add(block1, block1CurReplicas, 0,
+        block1ExpectedReplicas);
+
+    // Adding QUEUE_VERY_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block2, 2, 0, 7);
+
+    // Adding QUEUE_UNDER_REPLICATED block
+    underReplicatedBlocks.add(block3, 2, 0, 6);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_VERY_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
+
+    // Increasing the replica count will move the block down a priority
+    // level. This simulates a replica being completed in between checks.
+    underReplicatedBlocks.update(block1, block1CurReplicas+1, 0,
+        block1ExpectedReplicas, 1, 0);
+
+    // Choose 1 block from UnderReplicatedBlocks. It should pick the remaining
+    // block in QUEUE_VERY_UNDER_REPLICATED: updating block1 moved it down a
+    // priority, and block2 must not be skipped over as a result.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_UNDER_REPLICATED.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 0, 0, 1, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testAddStoredBlockDoesNotCauseSkippedReplication()
+      throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    when(mockNS.hasWriteLock()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_HIGHEST_PRIORITY.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    // Add this block collection to the BlockManager so that, when we add the
+    // block under construction, the BlockManager will see that the expected
+    // replication has been reached and remove the block from the
+    // under-replicated queue.
+    BlockInfoUnderConstruction info = new BlockInfoUnderConstruction(block1, 1);
+    BlockCollection bc = mock(BlockCollection.class);
+    when(bc.getBlockReplication()).thenReturn((short)1);
+    bm.addBlockCollection(info, bc);
+
+    // Adding this block will increase its current replication, and that will
+    // remove it from the queue.
+    bm.addStoredBlockUnderConstruction(info,
+        TestReplicationPolicy.dataNodes[0], ReplicaState.FINALIZED);
+
+    // Choose 1 block from UnderReplicatedBlocks. It should pick the remaining
+    // block from QUEUE_HIGHEST_PRIORITY (block2), which must not be
+    // skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void
+      testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
+          throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_HIGHEST_PRIORITY.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    final BlockInfo info = new BlockInfo(block1, 1);
+    final MutableBlockCollection mbc = mock(MutableBlockCollection.class);
+    when(mbc.getLastBlock()).thenReturn(info);
+    when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
+    when(mbc.getBlockReplication()).thenReturn((short)1);
+    ContentSummary cs = mock(ContentSummary.class);
+    when(cs.getLength()).thenReturn((long)1);
+    when(mbc.computeContentSummary()).thenReturn(cs);
+    info.setBlockCollection(mbc);
+    bm.addBlockCollection(info, mbc);
+
+    DatanodeDescriptor[] dnAry = {dataNodes[0]};
+    final BlockInfoUnderConstruction ucBlock =
+        info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
+            dnAry);
+    when(mbc.setLastBlock((BlockInfo) any(), (DatanodeDescriptor[]) any()))
+    .thenReturn(ucBlock);
+
+    bm.convertLastBlockToUnderConstruction(mbc);
+
+    // Choose 1 block from UnderReplicatedBlocks. It should pick the remaining
+    // block from QUEUE_HIGHEST_PRIORITY (block2), which must not be
+    // skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testUpdateNeededReplicationsDoesNotCauseSkippedReplication()
+      throws IOException {
+    Namesystem mockNS = mock(Namesystem.class);
+    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
+    FSClusterStats mockStats = mock(FSClusterStats.class);
+    BlockManager bm =
+        new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
+
+    Block block1 = new Block(random.nextLong());
+    Block block2 = new Block(random.nextLong());
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block1, 0, 1, 1);
+
+    // Adding QUEUE_HIGHEST_PRIORITY block (no live replicas, one decommissioned)
+    underReplicatedBlocks.add(block2, 0, 1, 1);
+
+    List<List<Block>> chosenBlocks;
+
+    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
+    // from QUEUE_HIGHEST_PRIORITY.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+
+    bm.setReplication((short)0, (short)1, "", block1);
+
+    // Choose 1 block from UnderReplicatedBlocks. It should pick the remaining
+    // block from QUEUE_HIGHEST_PRIORITY (block2), which must not be
+    // skipped over.
+    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
+    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
+  }
+}
\ No newline at end of file

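All four new tests protect a single invariant: mutating a block's entry in
UnderReplicatedBlocks (via update(), addStoredBlockUnderConstruction(),
convertLastBlockToUnderConstruction(), or setReplication()) must not advance
the iteration bookmark past a block that still needs replication work. A
condensed sketch of the pattern the tests repeat, using only calls that
appear above; the replica counts are illustrative, and blocks are assumed to
come back in insertion order within a priority queue:

    UnderReplicatedBlocks queue = new UnderReplicatedBlocks();
    Block a = new Block(1L);
    Block b = new Block(2L);
    queue.add(a, 2, 0, 7); // curReplicas=2, decommissioned=0, expected=7
    queue.add(b, 2, 0, 7); // both land in QUEUE_VERY_UNDER_REPLICATED

    // The first pass hands out one block and leaves the bookmark at the next one.
    List<List<Block>> first = queue.chooseUnderReplicatedBlocks(1);

    // One block gains a replica between passes and drops a priority level.
    queue.update(a, 3, 0, 7, 1, 0);

    // The next pass must still return the other block; the update must not
    // cause it to be skipped.
    List<List<Block>> second = queue.chooseUnderReplicatedBlocks(1);
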
Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java Tue Aug 13 21:19:53 2013
@@ -325,10 +325,19 @@ public class TestLightWeightLinkedSet {
     assertEquals(NUM, set.size());
     assertFalse(set.isEmpty());
 
+    // Advance the bookmark.
+    Iterator<Integer> bkmrkIt = set.getBookmark();
+    for (int i=0; i<set.size()/2+1; i++) {
+      bkmrkIt.next();
+    }
+    assertTrue(bkmrkIt.hasNext());
+
     // clear the set
     set.clear();
     assertEquals(0, set.size());
     assertTrue(set.isEmpty());
+    bkmrkIt = set.getBookmark();
+    assertFalse(bkmrkIt.hasNext());
 
     // poll should return an empty list
     assertEquals(0, set.pollAll().size());
@@ -363,4 +372,64 @@ public class TestLightWeightLinkedSet {
     LOG.info("Test capacity - DONE");
   }
 
+  @Test(timeout=60000)
+  public void testGetBookmarkReturnsBookmarkIterator() {
+    LOG.info("Test getBookmark returns proper iterator");
+    assertTrue(set.addAll(list));
+
+    Iterator<Integer> bookmark = set.getBookmark();
+    assertEquals(list.get(0), bookmark.next());
+
+    final int numAdvance = list.size()/2;
+    for(int i=1; i<numAdvance; i++) {
+      bookmark.next();
+    }
+
+    Iterator<Integer> bookmark2 = set.getBookmark();
+    assertEquals(list.get(numAdvance), bookmark2.next());
+  }
+
+  @Test(timeout=60000)
+  public void testBookmarkAdvancesOnRemoveOfSameElement() {
+    LOG.info("Test that the bookmark advances if we remove its element.");
+    assertTrue(set.add(list.get(0)));
+    assertTrue(set.add(list.get(1)));
+    assertTrue(set.add(list.get(2)));
+
+    Iterator<Integer> it = set.getBookmark();
+    assertEquals(list.get(0), it.next());
+    set.remove(list.get(1));
+    it = set.getBookmark();
+    assertEquals(list.get(2), it.next());
+  }
+
+  @Test(timeout=60000)
+  public void testBookmarkSetToHeadOnAddToEmpty() {
+    LOG.info("Test bookmark is set after adding to previously empty set.");
+    Iterator<Integer> it = set.getBookmark();
+    assertFalse(it.hasNext());
+    set.add(list.get(0));
+    set.add(list.get(1));
+
+    it = set.getBookmark();
+    assertTrue(it.hasNext());
+    assertEquals(list.get(0), it.next());
+    assertEquals(list.get(1), it.next());
+    assertFalse(it.hasNext());
+  }
+
+  @Test(timeout=60000)
+  public void testResetBookmarkPlacesBookmarkAtHead() {
+    set.addAll(list);
+    Iterator<Integer> it = set.getBookmark();
+    final int numAdvance = set.size()/2;
+    for (int i=0; i<numAdvance; i++) {
+      it.next();
+    }
+    assertEquals(list.get(numAdvance), it.next());
+
+    set.resetBookmark();
+    it = set.getBookmark();
+    assertEquals(list.get(0), it.next());
+  }
 }
\ No newline at end of file
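
Taken together, the bookmark tests specify a persistent cursor over the set:
getBookmark() resumes wherever the previous bookmark iterator stopped, the
bookmark steps over a removed element, it points at the head after an add to
a previously empty set, and resetBookmark() (or clear()) sends it back to the
start. A short usage sketch under those assumptions; the element values are
illustrative:

    LightWeightLinkedSet<Integer> set = new LightWeightLinkedSet<Integer>();
    for (int i = 0; i < 6; i++) {
      set.add(i);
    }

    Iterator<Integer> scan = set.getBookmark(); // starts at the head
    scan.next(); // 0
    scan.next(); // 1; the bookmark advances with each next()

    Iterator<Integer> resumed = set.getBookmark();
    assertEquals(Integer.valueOf(2), resumed.next()); // picks up where scan stopped

    set.resetBookmark();
    assertEquals(Integer.valueOf(0), set.getBookmark().next()); // back at the head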