Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2013/10/17 04:14:36 UTC

svn commit: r1532952 [6/6] - in /hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java...

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java Thu Oct 17 02:14:33 2013
@@ -17,44 +17,71 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-
-import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.*;
-import static org.mockito.Mockito.*;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_EDITS;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_FSIMAGE;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAFEMODE;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAVING_CHECKPOINT;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.znerd.xmlenc.XMLOutputter;
 
-public class TestNameNodeJspHelper {
+import com.google.common.collect.ImmutableSet;
 
-  private MiniDFSCluster cluster = null;
-  Configuration conf = null;
+public class TestNameNodeJspHelper {
 
-  @Before
-  public void setUp() throws Exception {
+  private static final int DATA_NODES_AMOUNT = 2;
+  private static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    cluster  = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(DATA_NODES_AMOUNT).build();
+    cluster.waitClusterUp();
   }
 
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if (cluster != null)
       cluster.shutdown();
   }
@@ -66,23 +93,23 @@ public class TestNameNodeJspHelper {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
     String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
         conf, ugi);
-    //tokenString returned must be null because security is disabled
+    // tokenString returned must be null because security is disabled
     Assert.assertEquals(null, tokenString);
   }
-  
+
   @Test
-  public void  tesSecurityModeText() {
+  public void testSecurityModeText() {
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be ON", 
+    Assert.assertTrue("security mode doesn't match. Should be ON",
         securityOnOff.contains("ON"));
-    //Security is enabled
+    // Security is enabled
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
     UserGroupInformation.setConfiguration(conf);
-    
+
     securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be OFF", 
+    Assert.assertTrue("security mode doesn't match. Should be OFF",
         securityOnOff.contains("OFF"));
   }
 
@@ -107,9 +134,83 @@ public class TestNameNodeJspHelper {
     Assert.assertTrue(containsMatch(contents, SAFEMODE.getDescription()));
   }
 
+  @Test
+  public void testGetRollingUpgradeText() {
+    Assert.assertEquals("", NamenodeJspHelper.getRollingUpgradeText(null));
+  }
+
+  /**
+   * Tests for non-null, non-empty NameNode label.
+   */
+  @Test
+  public void testGetNameNodeLabel() {
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(
+      cluster.getNameNode());
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests for non-null, non-empty NameNode label when called before
+   * initialization of the NameNode RPC server.
+   */
+  @Test
+  public void testGetNameNodeLabelNullRpcServer() {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getRpcServer()).thenReturn(null);
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(nn);
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests that passing a null FSNamesystem to generateSnapshotReport does not
+   * throw NullPointerException.
+   */
+  @Test
+  public void testGenerateSnapshotReportNullNamesystem() throws Exception {
+    NamenodeJspHelper.generateSnapshotReport(mock(JspWriter.class), null);
+  }
+
+  /**
+   * Tests that redirectToRandomDataNode does not throw NullPointerException if
+   * it finds a null FSNamesystem.
+   */
+  @Test(expected=IOException.class)
+  public void testRedirectToRandomDataNodeNullNamesystem() throws Exception {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getNamesystem()).thenReturn(null);
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nn);
+    NamenodeJspHelper.redirectToRandomDataNode(context,
+      mock(HttpServletRequest.class), mock(HttpServletResponse.class));
+  }
+
+  /**
+   * Tests that XMLBlockInfo does not throw NullPointerException if it finds a
+   * null FSNamesystem.
+   */
+  @Test
+  public void testXMLBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLBlockInfo(null, 1L).toXML(doc);
+  }
+
+  /**
+   * Tests that XMLCorruptBlockInfo does not throw NullPointerException if it
+   * finds a null FSNamesystem.
+   */
+  @Test
+  public void testXMLCorruptBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLCorruptBlockInfo(null, mock(Configuration.class),
+      10, 1L).toXML(doc);
+  }
+
   /**
    * Checks if the list contains any string that partially matches the regex.
-   * 
+   *
    * @param list List<String> containing strings to check
    * @param regex String regex to check
    * @return boolean true if some string in list partially matches regex
@@ -123,4 +224,149 @@ public class TestNameNodeJspHelper {
     }
     return false;
   }
+
+  @Test(timeout = 15000)
+  public void testGetRandomDatanode() {
+    NameNode nameNode = cluster.getNameNode();
+    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+    for (DataNode dataNode : cluster.getDataNodes()) {
+      builder.add(dataNode.getDisplayName());
+    }
+    ImmutableSet<String> set = builder.build();
+
+    for (int i = 0; i < 10; i++) {
+      DatanodeDescriptor dnDescriptor = NamenodeJspHelper
+          .getRandomDatanode(nameNode);
+      assertTrue("testGetRandomDatanode error",
+          set.contains(dnDescriptor.toString()));
+    }
+  }
+
+  @Test(timeout = 15000)
+  public void testNamenodeJspHelperRedirectToRandomDataNode()
+      throws IOException, InterruptedException {
+    final String urlPart = "browseDirectory.jsp?namenodeInfoPort=";
+
+    ServletContext context = mock(ServletContext.class);
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    HttpServletResponse resp = mock(HttpServletResponse.class);
+
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter(UserParam.NAME)).thenReturn("localuser");
+    when(context.getAttribute(NAMENODE_ATTRIBUTE_KEY)).thenReturn(
+        cluster.getNameNode());
+    when(context.getAttribute(JspHelper.CURRENT_CONF)).thenReturn(conf);
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    doAnswer(new Answer<String>() {
+      @Override
+      public String answer(InvocationOnMock invocation) throws Throwable {
+        return null;
+      }
+    }).when(resp).sendRedirect(captor.capture());
+
+    NamenodeJspHelper.redirectToRandomDataNode(context, request, resp);
+    assertTrue(captor.getValue().contains(urlPart));
+  }
+  
+  private enum DataNodeStatus {
+    LIVE("[Live Datanodes(| +):(| +)]\\d"), 
+    DEAD("[Dead Datanodes(| +):(| +)]\\d");
+
+    private Pattern pattern;
+
+    public Pattern getPattern() {
+      return pattern;
+    }
+
+    DataNodeStatus(String line) {
+      this.pattern = Pattern.compile(line);
+    }
+  }
+
+  private void checkDeadLiveNodes(NameNode nameNode, int deadCount,
+      int liveCount) {
+    FSNamesystem ns = nameNode.getNamesystem();
+    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    dm.fetchDatanodes(live, dead, true);
+    assertTrue("checkDeadLiveNodes error", live.size() == liveCount
+        && dead.size() == deadCount);
+  }
+
+  @Test(timeout = 15000)
+  public void testNodeListJspGenerateNodesList() throws IOException {
+    String output;
+    NameNode nameNode = cluster.getNameNode();
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nameNode);
+    when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
+        .thenReturn(cluster.getNameNode().getHttpAddress());    
+    checkDeadLiveNodes(nameNode, 0, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.LIVE);
+    assertCounts(DataNodeStatus.LIVE, output, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.DEAD);
+    assertCounts(DataNodeStatus.DEAD, output, 0);    
+  }
+
+  private void assertCounts(DataNodeStatus dataNodeStatus, String output,
+      int expectedCount) {
+    Matcher matcher = dataNodeStatus.getPattern().matcher(output);
+    if (matcher.find()) {
+      String digitLine = output.substring(matcher.start(), matcher.end())
+          .trim();
+      assertTrue("assertCounts error. actual != expected",
+          Integer.valueOf(digitLine) == expectedCount);
+    } else {
+      fail("assertCount matcher error");
+    }
+  }
+
+  private String getOutputFromGeneratedNodesList(ServletContext context,
+      DataNodeStatus dnStatus) throws IOException {
+    JspWriter out = mock(JspWriter.class);
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    NamenodeJspHelper.NodeListJsp nodeListJsp =
+        new NamenodeJspHelper.NodeListJsp();
+    final StringBuffer buffer = new StringBuffer();
+    doAnswer(new Answer<String>() {
+      @Override
+      public String answer(InvocationOnMock invok) {
+        Object[] args = invok.getArguments();
+        buffer.append((String) args[0]);
+        return null;
+      }
+    }).when(out).print(captor.capture());
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter("whatNodes")).thenReturn(dnStatus.name());
+    nodeListJsp.generateNodesList(context, out, request);
+    return buffer.toString();
+  }
+
+  @Test(timeout = 15000)
+  public void testGetInodeLimitText() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = ImmutableSet.of("files and directories",
+        "Heap Memory used", "Non Heap Memory used");
+    String line = NamenodeJspHelper.getInodeLimitText(fsn);
+    for (String pattern : patterns) {
+      assertTrue("testGetInodeLimitText error " + pattern,
+          line.contains(pattern));
+    }
+  }
+  
+  @Test(timeout = 15000)
+  public void testGetVersionTable() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = ImmutableSet.of(VersionInfo.getVersion(),
+        VersionInfo.getRevision(), VersionInfo.getUser(),
+        VersionInfo.getBranch(), fsn.getClusterId(), fsn.getBlockPoolId());
+    String line = NamenodeJspHelper.getVersionTable(fsn);
+    for (String pattern : patterns) {
+      assertTrue("testGetVersionTable error " + pattern,
+          line.contains(pattern));
+    }
+  }
 }
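
The JSP-helper tests above lean on a single Mockito idiom throughout: stub the mocked JspWriter (or HttpServletResponse) with doAnswer so that everything the helper writes is accumulated in a buffer or an ArgumentCaptor, then assert on the captured text. A minimal, self-contained sketch of that idiom, outside HDFS; the class name and the printed string are illustrative, not part of the commit:

    import static org.mockito.Matchers.anyString;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import javax.servlet.jsp.JspWriter;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class JspWriterCaptureSketch {
      public static void main(String[] args) throws Exception {
        final StringBuffer buffer = new StringBuffer();
        JspWriter out = mock(JspWriter.class);
        // Accumulate every string passed to out.print(...) so the caller
        // can assert on the generated markup afterwards.
        doAnswer(new Answer<Void>() {
          @Override
          public Void answer(InvocationOnMock invocation) {
            buffer.append((String) invocation.getArguments()[0]);
            return null;
          }
        }).when(out).print(anyString());

        out.print("Live Datanodes : 2");
        System.out.println(buffer);  // prints: Live Datanodes : 2
      }
    }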

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Thu Oct 17 02:14:33 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -35,11 +36,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.ClientId;
@@ -47,7 +52,9 @@ import org.apache.hadoop.ipc.RPC.RpcKind
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.LightWeightCache;
 import org.junit.After;
 import org.junit.Assert;
@@ -75,12 +82,13 @@ public class TestNamenodeRetryCache {
       "TestNamenodeRetryCache", null, FsPermission.getDefault());
   private static DistributedFileSystem filesystem;
   private static int callId = 100;
-  private static Configuration conf = new HdfsConfiguration();
+  private static Configuration conf;
   private static final int BlockSize = 512;
   
   /** Start a cluster */
   @Before
   public void setup() throws Exception {
+    conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -294,6 +302,40 @@ public class TestNamenodeRetryCache {
   }
   
   /**
+   * Make sure a retry call does not hang because of the exception thrown in the
+   * first call.
+   */
+  @Test(timeout = 60000)
+  public void testUpdatePipelineWithFailOver() throws Exception {
+    cluster.shutdown();
+    namesystem = null;
+    filesystem = null;
+    cluster = new MiniDFSCluster.Builder(conf).nnTopology(
+        MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
+    FSNamesystem ns0 = cluster.getNamesystem(0);
+    ExtendedBlock oldBlock = new ExtendedBlock();
+    ExtendedBlock newBlock = new ExtendedBlock();
+    DatanodeID[] newNodes = new DatanodeID[2];
+    
+    newCall();
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+      fail("Expect StandbyException from the updatePipeline call");
+    } catch (StandbyException e) {
+      // expected, since at the beginning both NNs are in standby state
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+    
+    cluster.transitionToActive(0);
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+    } catch (IOException e) {
+      // ignored; the point is only that the retried call must not hang
+    }
+  }
+  
+  /**
   * Test for createSnapshot
    */
   @Test
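
Note that testUpdatePipelineWithFailOver uses the try/fail/catch form for the first expected failure rather than @Test(expected = StandbyException.class): the test must keep executing after the exception so it can transition nn0 to active and retry the call. A generic, self-contained sketch of the idiom, with illustrative names:

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import org.junit.Test;

    public class ExpectedExceptionIdiomSketch {
      private void callThatShouldFail() {
        throw new IllegalStateException("namenode is in standby state");
      }

      @Test
      public void sketch() {
        try {
          callThatShouldFail();
          fail("Expected IllegalStateException");
        } catch (IllegalStateException e) {
          // Assert on the message, then keep going: the test can now
          // change cluster state and retry, which @Test(expected=...)
          // would not allow.
          assertTrue(e.getMessage().contains("standby"));
        }
      }
    }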

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java Thu Oct 17 02:14:33 2013
@@ -73,24 +73,28 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -111,11 +115,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -124,6 +130,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "RUNNING")
           .put("percentComplete", 0.5f)
           .put("steps", Collections.<Object>singletonList(
@@ -138,12 +145,14 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -164,11 +173,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -177,6 +188,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
@@ -191,11 +203,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 300L)
               .put("total", 300L)
               .put("percentComplete", 1.0f)
@@ -204,11 +218,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "AwaitingReportedBlocks")
+              .put("desc", "awaiting reported blocks")
               .put("count", 400L)
               .put("total", 400L)
               .put("percentComplete", 1.0f)

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Thu Oct 17 02:14:33 2013
@@ -96,8 +96,8 @@ public class TestDNFencing {
     // Increase max streams so that we re-replicate quickly.
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
     // See RandomDeleterPolicy javadoc.
-    conf.setClass("dfs.block.replicator.classname", RandomDeleterPolicy.class,
-        BlockPlacementPolicy.class); 
+    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        RandomDeleterPolicy.class, BlockPlacementPolicy.class); 
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Thu Oct 17 02:14:33 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -47,19 +48,22 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.base.Joiner;
 
@@ -78,8 +82,12 @@ public class TestDelegationTokensWithHA 
   private static DelegationTokenSecretManager dtSecretManager;
   private static DistributedFileSystem dfs;
 
-  @BeforeClass
-  public static void setupCluster() throws Exception {
+  private volatile boolean catchup = false;
+  
+  @Before
+  public void setupCluster() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+    
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
@@ -101,18 +109,12 @@ public class TestDelegationTokensWithHA 
         nn0.getNamesystem());
   }
 
-  @AfterClass
-  public static void shutdownCluster() throws IOException {
+  @After
+  public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
-
-
-  @Before
-  public void prepTest() {
-    SecurityUtilTestHelper.setTokenServiceUseIp(true);
-  }
   
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
@@ -155,6 +157,96 @@ public class TestDelegationTokensWithHA 
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
   
+  private class EditLogTailerForTest extends EditLogTailer {
+    public EditLogTailerForTest(FSNamesystem namesystem, Configuration conf) {
+      super(namesystem, conf);
+    }
+    
+    public void catchupDuringFailover() throws IOException {
+      synchronized (TestDelegationTokensWithHA.this) {
+        while (!catchup) {
+          try {
+            LOG.info("The editlog tailer is waiting to catchup...");
+            TestDelegationTokensWithHA.this.wait();
+          } catch (InterruptedException e) {}
+        }
+      }
+      super.catchupDuringFailover();
+    }
+  }
+  
+  /**
+   * Test if correct exception (StandbyException or RetriableException) can be
+   * thrown during the NN failover. 
+   */
+  @Test
+  public void testDelegationTokenDuringNNFailover() throws Exception {
+    EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
+    // stop the editLogTailer of nn1
+    editLogTailer.stop();
+    Configuration conf = (Configuration) Whitebox.getInternalState(
+        editLogTailer, "conf");
+    nn1.getNamesystem().setEditLogTailerForTests(
+        new EditLogTailerForTest(nn1.getNamesystem(), conf));
+    
+    // create token
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(
+             new ByteArrayInputStream(tokenId)));
+
+    // Ensure that it's present in the nn0 secret manager and can
+    // be renewed directly from there.
+    LOG.info("A valid token should have non-null password, " +
+        "and should be renewed successfully");
+    assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+    
+    // transition nn0 to standby
+    cluster.transitionToStandby(0);
+    
+    try {
+      cluster.getNameNodeRpc(0).renewDelegationToken(token);
+      fail("StandbyException is expected since nn0 is in standby state");
+    } catch (StandbyException e) {
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+    
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          cluster.transitionToActive(1);
+        } catch (Exception e) {
+          LOG.error("Transition nn1 to active failed", e);
+        }    
+      }
+    }.start();
+    
+    Thread.sleep(1000);
+    try {
+      nn1.getNamesystem().verifyToken(token.decodeIdentifier(),
+          token.getPassword());
+      fail("RetriableException/StandbyException is expected since nn1 is in transition");
+    } catch (IOException e) {
+      assertTrue(e instanceof StandbyException
+          || e instanceof RetriableException);
+      LOG.info("Got expected exception", e);
+    }
+    
+    catchup = true;
+    synchronized (this) {
+      this.notifyAll();
+    }
+    
+    Configuration clientConf = dfs.getConf();
+    doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
+    doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
+  }
+  
   @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
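
The EditLogTailerForTest above parks catchupDuringFailover() on the test's monitor until the test sets catchup and calls notifyAll(), holding nn1 mid-transition long enough for verifyToken to hit the expected RetriableException/StandbyException. Reduced to its essentials, the gate is just a guarded wait/notify pair; a sketch with illustrative names:

    public class GateSketch {
      private volatile boolean released = false;

      // Called by the thread that must be held back (the tailer).
      public synchronized void awaitRelease() {
        while (!released) {          // guard against spurious wakeups
          try {
            wait();
          } catch (InterruptedException ignored) {
          }
        }
      }

      // Called by the test once its assertions are done.
      public synchronized void release() {
        released = true;
        notifyAll();
      }
    }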

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Thu Oct 17 02:14:33 2013
@@ -17,12 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -101,6 +107,50 @@ public class TestHASafeMode {
     }
   }
   
+  /**
+   * Make sure the client retries when the active NN is in safemode
+   */
+  @Test (timeout=300000)
+  public void testClientRetrySafeMode() throws Exception {
+    final Map<Path, Boolean> results = Collections
+        .synchronizedMap(new HashMap<Path, Boolean>());
+    final Path test = new Path("/test");
+    // let nn0 enter safemode
+    NameNodeAdapter.enterSafeMode(nn0, false);
+    LOG.info("enter safemode");
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          boolean mkdir = fs.mkdirs(test);
+          LOG.info("mkdir finished, result is " + mkdir);
+          synchronized (TestHASafeMode.this) {
+            results.put(test, mkdir);
+            TestHASafeMode.this.notifyAll();
+          }
+        } catch (Exception e) {
+          LOG.info("Got Exception while calling mkdir", e);
+        }
+      }
+    }.start();
+    
+    // make sure the client's call has actually been handled by the active NN
+    assertFalse("The directory should not be created while NN in safemode",
+        fs.exists(test));
+    
+    Thread.sleep(1000);
+    // let nn0 leave safemode
+    NameNodeAdapter.leaveSafeMode(nn0);
+    LOG.info("leave safemode");
+    
+    synchronized (this) {
+      while (!results.containsKey(test)) {
+        this.wait();
+      }
+      assertTrue(results.get(test));
+    }
+  }
+  
   private void restartStandby() throws IOException {
     cluster.shutdownNameNode(1);
     // Set the safemode extension to be lengthy, so that the tests
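
testClientRetrySafeMode hands the mkdirs result from the background thread back to the test through a synchronized map plus wait/notifyAll on the test instance. The same hand-off, stripped of the HDFS specifics (all names here are illustrative):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class ResultHandoffSketch {
      private final Map<String, Boolean> results =
          Collections.synchronizedMap(new HashMap<String, Boolean>());

      public void run() throws InterruptedException {
        new Thread() {
          @Override
          public void run() {
            boolean ok = true;  // stands in for the blocking fs.mkdirs(...)
            synchronized (ResultHandoffSketch.this) {
              results.put("/test", ok);
              ResultHandoffSketch.this.notifyAll();
            }
          }
        }.start();

        synchronized (this) {
          while (!results.containsKey("/test")) {
            wait();  // woken when the worker publishes its result
          }
        }
        System.out.println("mkdirs result: " + results.get("/test"));
      }

      public static void main(String[] args) throws InterruptedException {
        new ResultHandoffSketch().run();
      }
    }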

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Thu Oct 17 02:14:33 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
@@ -646,10 +647,14 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       Path linkPath = new Path(link);
-      FileStatus linkStatus = dfs.getFileLinkStatus(linkPath);
+      FileStatus linkStatus = null;
       for (int i = 0; i < CHECKTIMES && linkStatus == null; i++) {
-        Thread.sleep(1000);
-        linkStatus = dfs.getFileLinkStatus(linkPath);
+        try {
+          linkStatus = dfs.getFileLinkStatus(linkPath);
+        } catch (FileNotFoundException fnf) {
+          // Ignoring, this can be legitimate.
+          Thread.sleep(1000);
+        }
       }
       return linkStatus != null;
     }
@@ -857,4 +862,4 @@ public class TestRetryCacheWithHA {
           + results.get(op.name));
     }
   }
-}
\ No newline at end of file
+}
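
The checkNamenodeBeforeReturn fix above turns an unguarded getFileLinkStatus call into a bounded poll that treats FileNotFoundException as "not visible yet". Generalized, the loop looks like this; Probe and pollUntilVisible are illustrative names, and the bounded-attempts/one-second-sleep shape follows the test:

    import java.io.FileNotFoundException;

    public class PollUntilVisibleSketch {
      interface Probe<T> {
        T call() throws Exception;
      }

      static <T> T pollUntilVisible(Probe<T> probe, int attempts, long sleepMs)
          throws Exception {
        T result = null;
        for (int i = 0; i < attempts && result == null; i++) {
          try {
            result = probe.call();
          } catch (FileNotFoundException fnf) {
            // Legitimate while the retried operation has not been
            // replayed on the new active NN; back off and try again.
            Thread.sleep(sleepMs);
          }
        }
        return result;  // null: the object never became visible
      }
    }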

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Thu Oct 17 02:14:33 2013
@@ -192,17 +192,10 @@ public class TestNameNodeMetrics {
     assertCounter("CreateFileOps", 1L, rb);
     assertCounter("FilesCreated", (long)file.depth(), rb);
 
-    // Blocks are stored in a hashmap. Compute its capacity, which
-    // doubles every time the number of entries reach the threshold.
-    int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
-    while (threshold < blockCount) {
-      blockCapacity <<= 1;
-    }
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", blockCount, rb);
-    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Thu Oct 17 02:14:33 2013
@@ -23,12 +23,17 @@ import static org.junit.Assert.assertNul
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.PrintStream;
+import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -45,7 +50,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -777,7 +784,40 @@ public class TestSnapshotDeletion {
     assertEquals("user1", statusOfS1.getOwner());
     assertEquals("group1", statusOfS1.getGroup());
   }
-  
+
+  @Test
+  public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
+    cluster.shutdown();
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+
+    final Path path = new Path("/dir");
+    hdfs.mkdirs(path);
+    hdfs.allowSnapshot(path);
+    hdfs.mkdirs(new Path(path, "/test"));
+    hdfs.createSnapshot(path, "s1");
+    UserGroupInformation anotherUser = UserGroupInformation
+        .createRemoteUser("anotheruser");
+    anotherUser.doAs(new PrivilegedAction<Object>() {
+      @Override
+      public Object run() {
+        DistributedFileSystem anotherUserFS = null;
+        try {
+          anotherUserFS = cluster.getFileSystem();
+          anotherUserFS.deleteSnapshot(path, "s1");
+        } catch (IOException e) {
+          fail("Failed to delete snapshot : " + e.getLocalizedMessage());
+        } finally {
+          IOUtils.closeStream(anotherUserFS);
+        }
+        return null;
+      }
+    });
+  }
+
   /** 
    * A test covering the case where the snapshot diff to be deleted is renamed 
    * to its previous snapshot. 
@@ -884,4 +924,29 @@ public class TestSnapshotDeletion {
     subFile1Status = hdfs.getFileStatus(subFile1SCopy);
     assertEquals(REPLICATION_1, subFile1Status.getReplication());
   }
+  
+  @Test
+  public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    
+    String[] argv1 = {"-deleteSnapshot", "/tmp"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+    
+    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
 }
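
testDeleteSnapshotCommandWithIllegalArguments (and its twin in TestSnapshotRename below) verifies FsShell error text by pointing System.out and System.err at a buffer. One caveat: neither test restores the original streams, which can hide output from later tests in the same JVM. A sketch of the capture pattern with restoration; the names are illustrative:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class CaptureStdOutSketch {
      public static String captureOutputOf(Runnable command) {
        PrintStream originalOut = System.out;
        PrintStream originalErr = System.err;
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream psOut = new PrintStream(out);
        System.setOut(psOut);
        System.setErr(psOut);
        try {
          command.run();
        } finally {
          System.setOut(originalOut);  // always restore the real streams
          System.setErr(originalErr);
          psOut.close();
        }
        return out.toString();
      }

      public static void main(String[] args) {
        String text = captureOutputOf(new Runnable() {
          @Override
          public void run() {
            System.out.println("-deleteSnapshot: Incorrect number of arguments.");
          }
        });
        System.out.println("captured: " + text.trim());
      }
    }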

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Thu Oct 17 02:14:33 2013
@@ -22,10 +22,13 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -226,4 +229,29 @@ public class TestSnapshotRename {
       }
     }
   }
+  
+  @Test
+  public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    
+    String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+    
+    String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Thu Oct 17 02:14:33 2013
@@ -81,7 +81,7 @@ public class WebHdfsTestUtil {
   
   public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
       final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
-    return webhdfs.new Runner(op, conn).twoStepWrite();
+    return webhdfs.new ConnRunner(op, conn).twoStepWrite();
   }
 
   public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1532952&r1=1532951&r2=1532952&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Thu Oct 17 02:14:33 2013
@@ -1043,6 +1043,7 @@
 
     <test> <!-- TESTED -->
       <description>ls: Negative test for quoted /*/* globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/dir1</command>
@@ -1062,6 +1063,7 @@
 
     <test> <!-- TESTED -->
       <description>ls: Test for quoted globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/\*</command>
@@ -1082,6 +1084,7 @@
 
     <test> <!-- TESTED -->
       <description>rm: Test for quoted globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/\*</command>
@@ -6049,7 +6052,7 @@
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -touchz /dir0/file0</command>
         <command>-fs NAMENODE -touchz /dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 /dir0</command>
+        <command>-fs NAMENODE -setrep 2 /dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r /user</command>
@@ -6072,7 +6075,7 @@
         <command>-fs NAMENODE -mkdir -p dir0</command>
         <command>-fs NAMENODE -touchz dir0/file0</command>
         <command>-fs NAMENODE -touchz dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 dir0</command>
+        <command>-fs NAMENODE -setrep 2 dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r /user</command>
@@ -6090,6 +6093,24 @@
     </test>
     
     <test> <!-- TESTED -->
+      <description>setrep: -R ignored for existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir0</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -setrep -R 2 dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
       <description>setrep: non existent file (absolute path)</description>
       <test-commands>
         <command>-fs NAMENODE -setrep 2 /dir0/file</command>
@@ -6145,7 +6166,7 @@
         <command>-fs NAMENODE -mkdir hdfs:///dir0/</command>
         <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
         <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 hdfs:///dir0</command>
+        <command>-fs NAMENODE -setrep 2 hdfs:///dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -6203,7 +6224,7 @@
         <command>-fs NAMENODE -mkdir -p NAMENODE/dir0</command>
         <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
         <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 NAMENODE/dir0</command>
+        <command>-fs NAMENODE -setrep 2 NAMENODE/dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r NAMENODE/*</command>