Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC

svn commit: r1619012 [28/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java Tue Aug 19 23:49:39 2014
@@ -21,19 +21,26 @@ import static org.apache.hadoop.test.Met
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.junit.Test;
 
 /**
 * This test ensures that all types of datanode reports work correctly.
  */
 public class TestDatanodeReport {
+  static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
   final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
     
@@ -50,20 +57,18 @@ public class TestDatanodeReport {
     try {
       //wait until the cluster is up
       cluster.waitActive();
-
-      InetSocketAddress addr = new InetSocketAddress("localhost",
-          cluster.getNameNodePort());
-      DFSClient client = new DFSClient(addr, conf);
-
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      final DFSClient client = cluster.getFileSystem().dfs;
+
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
+      assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
 
       // bring down one datanode
-      ArrayList<DataNode> datanodes = cluster.getDataNodes();
-      datanodes.remove(datanodes.size()-1).shutdown();
+      final DataNode last = datanodes.get(datanodes.size() - 1);
+      LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
+      last.shutdown();
 
       DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       while (nodeInfo.length != 1) {
@@ -74,22 +79,59 @@ public class TestDatanodeReport {
         nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       }
 
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES-1);
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
+      assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
+      assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
       Thread.sleep(5000);
       assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
-    }finally {
+    } finally {
       cluster.shutdown();
     }
   }
- 
-  public static void main(String[] args) throws Exception {
-    new TestDatanodeReport().testDatanodeReport();
-  }
   
-}
-
+  final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
+    @Override
+    public int compare(StorageReport left, StorageReport right) {
+      return left.getStorage().getStorageID().compareTo(
+            right.getStorage().getStorageID());
+    }
+  };
 
+  static void assertReports(int numDatanodes, DatanodeReportType type,
+      DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
+    final DatanodeInfo[] infos = client.datanodeReport(type);
+    assertEquals(numDatanodes, infos.length);
+    final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
+    assertEquals(numDatanodes, reports.length);
+    
+    for(int i = 0; i < infos.length; i++) {
+      assertEquals(infos[i], reports[i].getDatanodeInfo());
+      
+      final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
+      if (bpid != null) {
+        //check storage
+        final StorageReport[] computed = reports[i].getStorageReports();
+        Arrays.sort(computed, CMP);
+        final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
+        Arrays.sort(expected, CMP);
+  
+        assertEquals(expected.length, computed.length);
+        for(int j = 0; j < expected.length; j++) {
+          assertEquals(expected[j].getStorage().getStorageID(),
+                       computed[j].getStorage().getStorageID());
+        }
+      }
+    }
+  }
+  
+  static DataNode findDatanode(String id, List<DataNode> datanodes) {
+    for(DataNode d : datanodes) {
+      if (d.getDatanodeUuid().equals(id)) {
+        return d;
+      }
+    }
+    throw new IllegalStateException("Datnode " + id + " not in datanode list: "
+        + datanodes);
+  }
+}
\ No newline at end of file

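A sketch for illustration only, not part of the commit: the new assertReports helper above pairs DFSClient#datanodeReport with the per-storage DFSClient#getDatanodeStorageReport call. A minimal standalone consumer (imports as added at the top of TestDatanodeReport) might look as follows; the printed summary and the getCapacity/getRemaining accessors are assumptions about StorageReport, not taken from this diff.

    // Sketch: dump one line per datanode plus one per storage volume.
    static void printStorageSummary(DFSClient client) throws IOException {
      for (DatanodeStorageReport r :
          client.getDatanodeStorageReport(DatanodeReportType.LIVE)) {
        System.out.println(r.getDatanodeInfo().getXferAddr());
        for (StorageReport s : r.getStorageReports()) {
          System.out.println("  " + s.getStorage().getStorageID()
              + " remaining=" + s.getRemaining() + "/" + s.getCapacity());
        }
      }
    }
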
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Tue Aug 19 23:49:39 2014
@@ -42,7 +42,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.PathUtils;
@@ -64,6 +66,7 @@ public class TestDecommission {
   static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
 
   final Random myrand = new Random();
+  Path dir;
   Path hostsFile;
   Path excludeFile;
   FileSystem localFileSys;
@@ -76,7 +79,7 @@ public class TestDecommission {
     // Set up the hosts/exclude files.
     localFileSys = FileSystem.getLocal(conf);
     Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
+    dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
     
@@ -96,7 +99,7 @@ public class TestDecommission {
   
   @After
   public void teardown() throws IOException {
-    cleanupFile(localFileSys, excludeFile.getParent());
+    cleanupFile(localFileSys, dir);
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -202,10 +205,11 @@ public class TestDecommission {
   }
 
   /*
-   * decommission one random node and wait for each to reach the
-   * given {@code waitForState}.
+   * Decommission the DN with the given datanodeUuid, or one random live node
+   * if datanodeUuid is null, and wait for the node to reach the given
+   * {@code waitForState}.
    */
   private DatanodeInfo decommissionNode(int nnIndex,
+                                  String datanodeUuid,
                                   ArrayList<DatanodeInfo>decommissionedNodes,
                                   AdminStates waitForState)
     throws IOException {
@@ -213,14 +217,26 @@ public class TestDecommission {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
 
     //
-    // pick one datanode randomly.
+    // pick one datanode randomly unless the caller specifies one.
     //
     int index = 0;
-    boolean found = false;
-    while (!found) {
-      index = myrand.nextInt(info.length);
-      if (!info[index].isDecommissioned()) {
-        found = true;
+    if (datanodeUuid == null) {
+      boolean found = false;
+      while (!found) {
+        index = myrand.nextInt(info.length);
+        if (!info[index].isDecommissioned()) {
+          found = true;
+        }
+      }
+    } else {
+      // The caller specifies a DN
+      for (; index < info.length; index++) {
+        if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
+          break;
+        }
+      }
+      if (index == info.length) {
+        throw new IOException("invalid datanodeUuid " + datanodeUuid);
       }
     }
     String nodename = info[index].getXferAddr();
@@ -242,11 +258,13 @@ public class TestDecommission {
     return ret;
   }
 
-  /* stop decommission of the datanode and wait for each to reach the NORMAL state */
-  private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
+  /* Ask a specific NN to stop decommission of the datanode and wait for it
+   * to reach the NORMAL state.
+   */
+  private void recomissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
     LOG.info("Recommissioning node: " + decommissionedNode);
     writeConfigFile(excludeFile, null);
-    refreshNodes(cluster.getNamesystem(), conf);
+    refreshNodes(cluster.getNamesystem(nnIndex), conf);
     waitNodeState(decommissionedNode, AdminStates.NORMAL);
 
   }
@@ -367,7 +385,7 @@ public class TestDecommission {
     int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
     // Decommission one node. Verify that node is decommissioned.
-    DatanodeInfo decomNode = decommissionNode(0, decommissionedNodes,
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
         AdminStates.DECOMMISSIONED);
     decommissionedNodes.add(decomNode);
     assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
@@ -403,7 +421,130 @@ public class TestDecommission {
   public void testDecommissionFederation() throws IOException {
     testDecommission(2, 2);
   }
-  
+
+  /**
+   * Test the decommission process on the standby NN.
+   * Verify that admins can run "dfsadmin -refreshNodes" on the SBN and that
+   * the decommission process can finish, as long as admins also run
+   * "dfsadmin -refreshNodes" on the active NN.
+   * The SBN used to mark an excess replica upon recommission. The SBN's pick
+   * of the excess replica could differ from the one picked by the ANN.
+   * That creates an inconsistent state and prevents the SBN from finishing
+   * decommission.
+   */
+  @Test(timeout=360000)
+  public void testDecommissionOnStandby() throws Exception {
+    Configuration hdfsConf = new HdfsConfiguration(conf);
+    hdfsConf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30000);
+    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, 2);
+
+    // The time to wait so that the slow DN's heartbeat is considered old
+    // by BlockPlacementPolicyDefault and thus will choose that DN for
+    // excess replica.
+    long slowHeartbeatDNwaitTime =
+        hdfsConf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000 * (hdfsConf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT) + 1);
+
+    cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
+
+    cluster.transitionToActive(0);
+    cluster.waitActive();
+
+
+    // Step 1, create a cluster with 4 DNs. Blocks are stored on the first 3 DNs.
+    // The last DN is empty. Also configure the last DN to have slow heartbeat
+    // so that it will be chosen as excess replica candidate during recommission.
+
+    // Step 1.a, copy blocks to the first 3 DNs. Since the replication factor
+    // equals the number of DNs, each DN will hold a replica of every block.
+    Path file1 = new Path("testDecommissionHA.dat");
+    int replicas = 3;
+    FileSystem activeFileSys = cluster.getFileSystem(0);
+    writeFile(activeFileSys, file1, replicas);
+
+    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+        cluster.getNameNode(1));
+
+    // Step 1.b, start a DN with slow heartbeat, so that we can know for sure it
+    // will be chosen as the target of excess replica during recommission.
+    hdfsConf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+    cluster.startDataNodes(hdfsConf, 1, true, null, null, null);
+    DataNode lastDN = cluster.getDataNodes().get(3);
+    LOG.info("Started slow-heartbeat DN " + lastDN.getDatanodeUuid());
+
+    // Step 2, decommission the first DN at both ANN and SBN.
+    DataNode firstDN = cluster.getDataNodes().get(0);
+
+    // Step 2.a, ask ANN to decomm the first DN
+    DatanodeInfo decommissionedNodeFromANN = decommissionNode(
+        0, firstDN.getDatanodeUuid(), null, AdminStates.DECOMMISSIONED);
+
+    // Step 2.b, ask SBN to decomm the first DN
+    DatanodeInfo decomNodeFromSBN = decommissionNode(1, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    // Step 3, recommission the first DN on SBN and ANN to create excess replica
+    // It recommissions the node on SBN first to create potential
+    // inconsistent state. In production cluster, such insistent state can happen
+    // even if recommission command was issued on ANN first given the async nature
+    // of the system.
+
+    // Step 3.a, ask SBN to recomm the first DN.
+    // SBN has been fixed so that it no longer invalidates excess replica during
+    // recommission.
+    // Before the fix, SBN could get into the following state.
+    //    1. the last DN would have been chosen as excess replica, given its
+    //    heartbeat is considered old.
+    //    Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
+    //    2. After recomissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
+    //    and one excess replica ( 3 )
+    // After the fix,
+    //    After recomissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
+    Thread.sleep(slowHeartbeatDNwaitTime);
+    recomissionNode(1, decomNodeFromSBN);
+
+    // Step 3.b, ask ANN to recommission the first DN.
+    // To verify the fix, the test makes sure the excess replica picked by ANN
+    // is different from the one picked by SBN before the fix.
+    // To achieve that, we make sure next-to-last DN is chosen as excess replica
+    // by ANN.
+    // 1. restore LastDNprop's heartbeat interval.
+    // 2. Make next-to-last DN's heartbeat slow.
+    MiniDFSCluster.DataNodeProperties LastDNprop = cluster.stopDataNode(3);
+    LastDNprop.conf.setLong(
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    cluster.restartDataNode(LastDNprop);
+
+    MiniDFSCluster.DataNodeProperties nextToLastDNprop = cluster.stopDataNode(2);
+    nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+    cluster.restartDataNode(nextToLastDNprop);
+    cluster.waitActive();
+    Thread.sleep(slowHeartbeatDNwaitTime);
+    recomissionNode(0, decommissionedNodeFromANN);
+
+    // Step 3.c, make sure the DN has deleted the block and report to NNs
+    cluster.triggerHeartbeats();
+    HATestUtil.waitForDNDeletions(cluster);
+    cluster.triggerDeletionReports();
+
+    // Step 4, decommission the first DN on both ANN and SBN
+    // With the fix to make sure SBN no longer marks excess replica
+    // during recommission, SBN's decommission can finish properly
+    decommissionNode(0, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    // Ask SBN to decomm the first DN
+    decommissionNode(1, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    cluster.shutdown();
+
+  }
+
   private void testDecommission(int numNamenodes, int numDatanodes)
       throws IOException {
     LOG.info("Starting test testDecommission");
@@ -430,7 +571,7 @@ public class TestDecommission {
         int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
         // Decommission one node. Verify that node is decommissioned.
-        DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
+        DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
             AdminStates.DECOMMISSIONED);
         decommissionedNodes.add(decomNode);
         assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
@@ -458,7 +599,7 @@ public class TestDecommission {
       }
     }
 
-    // Restart the cluster and ensure recommissioned datanodes
+    // Restart the cluster and ensure decommissioned datanodes
     // are allowed to register with the namenode
     cluster.shutdown();
     startCluster(numNamenodes, numDatanodes, conf);
@@ -486,7 +627,7 @@ public class TestDecommission {
       writeFile(fileSys, file1, replicas);
         
       // Decommission one node. Verify that node is decommissioned.
-      DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
+      DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
           AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
         
@@ -510,7 +651,7 @@ public class TestDecommission {
           + tries + " times.", tries < 20);
 
       // stop decommission and check if the new replicas are removed
-      recomissionNode(decomNode);
+      recomissionNode(0, decomNode);
       // wait for the block to be deleted
       tries = 0;
       while (tries++ < 20) {
@@ -561,7 +702,7 @@ public class TestDecommission {
       
       FSNamesystem fsn = cluster.getNamesystem(i);
       NameNode namenode = cluster.getNameNode(i);
-      DatanodeInfo downnode = decommissionNode(i, null,
+      DatanodeInfo downnode = decommissionNode(i, null, null,
           AdminStates.DECOMMISSION_INPROGRESS);
       // Check namenode stats for multiple datanode heartbeats
       verifyStats(namenode, fsn, downnode, true);
@@ -744,4 +885,76 @@ public class TestDecommission {
     startCluster(numNamenodes, numDatanodes, conf);
     cluster.shutdown();
   }
+
+  /**
+   * Test using a "registration name" in a host include file.
+   *
+   * Registration names are DataNode names specified in the configuration by
+   * dfs.datanode.hostname.  The DataNode will send this name to the NameNode
+   * as part of its registration.  Registration names are helpful when you
+   * want to override the normal first result of DNS resolution on the
+   * NameNode.  For example, a given datanode IP may map to two hostnames,
+   * and you may want to choose which hostname is used internally in the
+   * cluster.
+   *
+   * It is not recommended to use a registration name which is not also a
+   * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
+   */
+  @Test(timeout=360000)
+  public void testIncludeByRegistrationName() throws IOException,
+      InterruptedException {
+    Configuration hdfsConf = new Configuration(conf);
+    // Any IPv4 address starting with 127 functions as a "loopback" address
+    // which is connected to the current host.  So by choosing 127.0.0.100
+    // as our registration name, we have chosen a name which is also a valid
+    // way of reaching the local DataNode we're going to start.
+    // Typically, a registration name would be a hostname, but we don't want
+    // to deal with DNS in this test.
+    final String registrationName = "127.0.0.100";
+    final String nonExistentDn = "127.0.0.10";
+    hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
+    cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(1).checkDataNodeHostConfig(true)
+        .setupHostsFile(true).build();
+    cluster.waitActive();
+
+    // Set up an includes file that doesn't have our datanode.
+    ArrayList<String> nodes = new ArrayList<String>();
+    nodes.add(nonExistentDn);
+    writeConfigFile(hostsFile, nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+
+    // Wait for the DN to be marked dead.
+    DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    while (true) {
+      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
+      if (info.length == 1) {
+        break;
+      }
+      LOG.info("Waiting for datanode to be marked dead");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+
+    // Use a non-empty include file with our registration name.
+    // It should work.
+    int dnPort = cluster.getDataNodes().get(0).getXferPort();
+    nodes = new ArrayList<String>();
+    nodes.add(registrationName + ":" + dnPort);
+    writeConfigFile(hostsFile, nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+    cluster.restartDataNode(0);
+
+    // Wait for the DN to come back.
+    while (true) {
+      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
+      if (info.length == 1) {
+        Assert.assertFalse(info[0].isDecommissioned());
+        Assert.assertFalse(info[0].isDecommissionInProgress());
+        assertEquals(registrationName, info[0].getHostName());
+        break;
+      }
+      LOG.info("Waiting for datanode to come back");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+  }
 }

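For illustration only (not part of the commit): testIncludeByRegistrationName above wires a registration name through DFS_DATANODE_HOST_NAME_KEY and a managed hosts file. The minimal builder setup it relies on, with the loopback address purely illustrative, is:

    Configuration conf = new HdfsConfiguration();
    // Any 127.x.x.x loopback address doubles as a reachable registration
    // name for a local DataNode, avoiding DNS in tests.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.100");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .checkDataNodeHostConfig(true) // have the DN honor the configured name
        .setupHostsFile(true)          // let the cluster manage an include file
        .build();
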
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java Tue Aug 19 23:49:39 2014
@@ -202,6 +202,8 @@ public class TestEncryptedTransfer {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      // Set short retry timeouts so this test runs faster
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       
       FileSystem fs = getFileSystem(conf);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Tue Aug 19 23:49:39 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -39,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.Assert;
 import org.junit.Test;
@@ -169,6 +172,7 @@ public class TestFileAppend{
       }
 
     } finally {
+      client.close();
       fs.close();
       cluster.shutdown();
     }
@@ -345,7 +349,6 @@ public class TestFileAppend{
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     //Set small soft-limit for lease
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
@@ -381,4 +384,57 @@ public class TestFileAppend{
     }
   }
 
+  /**
+   * Old replica of the block should not be accepted as valid for append/read
+   */
+  @Test
+  public void testFailedAppendBlockRejection() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
+        "false");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    DistributedFileSystem fs = null;
+    try {
+      fs = cluster.getFileSystem();
+      Path path = new Path("/test");
+      FSDataOutputStream out = fs.create(path);
+      out.writeBytes("hello\n");
+      out.close();
+
+      // stop one datanode
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      if (dnAddress.startsWith("/")) {
+        dnAddress = dnAddress.substring(1);
+      }
+
+      // append again to bump genstamps
+      for (int i = 0; i < 2; i++) {
+        out = fs.append(path);
+        out.writeBytes("helloagain\n");
+        out.close();
+      }
+
+      // re-open and make the block state as underconstruction
+      out = fs.append(path);
+      cluster.restartDataNode(dnProp, true);
+      // wait till the block report comes
+      Thread.sleep(2000);
+      // check the block locations, this should not contain restarted datanode
+      BlockLocation[] locations = fs.getFileBlockLocations(path, 0,
+          Long.MAX_VALUE);
+      String[] names = locations[0].getNames();
+      for (String node : names) {
+        if (node.equals(dnAddress)) {
+          fail("Failed append should not be present in latest block locations.");
+        }
+      }
+      out.close();
+    } finally {
+      IOUtils.closeStream(fs);
+      cluster.shutdown();
+    }
+  }
+
 }

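For illustration only (not part of the commit): testFailedAppendBlockRejection above disables the replace-datanode-on-failure policy via a raw string key. The equivalent using the DFSConfigKeys constant (assuming the constant name is unchanged on this branch) would be:

    conf.setBoolean(
        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
        false);
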
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Tue Aug 19 23:49:39 2014
@@ -253,18 +253,9 @@ public class TestFileAppend3  {
     assertTrue(fs.rename(p, pnew));
 
     //d. Close file handle that was opened in (b). 
-    try {
-      out.close();
-      fail("close() should throw an exception");
-    } catch(Exception e) {
-      AppendTestUtil.LOG.info("GOOD!", e);
-    }
-
-    //wait for the lease recovery 
-    cluster.setLeasePeriod(1000, 1000);
-    AppendTestUtil.sleep(5000);
+    out.close();
 
-    //check block sizes 
+    //check block sizes
     final long len = fs.getFileStatus(pnew).getLen();
     final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
     final int numblock = locatedblocks.locatedBlockCount();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java Tue Aug 19 23:49:39 2014
@@ -28,6 +28,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.Log;
@@ -37,11 +38,15 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -327,4 +332,70 @@ public class TestFileAppend4 {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test that an append with no locations fails with an exception
+   * showing insufficient locations.
+   */
+  @Test(timeout = 60000)
+  public void testAppendInsufficientLocations() throws Exception {
+    Configuration conf = new Configuration();
+
+    // lower heartbeat interval for fast recognition of DN
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        1000);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
+        .build();
+    DistributedFileSystem fileSystem = null;
+    try {
+      // create a file with replication 2
+      fileSystem = cluster.getFileSystem();
+      Path f = new Path("/testAppend");
+      FSDataOutputStream create = fileSystem.create(f, (short) 2);
+      create.write("/testAppend".getBytes());
+      create.close();
+
+      // Check for replications
+      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
+
+      // Shut down all DNs that have the last block location for the file
+      LocatedBlocks lbs = fileSystem.dfs.getNamenode().
+          getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
+      List<DataNode> dnsOfCluster = cluster.getDataNodes();
+      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
+          getLocations();
+      for (DataNode dn : dnsOfCluster) {
+        for (DatanodeInfo loc : dnsWithLocations) {
+          if (dn.getDatanodeId().equals(loc)) {
+            dn.shutdown();
+            DFSTestUtil.waitForDatanodeDeath(dn);
+          }
+        }
+      }
+
+      // Wait till 0 replication is recognized
+      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
+
+      // Append to the file. At this point there are 2 live DNs left, but
+      // neither of them has the block.
+      try {
+        fileSystem.append(f);
+        fail("Append should fail because of insufficient locations");
+      } catch (IOException e) {
+        LOG.info("Expected exception: ", e);
+      }
+      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+      final INodeFile inode = INodeFile.
+          valueOf(dir.getINode("/testAppend"), "/testAppend");
+      assertTrue("File should remain closed", !inode.isUnderConstruction());
+    } finally {
+      if (null != fileSystem) {
+        fileSystem.close();
+      }
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Aug 19 23:49:39 2014
@@ -27,6 +27,7 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +36,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -137,13 +139,15 @@ public class TestFileCorruption {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+      assertTrue("Data directory does not exist", dataDir.exists());
       ExtendedBlock blk = getBlock(bpid, dataDir);
       if (blk == null) {
         storageDir = cluster.getInstanceStorageDir(0, 1);
         dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         blk = getBlock(bpid, dataDir);
       }
-      assertFalse(blk==null);
+      assertFalse("Data directory does not contain any blocks or there was an "
+          + "IO error", blk==null);
 
       // start a third datanode
       cluster.startDataNodes(conf, 1, true, null, null);
@@ -174,33 +178,15 @@ public class TestFileCorruption {
     
   }
   
-  private ExtendedBlock getBlock(String bpid, File dataDir) {
-    assertTrue("data directory does not exist", dataDir.exists());
-    File[] blocks = dataDir.listFiles();
-    assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
-
-    int idx = 0;
-    String blockFileName = null;
-    for (; idx < blocks.length; idx++) {
-      blockFileName = blocks[idx].getName();
-      if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) {
-        break;
-      }
-    }
-    if (blockFileName == null) {
+  public static ExtendedBlock getBlock(String bpid, File dataDir) {
+    List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
+    if (metadataFiles == null || metadataFiles.isEmpty()) {
       return null;
     }
-    long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
-    long blockTimeStamp = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
-    for (idx=0; idx < blocks.length; idx++) {
-      String fileName = blocks[idx].getName();
-      if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
-        int startIndex = blockFileName.length()+1;
-        int endIndex = fileName.length() - ".meta".length();
-        blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
-        break;
-      }
-    }
-    return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp);
+    File metadataFile = metadataFiles.get(0);
+    File blockFile = Block.metaToBlockFile(metadataFile);
+    return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
+        blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
   }
+
 }

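For illustration only (not part of the commit): the rewritten getBlock above leans on the on-disk naming convention blk_<blockId> for the data file and blk_<blockId>_<generationStamp>.meta for its metadata file. With a hypothetical pair of filenames:

    File meta = new File("/data/current/blk_1073741825_1001.meta");
    File blockFile = Block.metaToBlockFile(meta);        // -> blk_1073741825
    long id = Block.getBlockId(blockFile.getName());     // -> 1073741825
    long gs = Block.getGenerationStamp(meta.getName());  // -> 1001
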
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Tue Aug 19 23:49:39 2014
@@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -74,10 +76,12 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -96,6 +100,8 @@ public class TestFileCreation {
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
+  private static final String RPC_DETAILED_METRICS =
+      "RpcDetailedActivityForPort";
 
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
@@ -370,7 +376,7 @@ public class TestFileCreation {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
-    
+
     UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
         "testuser", new String[]{"testgroup"});
     FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -379,12 +385,15 @@ public class TestFileCreation {
         return FileSystem.get(cluster.getConfiguration(0));
       }
     });
-    
+
+    String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
+
     try {
       Path p = new Path("/testfile");
       FSDataOutputStream stm1 = fs.create(p);
       stm1.write(1);
-      stm1.hflush();
+
+      assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
 
       // Create file again without overwrite
       try {
@@ -394,7 +403,9 @@ public class TestFileCreation {
         GenericTestUtils.assertExceptionContains("already being created by",
             abce);
       }
-      
+      // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
+      assertCounter("AlreadyBeingCreatedExceptionNumOps",
+          6L, getMetrics(metricsName));
       FSDataOutputStream stm2 = fs2.create(p, true);
       stm2.write(2);
       stm2.close();
@@ -403,7 +414,8 @@ public class TestFileCreation {
         stm1.close();
         fail("Should have exception closing stm1 since it was deleted");
       } catch (IOException ioe) {
-        GenericTestUtils.assertExceptionContains("File is not open for writing", ioe);
+        GenericTestUtils.assertExceptionContains("No lease on /testfile", ioe);
+        GenericTestUtils.assertExceptionContains("File does not exist.", ioe);
       }
       
     } finally {
@@ -1189,8 +1201,8 @@ public class TestFileCreation {
         cluster.getNameNodeRpc()
             .complete(f.toString(), client.clientName, null, someOtherFileId);
         fail();
-      } catch(FileNotFoundException fnf) {
-        FileSystem.LOG.info("Caught Expected FileNotFoundException: ", fnf);
+      } catch(LeaseExpiredException e) {
+        FileSystem.LOG.info("Caught Expected LeaseExpiredException: ", e);
       }
     } finally {
       IOUtils.closeStream(dfs);

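For illustration only (not part of the commit): the new assertions above key off the per-port RPC detailed metrics record. Consolidated into one place, the lookup pattern used by the test is:

    String metricsName =
        "RpcDetailedActivityForPort" + cluster.getNameNodePort();
    MetricsRecordBuilder rb = getMetrics(metricsName);
    assertCounter("CreateNumOps", 1L, rb);
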
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Tue Aug 19 23:49:39 2014
@@ -167,6 +167,9 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
+      if (client != null) {
+        client.close();
+      }
       cluster.shutdown();
     }
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Tue Aug 19 23:49:39 2014
@@ -25,14 +25,16 @@ import java.net.InetSocketAddress;
 
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -76,16 +78,22 @@ public class TestIsMethodSupported {
 
   @Test
   public void testNamenodeProtocol() throws IOException {
-    NamenodeProtocolTranslatorPB translator =
-        (NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
+    NamenodeProtocol np =
+        NameNodeProxies.createNonHAProxy(conf,
             nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();
-    boolean exists = translator.isMethodSupported("rollEditLog");
+
+    boolean exists = RpcClientUtil.isMethodSupported(np,
+        NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
+
     assertTrue(exists);
-    exists = translator.isMethodSupported("bogusMethod");
+    exists = RpcClientUtil.isMethodSupported(np,
+        NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
     assertFalse(exists);
   }
-  
+
   @Test
   public void testDatanodeProtocol() throws IOException {
     DatanodeProtocolClientSideTranslatorPB translator = 
@@ -107,16 +115,18 @@ public class TestIsMethodSupported {
         NetUtils.getDefaultSocketFactory(conf));
     assertTrue(translator.isMethodSupported("refreshNamenodes"));
   }
-  
+
   @Test
   public void testClientNamenodeProtocol() throws IOException {
-    ClientNamenodeProtocolTranslatorPB translator = 
-        (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
+    ClientProtocol cp =
+        NameNodeProxies.createNonHAProxy(
             conf, nnAddress, ClientProtocol.class,
             UserGroupInformation.getCurrentUser(), true).getProxy();
-    assertTrue(translator.isMethodSupported("mkdirs"));
+    assertTrue(RpcClientUtil.isMethodSupported(cp,
+        ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs"));
   }
-  
+
   @Test
   public void tesJournalProtocol() throws IOException {
     JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Tue Aug 19 23:49:39 2014
@@ -243,11 +243,56 @@ public class TestLease {
       Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
       Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
       Assert.assertEquals(1, leaseCount(cluster));
+      out.close();
     } finally {
       cluster.shutdown();
     }
   }
-  
+
+  /**
+   * Test that we can open up a file for write, move it to another location,
+   * and then create a new file in the previous location, without causing any
+   * lease conflicts.  This is possible because we now use unique inode IDs
+   * to identify files to the NameNode.
+   */
+  @Test
+  public void testLeaseAfterRenameAndRecreate() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    try {
+      final Path path1 = new Path("/test-file");
+      final String contents1 = "contents1";
+      final Path path2 = new Path("/test-file-new-location");
+      final String contents2 = "contents2";
+
+      // open a file to get a lease
+      FileSystem fs = cluster.getFileSystem();
+      FSDataOutputStream out1 = fs.create(path1);
+      out1.writeBytes(contents1);
+      Assert.assertTrue(hasLease(cluster, path1));
+      Assert.assertEquals(1, leaseCount(cluster));
+
+      DistributedFileSystem fs2 = (DistributedFileSystem)
+          FileSystem.newInstance(fs.getUri(), fs.getConf());
+      fs2.rename(path1, path2);
+
+      FSDataOutputStream out2 = fs2.create(path1);
+      out2.writeBytes(contents2);
+      out2.close();
+
+      // The first file should still be open and valid
+      Assert.assertTrue(hasLease(cluster, path2));
+      out1.close();
+
+      // Contents should be as expected
+      DistributedFileSystem fs3 = (DistributedFileSystem)
+          FileSystem.newInstance(fs.getUri(), fs.getConf());
+      Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
+      Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testLease() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Tue Aug 19 23:49:39 2014
@@ -153,6 +153,15 @@ public class TestLeaseRecovery2 {
     verifyFile(dfs, filepath1, actual, size);
   }
 
+  @Test
+  public void testLeaseRecoverByAnotherUser() throws Exception {
+    byte [] actual = new byte[FILE_SIZE];
+    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    Path filepath = createFile("/immediateRecoverLease-x", 0, true);
+    recoverLeaseUsingCreate2(filepath);
+    verifyFile(dfs, filepath, actual, 0);
+  }
+
   private Path createFile(final String filestr, final int size,
       final boolean triggerLeaseRenewerInterrupt)
   throws IOException, InterruptedException {
@@ -196,7 +205,7 @@ public class TestLeaseRecovery2 {
   }
 
   private void recoverLeaseUsingCreate(Path filepath)
-  throws IOException, InterruptedException {
+      throws IOException, InterruptedException {
     FileSystem dfs2 = getFSAsAnotherUser(conf);
     for(int i = 0; i < 10; i++) {
       AppendTestUtil.LOG.info("i=" + i);
@@ -216,6 +225,20 @@ public class TestLeaseRecovery2 {
     fail("recoverLeaseUsingCreate failed");
   }
 
+  private void recoverLeaseUsingCreate2(Path filepath)
+      throws Exception {
+    FileSystem dfs2 = getFSAsAnotherUser(conf);
+    DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
+    // create a file as the other user so that its DFSClient holds a lease
+    Path filepath2 = new Path("/immediateRecoverLease-x2");
+    dfsx.create(filepath2, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
+    // wait past the shortened soft lease limit, then trigger recovery of
+    // the original file's lease via append from this second user
+    Thread.sleep(10000);
+    dfsx.append(filepath);
+  }
+
   private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
       int size) throws IOException {
     AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "

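For illustration only (not part of the commit): recoverLeaseUsingCreate2 above forces lease recovery indirectly, by appending as a second user after the soft limit expires. For comparison, the direct API is DistributedFileSystem#recoverLease, which is asynchronous and polled until it reports success; the path below is illustrative:

    Path p = new Path("/immediateRecoverLease-x");
    boolean recovered = dfs.recoverLease(p);
    while (!recovered) {
      Thread.sleep(1000);          // recovery completes in the background
      recovered = dfs.recoverLease(p);
    }
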
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java Tue Aug 19 23:49:39 2014
@@ -107,8 +107,8 @@ public class TestLeaseRenewer {
     
     // Set up a file so that we start renewing our lease.
     DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
-    String filePath = "/foo";
-    renewer.put(filePath, mockStream, MOCK_DFSCLIENT);
+    long fileId = 123L;
+    renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
 
     // Wait for lease to get renewed
     long failTime = Time.now() + 5000;
@@ -120,7 +120,7 @@ public class TestLeaseRenewer {
       Assert.fail("Did not renew lease at all!");
     }
 
-    renewer.closeFile(filePath, MOCK_DFSCLIENT);
+    renewer.closeFile(fileId, MOCK_DFSCLIENT);
   }
   
   /**
@@ -138,8 +138,8 @@ public class TestLeaseRenewer {
     
     // Set up a file so that we start renewing our lease.
     DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
-    String filePath = "/foo";
-    renewer.put(filePath, mockStream1, mockClient1);
+    long fileId = 456L;
+    renewer.put(fileId, mockStream1, mockClient1);
 
     // Second DFSClient does renew lease
     final DFSClient mockClient2 = createMockClient();
@@ -149,7 +149,7 @@ public class TestLeaseRenewer {
 
     // Set up a file so that we start renewing our lease.
     DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
-    renewer.put(filePath, mockStream2, mockClient2);
+    renewer.put(fileId, mockStream2, mockClient2);
 
     
     // Wait for lease to get renewed
@@ -170,19 +170,19 @@ public class TestLeaseRenewer {
       }
     }, 100, 10000);
 
-    renewer.closeFile(filePath, mockClient1);
-    renewer.closeFile(filePath, mockClient2);
+    renewer.closeFile(fileId, mockClient1);
+    renewer.closeFile(fileId, mockClient2);
   }
   
   @Test
   public void testThreadName() throws Exception {
     DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
-    String filePath = "/foo";
+    long fileId = 789L;
     Assert.assertFalse("Renewer not initially running",
         renewer.isRunning());
     
     // Pretend to open a file
-    renewer.put(filePath, mockStream, MOCK_DFSCLIENT);
+    renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
     
     Assert.assertTrue("Renewer should have started running",
         renewer.isRunning());
@@ -192,7 +192,7 @@ public class TestLeaseRenewer {
     Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
     
     // Pretend to close the file
-    renewer.closeFile(filePath, MOCK_DFSCLIENT);
+    renewer.closeFile(fileId, MOCK_DFSCLIENT);
     renewer.setEmptyTime(Time.now());
     
     // Should stop the renewer running within a few seconds

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Tue Aug 19 23:49:39 2014
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.URL;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -32,8 +25,16 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.junit.Assert;
 import org.junit.Test;
 
+import javax.management.*;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * The test makes sure that NameNode detects the presence of blocks that do
  * not have any valid replicas. In addition, it verifies that HDFS front page displays
@@ -45,8 +46,11 @@ public class TestMissingBlocksAlert {
                            LogFactory.getLog(TestMissingBlocksAlert.class);
   
   @Test
-  public void testMissingBlocksAlert() throws IOException, 
-                                       InterruptedException {
+  public void testMissingBlocksAlert()
+          throws IOException, InterruptedException,
+                 MalformedObjectNameException, AttributeNotFoundException,
+                 MBeanException, ReflectionException,
+                 InstanceNotFoundException {
     
     MiniDFSCluster cluster = null;
     
@@ -54,6 +58,7 @@ public class TestMissingBlocksAlert {
       Configuration conf = new HdfsConfiguration();
       //minimize test delay
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       int fileLen = 10*1024;
       conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
 
@@ -94,14 +99,11 @@ public class TestMissingBlocksAlert {
       assertEquals(4, dfs.getUnderReplicatedBlocksCount());
       assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
 
-
-      // Now verify that it shows up on webui
-      URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) + 
-                        "/dfshealth.jsp");
-      String dfsFrontPage = DFSTestUtil.urlGet(url);
-      String warnStr = "WARNING : There are ";
-      assertTrue("HDFS Front page does not contain expected warning", 
-                 dfsFrontPage.contains(warnStr + "1 missing blocks"));
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=NameNode,name=NameNodeInfo");
+      Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+                      "NumberOfMissingBlocks"));
 
       // now do the reverse: remove the file and expect the number of missing 
       // blocks to go to zero
@@ -116,11 +118,8 @@ public class TestMissingBlocksAlert {
       assertEquals(2, dfs.getUnderReplicatedBlocksCount());
       assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
 
-      // and make sure WARNING disappears
-      // Now verify that it shows up on webui
-      dfsFrontPage = DFSTestUtil.urlGet(url);
-      assertFalse("HDFS Front page contains unexpected warning", 
-                  dfsFrontPage.contains(warnStr));
+      Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
+              "NumberOfMissingBlocks"));
     } finally {
       if (cluster != null) {
         cluster.shutdown();

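The rewritten assertion reads NumberOfMissingBlocks from the NameNode's NameNodeInfo MXBean instead of scraping dfshealth.jsp. The same lookup works standalone; this sketch assumes it runs inside a JVM that hosts the NameNode (as the MiniDFSCluster test does), and fetchLong is a helper introduced here:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MissingBlocksProbe {
      // Reads a numeric attribute from the NameNode's NameNodeInfo MXBean.
      static long fetchLong(String attribute) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        return ((Number) mbs.getAttribute(name, attribute)).longValue();
      }

      public static void main(String[] args) throws Exception {
        System.out.println(fetchLong("NumberOfMissingBlocks"));
      }
    }
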
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java Tue Aug 19 23:49:39 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -177,12 +178,13 @@ public class TestPersistBlocks {
       
       // Abandon the last block
       DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
+      HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
       LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(
           FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
       assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
       LocatedBlock b = blocks.getLastLocatedBlock();
-      dfsclient.getNamenode().abandonBlock(b.getBlock(), FILE_NAME,
-          dfsclient.clientName);
+      dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(),
+          FILE_NAME, dfsclient.clientName);
       
       // explicitly do NOT close the file.
       cluster.restartNameNode();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Tue Aug 19 23:49:39 2014
@@ -31,12 +31,16 @@ import java.util.concurrent.Future;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -49,7 +53,14 @@ import org.mockito.stubbing.Answer;
 public class TestPread {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 4096;
-  boolean simulatedStorage = false;
+  boolean simulatedStorage;
+  boolean isHedgedRead;
+
+  @Before
+  public void setup() {
+    simulatedStorage = false;
+    isHedgedRead = false;
+  }
 
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
     int replication = 3; // We need > 1 block to test out the hedged reads.
@@ -73,7 +84,7 @@ public class TestPread {
     
     // now create the real file
     DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
-        blockSize, (short) 1, seed);
+        blockSize, (short) replication, seed);
   }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
@@ -88,11 +99,30 @@ public class TestPread {
   private void doPread(FSDataInputStream stm, long position, byte[] buffer,
                        int offset, int length) throws IOException {
     int nread = 0;
+    long totalRead = 0;
+    DFSInputStream dfstm = null;
+
+    if (stm.getWrappedStream() instanceof DFSInputStream) {
+      dfstm = (DFSInputStream) (stm.getWrappedStream());
+      totalRead = dfstm.getReadStatistics().getTotalBytesRead();
+    }
+
     while (nread < length) {
-      int nbytes = stm.read(position+nread, buffer, offset+nread, length-nread);
+      int nbytes =
+          stm.read(position + nread, buffer, offset + nread, length - nread);
       assertTrue("Error in pread", nbytes > 0);
       nread += nbytes;
     }
+
+    if (dfstm != null) {
+      if (isHedgedRead) {
+        assertTrue("Expected read statistic to be incremented", length <= dfstm
+            .getReadStatistics().getTotalBytesRead() - totalRead);
+      } else {
+        assertEquals("Expected read statistic to be incremented", length, dfstm
+            .getReadStatistics().getTotalBytesRead() - totalRead);
+      }
+    }
   }
   
   private void pReadFile(FileSystem fileSys, Path name) throws IOException {
@@ -194,7 +224,7 @@ public class TestPread {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
   }
-  
+
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
     assertTrue(fileSys.delete(name, true));
@@ -235,6 +265,7 @@ public class TestPread {
    */
   @Test
   public void testHedgedPreadDFSBasic() throws IOException {
+    isHedgedRead = true;
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
     conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 1);
@@ -244,8 +275,83 @@ public class TestPread {
   }
 
   @Test
+  public void testHedgedReadLoopTooManyTimes() throws IOException {
+    Configuration conf = new Configuration();
+    int numHedgedReadPoolThreads = 5;
+    final int hedgedReadTimeoutMillis = 50;
+
+    conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
+        numHedgedReadPoolThreads);
+    conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
+        hedgedReadTimeoutMillis);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
+    // Set up the InjectionHandler
+    DFSClientFaultInjector.instance = Mockito
+        .mock(DFSClientFaultInjector.class);
+    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
+    final int sleepMs = 100;
+    Mockito.doAnswer(new Answer<Void>() {
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
+        if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
+          System.out.println("-------------- throw Checksum Exception");
+          throw new ChecksumException("ChecksumException test", 100);
+        }
+        return null;
+      }
+    }).when(injector).fetchFromDatanodeException();
+    Mockito.doAnswer(new Answer<Void>() {
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        Thread.sleep(sleepMs * 2);
+        return null;
+      }
+    }).when(injector).readFromDatanodeDelay();
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .format(true).build();
+    DistributedFileSystem fileSys = cluster.getFileSystem();
+    DFSClient dfsClient = fileSys.getClient();
+    FSDataOutputStream output = null;
+    DFSInputStream input = null;
+    String filename = "/hedgedReadMaxOut.dat";
+    try {
+      
+      Path file = new Path(filename);
+      output = fileSys.create(file, (short) 2);
+      byte[] data = new byte[64 * 1024];
+      output.write(data);
+      output.flush();
+      output.write(data);
+      output.flush();
+      output.write(data);
+      output.flush();
+      output.close();
+      byte[] buffer = new byte[64 * 1024];
+      input = dfsClient.open(filename);
+      input.read(0, buffer, 0, 1024);
+      input.close();
+      assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
+    } catch (BlockMissingException e) {
+      assertTrue("Hedged read should not fail with BlockMissingException", false);
+    } finally {
+      Mockito.reset(injector);
+      IOUtils.cleanup(null, input);
+      IOUtils.cleanup(null, output);
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  @Test
   public void testMaxOutHedgedReadPool() throws IOException,
       InterruptedException, ExecutionException {
+    isHedgedRead = true;
     Configuration conf = new Configuration();
     int numHedgedReadPoolThreads = 5;
     final int initialHedgedReadTimeoutMillis = 50000;
@@ -328,6 +434,8 @@ public class TestPread {
       throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
     conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
@@ -353,7 +461,6 @@ public class TestPread {
   public void testPreadDFSSimulated() throws IOException {
     simulatedStorage = true;
     testPreadDFS();
-    simulatedStorage = false;
   }
   
   /**

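testHedgedReadLoopTooManyTimes drives the hedged-read path: if the first replica does not answer within the threshold, a second read is raced against it and the first result wins. Stripped of HDFS specifics, the pattern is roughly the following (illustrative names only; the real DFSInputStream logic also cancels the loser and rotates through datanodes):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    class HedgedReader {
      // Run primary; if it has not finished within thresholdMillis,
      // submit backup and return whichever of the two completes first.
      static <T> T hedge(ExecutorService pool, Callable<T> primary,
          Callable<T> backup, long thresholdMillis) throws Exception {
        ExecutorCompletionService<T> ecs =
            new ExecutorCompletionService<T>(pool);
        ecs.submit(primary);
        Future<T> first = ecs.poll(thresholdMillis, TimeUnit.MILLISECONDS);
        if (first != null) {
          return first.get();          // primary won within the threshold
        }
        ecs.submit(backup);            // hedge: race a second attempt
        return ecs.take().get();       // first of the two to finish
      }
    }
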
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java Tue Aug 19 23:49:39 2014
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Tue Aug 19 23:49:39 2014
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 
@@ -26,11 +28,13 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.log4j.Level;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestRenameWhileOpen {
   {
@@ -57,6 +61,7 @@ public class TestRenameWhileOpen {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -65,6 +70,15 @@ public class TestRenameWhileOpen {
     try {
       cluster.waitActive();
       fs = cluster.getFileSystem();
+
+      // Normally, the in-progress edit log would be finalized by
+      // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+      // disable that here.
+      FSEditLog spyLog =
+          spy(cluster.getNameNode().getFSImage().getEditLog());
+      doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
+      cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
+
       final int nnport = cluster.getNameNodePort();
 
       // create file1.
@@ -92,18 +106,21 @@ public class TestRenameWhileOpen {
 
       // create file3
       Path file3 = new Path(dir3, "file3");
-      FSDataOutputStream stm3 = TestFileCreation.createFile(fs, file3, 1);
-      TestFileCreation.writeFile(stm3);
-      // rename file3 to some bad name
-      try {
-        fs.rename(file3, new Path(dir3, "$ "));
-      } catch(Exception e) {
-        e.printStackTrace();
-      }
-      
-      // restart cluster with the same namenode port as before.
-      // This ensures that leases are persisted in fsimage.
+      FSDataOutputStream stm3 = fs.create(file3);
+      fs.rename(file3, new Path(dir3, "bozo"));
+      // Get a new block for the file.
+      TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
+      stm3.hflush();
+
+      // Stop the NameNode before closing the files.
+      // This will ensure that the write leases are still active and present
+      // in the edit log.  Similarly, there should be a pending ADD_BLOCK_OP
+      // for file3, since we just added a block to that file.
+      cluster.getNameNode().stop();
+
+      // Restart cluster with the same namenode port as before.
       cluster.shutdown();
+
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
       cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                 .format(false)
@@ -111,7 +128,7 @@ public class TestRenameWhileOpen {
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
-      // persistent leases from fsimage.
+      // persistent leases from the edit log.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
       cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)

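The spy-and-stub trick above -- wrapping the live FSEditLog in a Mockito spy and turning endCurrentLogSegment into a no-op -- shown in isolation (EditLog is a stand-in class; Mockito on the classpath is assumed):

    import static org.mockito.Mockito.anyBoolean;
    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.spy;

    class EditLogSpyExample {
      static class EditLog {           // stand-in for FSEditLog
        void endCurrentLogSegment(boolean sync) {
          throw new IllegalStateException("suppressed in the test");
        }
      }

      public static void main(String[] args) {
        EditLog spyLog = spy(new EditLog());
        doNothing().when(spyLog).endCurrentLogSegment(anyBoolean());
        spyLog.endCurrentLogSegment(true);   // no-op: the stub wins
      }
    }
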
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Tue Aug 19 23:49:39 2014
@@ -25,13 +25,16 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -453,4 +456,66 @@ public class TestReplication {
     }
     fs.delete(fileName, true);
   }
+
+  /**
+   * Test that blocks get replicated when corrupted replicas exist, as long
+   * as the number of good replicas is at least the minimum replication.
+   *
+   * Simulate rbw blocks by creating dummy copies, then restart the DNs so
+   * that the corrupted blocks are detected promptly.
+   */
+  @Test(timeout=30000)
+  public void testReplicationWhenBlockCorruption() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setLong(
+          DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      FileSystem fs = cluster.getFileSystem();
+      FSDataOutputStream create = fs.create(new Path("/test"));
+      fs.setReplication(new Path("/test"), (short) 1);
+      create.write(new byte[1024]);
+      create.close();
+
+      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
+      File participatedNodeDirs = null;
+      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
+        File storageDir = cluster.getInstanceStorageDir(i, 0);
+        String bpid = cluster.getNamesystem().getBlockPoolId();
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        if (data_dir.listFiles().length == 0) {
+          nonParticipatedNodeDirs.add(data_dir);
+        } else {
+          participatedNodeDirs = data_dir;
+        }
+      }
+
+      String blockFile = null;
+      File[] listFiles = participatedNodeDirs.listFiles();
+      for (File file : listFiles) {
+        if (file.getName().startsWith("blk_")
+            && !file.getName().endsWith("meta")) {
+          blockFile = file.getName();
+          for (File file1 : nonParticipatedNodeDirs) {
+            file1.mkdirs();
+            new File(file1, blockFile).createNewFile();
+            new File(file1, blockFile + "_1000.meta").createNewFile();
+          }
+          break;
+        }
+      }
+
+      fs.setReplication(new Path("/test"), (short) 3);
+      // Restart the DNs so that all of them detect the dummy copied blocks.
+      cluster.restartDataNodes();
+      cluster.waitActive();
+      cluster.triggerBlockReports();
+      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

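DFSTestUtil.waitReplication, which closes the test, is a poll-until-timeout loop. Its generic shape, with check() standing in for the replication probe:

    import java.util.concurrent.TimeoutException;

    abstract class WaitFor {
      abstract boolean check() throws Exception;

      void await(long intervalMs, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }
    }
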
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java Tue Aug 19 23:49:39 2014
@@ -390,6 +390,10 @@ public class TestRollingUpgrade {
 
       // Once finalized, there should be no more fsimage for rollbacks.
       Assert.assertFalse(fsimage.hasRollbackFSImage());
+
+      // Should have no problem in restart and replaying edits that include
+      // the FINALIZE op.
+      dfsCluster.restartNameNode(0);
     } finally {
       if (cluster != null) {
         cluster.shutdown();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Tue Aug 19 23:49:39 2014
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -73,6 +77,7 @@ public class TestSafeMode {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();      
     fs = cluster.getFileSystem();
@@ -296,7 +301,8 @@ public class TestSafeMode {
    * assert that they are either allowed or fail as expected.
    */
   @Test
-  public void testOperationsWhileInSafeMode() throws IOException {
+  public void testOperationsWhileInSafeMode() throws IOException,
+      InterruptedException {
     final Path file1 = new Path("/file1");
 
     assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
@@ -381,7 +387,19 @@ public class TestSafeMode {
       public void run(FileSystem fs) throws IOException {
         fs.setAcl(file1, Lists.<AclEntry>newArrayList());
       }});
-
+    
+    runFsFun("setXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.setXAttr(file1, "user.a1", null);
+      }});
+    
+    runFsFun("removeXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeXAttr(file1, "user.a1");
+      }});
+    
     try {
       DFSTestUtil.readFile(fs, file1);
     } catch (IOException ioe) {
@@ -394,6 +412,22 @@ public class TestSafeMode {
       fail("getAclStatus failed while in SM");
     }
 
+    // Test access
+    UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
+    FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws IOException {
+        return FileSystem.get(conf);
+      }
+    });
+    myfs.access(file1, FsAction.READ);
+    try {
+      myfs.access(file1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

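The doAs pattern added above -- obtaining a FileSystem bound to a different user so that fs.access is checked against that user's permissions -- packaged as a small helper (a sketch built only from the APIs the hunk itself uses):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    class AsUser {
      // Returns a FileSystem whose calls are permission-checked as 'user'.
      static FileSystem fsAs(String user, final Configuration conf)
          throws IOException, InterruptedException {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });
      }
    }
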
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java Tue Aug 19 23:49:39 2014
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.junit.Test;
 
 public class TestSetrepDecreasing {
-  @Test
+  @Test(timeout=120000)
   public void testSetrepDecreasing() throws IOException {
     TestSetrepIncreasing.setrep(5, 3, false);
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java Tue Aug 19 23:49:39 2014
@@ -75,11 +75,11 @@ public class TestSetrepIncreasing {
     }
   }
 
-  @Test
+  @Test(timeout=120000)
   public void testSetrepIncreasing() throws IOException {
     setrep(3, 7, false);
   }
-  @Test
+  @Test(timeout=120000)
   public void testSetrepIncreasingSimulatedStorage() throws IOException {
     setrep(3, 7, true);
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java Tue Aug 19 23:49:39 2014
@@ -186,6 +186,17 @@ public class TestSnapshotCommands {
     FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn.rename");
     FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub1");
     FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub2");
+
+    //try renaming from a non-existing snapshot
+    FsShellRun("-renameSnapshot /sub1 sn.nonexist sn.rename", 1,
+        "renameSnapshot: The snapshot sn.nonexist does not exist for directory /sub1");
+
+    //try renaming to existing snapshots
+    FsShellRun("-createSnapshot /sub1 sn.new");
+    FsShellRun("-renameSnapshot /sub1 sn.new sn.rename", 1,
+        "renameSnapshot: The snapshot sn.rename already exists for directory /sub1");
+    FsShellRun("-renameSnapshot /sub1 sn.rename sn.new", 1,
+        "renameSnapshot: The snapshot sn.new already exists for directory /sub1");
   }
 
   @Test