Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2017/06/05 20:31:10 UTC

[21/50] [abbrv] hadoop git commit: HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.
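
The failure mode, per the JIRA title: while the test file is being
written, a volume is removed on one DataNode; the write pipeline drops
that node and the client asks for a replacement DataNode, but the small
test cluster has no spare node to offer, so pipeline recovery cannot
complete and the test times out. The patch below starts one extra
DataNode before waiting for re-replication. Here is a minimal,
self-contained sketch of that idea; it is not part of the commit, and
names such as PipelineRecoverySketch and testFile are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class PipelineRecoverySketch {
      public static void main(String[] args) throws Exception {
        final short REPLICATION = 3;
        Configuration conf = new HdfsConfiguration();
        // Start with exactly as many DataNodes as replicas: no spare
        // node is available if a pipeline member fails mid-write.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(REPLICATION)
            .build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          Path testFile = new Path("/test");
          try (FSDataOutputStream out = fs.create(testFile, REPLICATION)) {
            out.write(new byte[1024]);
            // ... the real test removes a volume on one DataNode here,
            // while the write is still in flight ...
          }
          // The fix: bring up one extra DataNode so pipeline recovery
          // has a replacement target, then wait for full replication.
          cluster.startDataNodes(conf, 1, true, null, null, null);
          DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
        } finally {
          cluster.shutdown();
        }
      }
    }

The same patch also raises the test timeout from 180 to 600 seconds,
giving the longer write-recover-verify sequence room to finish on slow
machines.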


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce179a61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce179a61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce179a61

Branch: refs/heads/YARN-5734
Commit: ce179a6104aaee17de57bcde3ad094456d1d68d6
Parents: 4065988
Author: Lei Xu <le...@apache.org>
Authored: Tue May 30 11:09:03 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 13:29:35 2017 -0700

----------------------------------------------------------------------
 .../datanode/TestDataNodeHotSwapVolumes.java    | 34 ++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce179a61/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5aec174..b308ca9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
+
+    // Write more files to make sure that the DataNode whose volume was
+    // removed is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 block",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)

