Posted to common-commits@hadoop.apache.org by in...@apache.org on 2018/05/18 16:41:38 UTC

[4/5] hadoop git commit: HDFS-13592. TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly. Contributed by Anbang Hu.

HDFS-13592. TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly. Contributed by Anbang Hu.

(cherry picked from commit 57b893de3d36d20f65ee81b5cc3cfef12594b75b)
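
The fix follows the usual MiniDFSCluster cleanup pattern: wrap the test body
in try/finally so the cluster is torn down even when an assertion fails
mid-test. A minimal sketch of the pattern, using a hypothetical test class
(the actual change to TestNameNodePrunesMissingStorages is in the diff
below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Test;

    public class MiniClusterShutdownSketch {
      @Test
      public void testWithGuaranteedShutdown() throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .build();
        try {
          cluster.waitActive();
          // ... test body goes here; a failed assertion throws, but the
          // finally block below still runs and tears the cluster down.
        } finally {
          if (cluster != null) {
            // Stops the NameNode and DataNode daemons and releases the
            // ports and storage directories they hold.
            cluster.shutdown();
          }
        }
      }
    }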


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e0ec16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e0ec16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e0ec16

Branch: refs/heads/branch-2
Commit: b0e0ec164879cf8218c5761e694130c3dd2df29b
Parents: c4d4ae6
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 18 09:36:58 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 18 09:40:10 2018 -0700

----------------------------------------------------------------------
 .../TestNameNodePrunesMissingStorages.java      | 97 +++++++++++---------
 1 file changed, 53 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e0ec16/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index f234bcc..68a0d6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -376,51 +376,60 @@ public class TestNameNodePrunesMissingStorages {
         .Builder(conf).numDataNodes(1)
         .storagesPerDatanode(2)
         .build();
-    // Create two files to ensure each storage has a block
-    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file1"),
-        102400, 102400, 102400, (short)1,
-        0x1BAD5EE);
-    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file2"),
-        102400, 102400, 102400, (short)1,
-        0x1BAD5EED);
-    // Get the datanode storages and data directories
-    DataNode dn = cluster.getDataNodes().get(0);
-    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
-    DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
-        getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
-    DatanodeStorageInfo[] dnStoragesInfosBeforeRestart =
-        dnDescriptor.getStorageInfos();
-    Collection<String> oldDirs =  new ArrayList<String>(dn.getConf().
-        getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
-    // Keep the first data directory and remove the second.
-    String newDirs = oldDirs.iterator().next();
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
-    // Restart the datanode with the new conf
-    cluster.stopDataNode(0);
-    cluster.startDataNodes(conf, 1, false, null, null);
-    dn = cluster.getDataNodes().get(0);
-    cluster.waitActive();
-    // Assert that the dnDescriptor has both the storages after restart
-    assertArrayEquals(dnStoragesInfosBeforeRestart,
-        dnDescriptor.getStorageInfos());
-    // Assert that the removed storage is marked as FAILED
-    // when DN heartbeats to the NN
-    int numFailedStoragesWithBlocks = 0;
-    DatanodeStorageInfo failedStorageInfo = null;
-    for (DatanodeStorageInfo dnStorageInfo: dnDescriptor.getStorageInfos()) {
-      if (dnStorageInfo.areBlocksOnFailedStorage()) {
-        numFailedStoragesWithBlocks++;
-        failedStorageInfo = dnStorageInfo;
+    try {
+      cluster.waitActive();
+      // Create two files to ensure each storage has a block
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file1"),
+          102400, 102400, 102400, (short)1,
+          0x1BAD5EE);
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file2"),
+          102400, 102400, 102400, (short)1,
+          0x1BAD5EED);
+      // Get the datanode storages and data directories
+      DataNode dn = cluster.getDataNodes().get(0);
+      BlockManager bm =
+          cluster.getNameNode().getNamesystem().getBlockManager();
+      DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
+          getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
+      DatanodeStorageInfo[] dnStoragesInfosBeforeRestart =
+          dnDescriptor.getStorageInfos();
+      Collection<String> oldDirs =  new ArrayList<String>(dn.getConf().
+          getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
+      // Keep the first data directory and remove the second.
+      String newDirs = oldDirs.iterator().next();
+      conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+      // Restart the datanode with the new conf
+      cluster.stopDataNode(0);
+      cluster.startDataNodes(conf, 1, false, null, null);
+      dn = cluster.getDataNodes().get(0);
+      cluster.waitActive();
+      // Assert that the dnDescriptor has both the storages after restart
+      assertArrayEquals(dnStoragesInfosBeforeRestart,
+          dnDescriptor.getStorageInfos());
+      // Assert that the removed storage is marked as FAILED
+      // when DN heartbeats to the NN
+      int numFailedStoragesWithBlocks = 0;
+      DatanodeStorageInfo failedStorageInfo = null;
+      for (DatanodeStorageInfo dnStorageInfo: dnDescriptor.getStorageInfos()) {
+        if (dnStorageInfo.areBlocksOnFailedStorage()) {
+          numFailedStoragesWithBlocks++;
+          failedStorageInfo = dnStorageInfo;
+        }
+      }
+      assertEquals(1, numFailedStoragesWithBlocks);
+      // Heartbeat manager removes the blocks associated with this failed
+      // storage
+      bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+      assertTrue(!failedStorageInfo.areBlocksOnFailedStorage());
+      // pruneStorageMap removes the unreported storage
+      cluster.triggerHeartbeats();
+      // Assert that the unreported storage is pruned
+      assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
+          dnDescriptor.getStorageInfos().length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
       }
     }
-    assertEquals(1, numFailedStoragesWithBlocks);
-    // Heartbeat manager removes the blocks associated with this failed storage
-    bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
-    assertTrue(!failedStorageInfo.areBlocksOnFailedStorage());
-    // pruneStorageMap removes the unreported storage
-    cluster.triggerHeartbeats();
-    // Assert that the unreported storage is pruned
-    assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
-        dnDescriptor.getStorageInfos().length);
   }
 }
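
Without the finally block, an assertion failing partway through the test
leaks the MiniDFSCluster: its NameNode and DataNode threads keep running in
the test JVM and hold on to ports and data directories, which can cause
unrelated failures in tests that run afterwards in the same process.
Wrapping the body in try/finally (with a null check before shutdown) is the
convention used throughout the HDFS test suite, and this change brings
testNameNodePrunesUnreportedStorages in line with it.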

