Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2019/04/29 23:23:34 UTC

[hadoop] branch branch-3.2 updated: HDFS-13677. Dynamically refreshing the disk configuration overwrites the VolumeMap. Contributed by xuzq.

This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new db4c0b3  HDFS-13677. Dynamically refreshing the disk configuration overwrites the VolumeMap. Contributed by xuzq.
db4c0b3 is described below

commit db4c0b357d573a1507d6a3da632bb5a11187d414
Author: Arpit Agarwal <ar...@apache.org>
AuthorDate: Mon Apr 29 14:49:35 2019 -0700

    HDFS-13677. Dynamically refreshing the disk configuration overwrites the VolumeMap. Contributed by xuzq.
    
    (cherry picked from commit 4b4200f1f87ad40d9c19ba160f706ffd0470a8d4)
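
Editor's note: the bug fixed here is in the volume add/activation path of
FsDatasetImpl (first hunk below). It populated volumeMap via
ReplicaMap#addAll, which delegates to Map#putAll. Since ReplicaMap is keyed
by block-pool id, putAll replaces the entire per-pool replica collection, so
dynamically re-adding a disk overwrote the replicas already tracked for every
other volume in that block pool. The new mergeAll re-adds each replica
individually, preserving existing entries.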
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java     |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 +++++
 .../datanode/TestDataNodeHotSwapVolumes.java       | 68 ++++++++++++++++++++++
 .../datanode/fsdataset/impl/TestReplicaMap.java    | 22 +++++++
 4 files changed, 105 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ad43b45..29ea1de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         LOG.error(errorMsg);
         throw new IOException(errorMsg);
       }
-      volumeMap.addAll(replicaMap);
+      volumeMap.mergeAll(replicaMap);
       storageMap.put(sd.getStorageUuid(),
           new DatanodeStorage(sd.getStorageUuid(),
               DatanodeStorage.State.NORMAL,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c60..786078f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,20 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
     map.putAll(other.map);
   }
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+    other.map.forEach(
+        (bp, replicaInfos) -> {
+          replicaInfos.forEach(
+              replicaInfo -> add(bp, replicaInfo)
+          );
+        }
+    );
+  }
   
   /**
    * Remove the replica's meta information from the map that matches
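
Editor's note: below is a minimal standalone sketch (not Hadoop code; plain
java.util types stand in for ReplicaMap's internal per-block-pool structure)
of why the putAll-based addAll loses replicas while a per-entry merge keeps
them. The block ids 1234 and 5678 mirror the values used in TestReplicaMap
further down.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class PutAllVsMergeAll {
      public static void main(String[] args) {
        // One replica set per block pool; a Long stands in for a block id.
        Map<String, Set<Long>> volumeMap = new HashMap<>();
        volumeMap.put("bp-1", new HashSet<>(Arrays.asList(1234L)));

        // Replicas found on a freshly re-added volume, same block pool.
        Map<String, Set<Long>> readdedVolume = new HashMap<>();
        readdedVolume.put("bp-1", new HashSet<>(Arrays.asList(5678L)));

        // addAll()-style: Map#putAll replaces the whole per-pool set, so
        // the replica tracked for the other volume disappears.
        Map<String, Set<Long>> afterPutAll = new HashMap<>(volumeMap);
        afterPutAll.putAll(readdedVolume);
        System.out.println(afterPutAll.get("bp-1"));  // [5678] -- 1234 lost

        // mergeAll()-style: add each replica into the existing per-pool
        // set, so both block ids survive.
        readdedVolume.forEach((bp, replicas) ->
            volumeMap.computeIfAbsent(bp, k -> new HashSet<>()).addAll(replicas));
        System.out.println(volumeMap.get("bp-1"));    // 1234 and 5678
      }
    }
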
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index c19c849..609e16c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public class TestDataNodeHotSwapVolumes {
     verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding a volume that still holds blocks, on a running
+   * MiniDFSCluster with a single NameNode, to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+      throws IOException, ReconfigurationException,
+      InterruptedException, TimeoutException {
+    startDFSCluster(1, 1);
+    String bpid = cluster.getNamesystem().getBlockPoolId();
+    final int numBlocks = 10;
+
+    Path testFile = new Path("/test");
+    createFile(testFile, numBlocks);
+
+    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
+        cluster.getAllBlockReports(bpid);
+    assertEquals(1, blockReports.size());  // 1 DataNode
+    assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+    // Now remove the second volume
+    DataNode dn = cluster.getDataNodes().get(0);
+    Collection<String> oldDirs = getDataDirs(dn);
+    String newDirs = oldDirs.iterator().next();  // Keep the first volume.
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    assertFileLocksReleased(
+        new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
+
+    // Now create another file. The first volume should now hold 15 blocks,
+    // while 5 blocks remain on the previously removed volume.
+    createFile(new Path("/test2"), numBlocks);
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size());  // 1 DataNode
+    assertEquals(1, blockReports.get(0).size());  // 1 volume
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      assertEquals(15, blockList.getNumberOfBlocks());
+    }
+
+    // Now add the original volume back and ensure blocks on both volumes are reported
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, String.join(",", oldDirs)),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size());  // 1 DataNode
+    assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+    // The order of the block reports is not guaranteed. Since we expect two,
+    // take the min and max block counts and assert on those.
+    int minNumBlocks = Integer.MAX_VALUE;
+    int maxNumBlocks = Integer.MIN_VALUE;
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
+      maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
+    }
+    assertEquals(5, minNumBlocks);
+    assertEquals(15, maxNumBlocks);
+  }
+
   @Test(timeout=60000)
   public void testAddVolumesDuringWrite()
       throws IOException, InterruptedException, TimeoutException,
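
Editor's note on the test above: assuming the DataNode's default round-robin
volume choosing policy (which the exact assertions rely on), the first 10
blocks split 5/5 across the two volumes. After the second volume is removed,
the next 10 blocks all land on the remaining volume, giving it 15, while 5
blocks stay on the removed volume's disk. Re-adding that volume must
therefore produce reports of 15 and 5 blocks; before this fix, the
putAll-based addAll overwrote the block pool's entry in volumeMap, dropping
the kept volume's 15 replicas.
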
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 4fa91b0..1059c08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -108,4 +108,26 @@ public class TestReplicaMap {
     map.add(bpid, new FinalizedReplica(block, null, null));
     assertNotNull(map.remove(bpid, block.getBlockId()));
   }
+
+  @Test
+  public void testMergeAll() {
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    tmpReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.mergeAll(tmpReplicaMap);
+    assertNotNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
+
+  @Test
+  public void testAddAll() {
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    tmpReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.addAll(tmpReplicaMap);
+    assertNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
 }
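
Editor's note: read together, the two tests above pin down the contract.
Assuming the class's existing setup registers a replica with block id 1234
under bpid (as the assertions imply), mergeAll preserves it alongside the new
5678, while addAll's putAll replaces the per-pool collection, so 1234 is gone
and the test asserts null.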

