Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2015/09/02 00:32:28 UTC

hadoop git commit: HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 9005b141a -> 5dbb0325d


HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)

(cherry picked from commit a17584936cc5141e3f5612ac3ecf35e27968e439)
(cherry picked from commit 7779f38e68ca4e0f7ac08eb7e5f4801b89979d02)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

(cherry picked from commit 65ae3e2ff16ce1114a0115ff916837b0173b77f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5dbb0325
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5dbb0325
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5dbb0325

Branch: refs/heads/branch-2.6.1
Commit: 5dbb0325df4f95e5f2ab48fc8c627d1b6807eb42
Parents: 9005b14
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Jan 20 20:11:09 2015 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Tue Sep 1 15:29:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +++++----
 .../datanode/fsdataset/impl/FsVolumeList.java   |  8 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java       | 37 ++++++++++++++++++--
 4 files changed, 52 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
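
Note on the change: before this patch, FsDatasetImpl#removeVolumes() looked up
storage directories with a Set<File>, and FsVolumeList#removeVolume() compared
raw path strings, so a volume registered under one path form (for example a
relative path from the configuration) could fail to match the same volume
looked up by its absolute form, and a dynamically added volume was never
removed. The hunks below normalize both sides to absolute paths before
comparing; the addVolume() hunk also roots the new FsVolumeImpl at
sd.getCurrentDir() rather than the raw location. A minimal standalone sketch
of the underlying pitfall (illustrative only, not part of the patch; the class
name is made up):

    import java.io.File;

    public class FilePathEqualityDemo {
      public static void main(String[] args) {
        File relative = new File("data4");
        File absolute = new File(relative.getAbsolutePath());

        // File.equals() compares path strings, not the file they denote,
        // so a relative path never equals its absolute form:
        System.out.println(relative.equals(absolute));            // false

        // Normalizing both sides first makes the comparison reliable:
        System.out.println(relative.getAbsolutePath()
            .equals(absolute.getAbsolutePath()));                 // true
      }
    }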


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d7ff237..9796321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7885. Datanode should not trust the generation stamp provided by
     client. (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+    Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0c2337e..cbcf6b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -336,7 +336,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+        this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -379,19 +379,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<String>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
         LOG.info("Removing " + volume + " from FsDataset.");
 
         // Disable the volume from the service.
         asyncDiskService.removeVolume(sd.getCurrentDir());
-        this.volumes.removeVolume(volume);
+        this.volumes.removeVolume(sd.getRoot());
 
         // Remove all replica information for the blocks on the volume. Unlike
         // updating the volumeMap in addVolume(), this operation does not scan
@@ -401,7 +401,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
               it.hasNext(); ) {
             ReplicaInfo block = it.next();
-            if (block.getVolume().getBasePath().equals(volume)) {
+            String absBasePath =
+                  new File(block.getVolume().getBasePath()).getAbsolutePath();
+            if (absBasePath.equals(volume)) {
               invalidate(bpid, block);
               blocks.add(block);
               it.remove();
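
A standalone sketch of why the old Set<File> lookup in the hunk above could
miss (simplified and illustrative; these are not the real FsDatasetImpl
members):

    import java.io.File;
    import java.util.HashSet;
    import java.util.Set;

    public class VolumeLookupDemo {
      public static void main(String[] args) {
        // Old style: Set<File> keyed on whatever path form the caller used.
        Set<File> byFile = new HashSet<File>();
        byFile.add(new File("data4"));                   // relative, from config
        File root = new File("data4").getAbsoluteFile(); // absolute, from Storage
        System.out.println(byFile.contains(root));       // false: lookup misses

        // Patched style: normalize to absolute path strings on both sides.
        Set<String> byPath = new HashSet<String>();
        byPath.add(new File("data4").getAbsolutePath());
        System.out.println(byPath.contains(root.getAbsolutePath())); // true
      }
    }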

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 9483444..b17b90b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -278,13 +279,16 @@ class FsVolumeList {
    * Dynamically remove volume in the list.
    * @param volume the volume to be removed.
    */
-  void removeVolume(String volume) {
+  void removeVolume(File volume) {
     // Make a copy of volumes to remove one volume.
     final FsVolumeImpl[] curVolumes = volumes.get();
     final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
     for (Iterator<FsVolumeImpl> it = volumeList.iterator(); it.hasNext(); ) {
       FsVolumeImpl fsVolume = it.next();
-      if (fsVolume.getBasePath().equals(volume)) {
+      String basePath, targetPath;
+      basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
+      targetPath = volume.getAbsolutePath();
+      if (basePath.equals(targetPath)) {
         // Make sure the removed volume is the one in the curVolumes.
         removeVolume(fsVolume);
       }
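
With this hunk, removeVolume() takes a java.io.File and normalizes both the
argument and each volume's base path via getAbsolutePath() before comparing,
so callers may pass either path form (the test update at the end of this diff
passes new File("data4")). A self-contained sketch of the comparison
(matches() is an illustrative stand-in for the loop body above):

    import java.io.File;

    public class RemoveVolumeMatchDemo {
      static boolean matches(String volumeBasePath, File target) {
        String basePath = new File(volumeBasePath).getAbsolutePath();
        String targetPath = target.getAbsolutePath();
        return basePath.equals(targetPath);
      }

      public static void main(String[] args) {
        // A relative argument now matches a volume stored under the
        // equivalent absolute base path:
        String base = new File("data4").getAbsolutePath();
        System.out.println(matches(base, new File("data4")));    // true
      }
    }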

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 71b9748..09934c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -47,6 +47,7 @@ import org.mockito.stubbing.Answer;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -73,6 +74,7 @@ public class TestFsDatasetImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_INIT_VOLUMES = 2;
+  private static final String CLUSTER_ID = "cluster-id";
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
 
   // Use to generate storageUuid
@@ -139,10 +141,11 @@ public class TestFsDatasetImpl {
     Set<String> expectedVolumes = new HashSet<String>();
     List<NamespaceInfo> nsInfos = Lists.newArrayList();
     for (String bpid : BLOCK_POOL_IDS) {
-      nsInfos.add(new NamespaceInfo(0, "cluster-id", bpid, 1));
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
     }
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
+      expectedVolumes.add(path);
       StorageLocation loc = StorageLocation.parse(path);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
@@ -159,7 +162,8 @@ public class TestFsDatasetImpl {
 
     Set<String> actualVolumes = new HashSet<String>();
     for (int i = 0; i < numNewVolumes; i++) {
-      dataset.getVolumes().get(numExistingVolumes + i).getBasePath();
+      actualVolumes.add(
+          dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
     }
     assertEquals(actualVolumes, expectedVolumes);
   }
@@ -243,6 +247,33 @@ public class TestFsDatasetImpl {
   }
 
   @Test(timeout = 5000)
+  public void testRemoveNewlyAddedVolume() throws IOException {
+    final int numExistingVolumes = dataset.getVolumes().size();
+    List<NamespaceInfo> nsInfos = new ArrayList<NamespaceInfo>();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
+    StorageLocation loc = StorageLocation.parse(newVolumePath);
+
+    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
+    DataStorage.VolumeBuilder builder =
+        new DataStorage.VolumeBuilder(storage, sd);
+    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        anyListOf(NamespaceInfo.class)))
+        .thenReturn(builder);
+
+    dataset.addVolume(loc, nsInfos);
+    assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
+
+    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
+    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
+    List<StorageLocation> volumesToRemove = Arrays.asList(loc);
+    dataset.removeVolumes(volumesToRemove);
+    assertEquals(numExistingVolumes, dataset.getVolumes().size());
+  }
+
+  @Test(timeout = 5000)
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
         new RoundRobinVolumeChoosingPolicy<FsVolumeImpl>();
@@ -266,7 +297,7 @@ public class TestFsDatasetImpl {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume("data4");
+        volumeList.removeVolume(new File("data4"));
         volumeList.addVolume(newVolume);
         return null;
       }