You are viewing a plain text version of this content. The canonical link was provided as a hyperlink in the original HTML message and is not reproduced in this plain text rendering.
Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/04/06 21:23:10 UTC
[40/50] [abbrv] hadoop git commit: HDFS-8051. FsVolumeList#addVolume
should release volume reference if not put it into BlockScanner. (Lei (Eddy)
Xu via Colin P. McCabe)
HDFS-8051. FsVolumeList#addVolume should release volume reference if not put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1ffb3ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1ffb3ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1ffb3ea
Branch: refs/heads/YARN-2928
Commit: e1ffb3ea615e830b4215fafd0b2c1d05a50d4b0c
Parents: 5864e88
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Fri Apr 3 16:34:23 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Apr 6 12:08:16 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../datanode/fsdataset/impl/FsVolumeList.java | 5 +++++
.../fsdataset/impl/TestFsVolumeList.java | 19 +++++++++++++++++++
3 files changed, 27 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d399a4..6fafec8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1376,6 +1376,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7996. After swapping a volume, BlockReceiver reports
ReplicaNotFoundException (Lei (Eddy) Xu via Colin P. McCabe)
+ HDFS-8051. FsVolumeList#addVolume should release volume reference if not
+ put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
+
BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 4fddfb9..d87595c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Time;
@@ -292,6 +293,10 @@ class FsVolumeList {
}
if (blockScanner != null) {
blockScanner.addVolumeScanner(ref);
+ } else {
+ // If the volume is not put into a volume scanner, it does not need to
+ // hold the reference.
+ IOUtils.cleanup(FsDatasetImpl.LOG, ref);
}
// If the volume is used to replace a failed volume, it needs to reset the
// volume failure info for this volume.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 46189ba..eccff89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -35,6 +35,7 @@ import java.util.Collections;
import java.util.List;
import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
public class TestFsVolumeList {
@@ -101,4 +102,22 @@ public class TestFsVolumeList {
// checkDirs() should ignore the 2nd volume since it is closed.
volumeList.checkDirs();
}
+
+ @Test
+ public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
+ FsVolumeList volumeList = new FsVolumeList(
+ Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
+ File volDir = new File(baseDir, "volume-0");
+ volDir.mkdirs();
+ FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
+ conf, StorageType.DEFAULT);
+ FsVolumeReference ref = volume.obtainReference();
+ volumeList.addVolume(ref);
+ try {
+ ref.close();
+ fail("Should throw exception because the reference is closed in "
+ + "VolumeList#addVolume().");
+ } catch (IllegalStateException e) {
+ }
+ }
}