Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/02/16 19:31:45 UTC

[38/50] [abbrv] hadoop git commit: HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to branch-2. Contributed by Lei (Eddy) Xu.

HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to branch-2. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afebf701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afebf701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afebf701

Branch: refs/heads/HDFS-7285
Commit: afebf7011c0dd5fe40f70255a530fa8a26911b27
Parents: 08bc0c0
Author: cnauroth <cn...@apache.org>
Authored: Fri Feb 13 16:30:28 2015 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Mon Feb 16 10:29:50 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../fsdataset/impl/FsVolumeListTest.java        | 101 -------------------
 .../fsdataset/impl/TestFsVolumeList.java        | 101 +++++++++++++++++++
 3 files changed, 104 insertions(+), 101 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afebf701/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ec2bd2..196b1bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -948,6 +948,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the
     block scanner (cmccabe)
 
+    HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to
+    branch-2. (Lei (Eddy) Xu via cnauroth)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afebf701/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
deleted file mode 100644
index 691d390..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertNotEquals;
-import static org.mockito.Mockito.mock;
-
-public class FsVolumeListTest {
-
-  private final Configuration conf = new Configuration();
-  private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
-      new RoundRobinVolumeChoosingPolicy<>();
-  private FsDatasetImpl dataset = null;
-  private String baseDir;
-  private BlockScanner blockScanner;
-
-  @Before
-  public void setUp() {
-    dataset = mock(FsDatasetImpl.class);
-    baseDir = new FileSystemTestHelper().getTestRootDir();
-    Configuration blockScannerConf = new Configuration();
-    blockScannerConf.setInt(DFSConfigKeys.
-        DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
-    blockScanner = new BlockScanner(null, blockScannerConf);
-  }
-
-  @Test
-  public void testGetNextVolumeWithClosedVolume() throws IOException {
-    FsVolumeList volumeList = new FsVolumeList(0, blockScanner, blockChooser);
-    List<FsVolumeImpl> volumes = new ArrayList<>();
-    for (int i = 0; i < 3; i++) {
-      File curDir = new File(baseDir, "nextvolume-" + i);
-      curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
-      volume.setCapacityForTesting(1024 * 1024 * 1024);
-      volumes.add(volume);
-      volumeList.addVolume(volume.obtainReference());
-    }
-
-    // Close the second volume.
-    volumes.get(1).closeAndWait();
-    for (int i = 0; i < 10; i++) {
-      try (FsVolumeReference ref =
-          volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
-        // The closed volume (the second one) must never be chosen.
-        assertNotEquals(ref.getVolume(), volumes.get(1));
-      }
-    }
-  }
-
-  @Test
-  public void testCheckDirsWithClosedVolume() throws IOException {
-    FsVolumeList volumeList = new FsVolumeList(0, blockScanner, blockChooser);
-    List<FsVolumeImpl> volumes = new ArrayList<>();
-    for (int i = 0; i < 3; i++) {
-      File curDir = new File(baseDir, "volume-" + i);
-      curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
-      volumes.add(volume);
-      volumeList.addVolume(volume.obtainReference());
-    }
-
-    // Close the second volume.
-    volumes.get(1).closeAndWait();
-    // checkDirs() should ignore the second volume since it is closed.
-    volumeList.checkDirs();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afebf701/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
new file mode 100644
index 0000000..d477e5b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Mockito.mock;
+
+public class TestFsVolumeList {
+
+  private final Configuration conf = new Configuration();
+  private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
+      new RoundRobinVolumeChoosingPolicy<>();
+  private FsDatasetImpl dataset = null;
+  private String baseDir;
+  private BlockScanner blockScanner;
+
+  @Before
+  public void setUp() {
+    dataset = mock(FsDatasetImpl.class);
+    baseDir = new FileSystemTestHelper().getTestRootDir();
+    Configuration blockScannerConf = new Configuration();
+    blockScannerConf.setInt(DFSConfigKeys.
+        DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
+    blockScanner = new BlockScanner(null, blockScannerConf);
+  }
+
+  @Test
+  public void testGetNextVolumeWithClosedVolume() throws IOException {
+    FsVolumeList volumeList = new FsVolumeList(0, blockScanner, blockChooser);
+    List<FsVolumeImpl> volumes = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      File curDir = new File(baseDir, "nextvolume-" + i);
+      curDir.mkdirs();
+      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
+          conf, StorageType.DEFAULT);
+      volume.setCapacityForTesting(1024 * 1024 * 1024);
+      volumes.add(volume);
+      volumeList.addVolume(volume.obtainReference());
+    }
+
+    // Close the second volume.
+    volumes.get(1).closeAndWait();
+    for (int i = 0; i < 10; i++) {
+      try (FsVolumeReference ref =
+          volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
+        // The closed volume (the second one) must never be chosen.
+        assertNotEquals(ref.getVolume(), volumes.get(1));
+      }
+    }
+  }
+
+  @Test
+  public void testCheckDirsWithClosedVolume() throws IOException {
+    FsVolumeList volumeList = new FsVolumeList(0, blockScanner, blockChooser);
+    List<FsVolumeImpl> volumes = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      File curDir = new File(baseDir, "volume-" + i);
+      curDir.mkdirs();
+      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
+          conf, StorageType.DEFAULT);
+      volumes.add(volume);
+      volumeList.addVolume(volume.obtainReference());
+    }
+
+    // Close the second volume.
+    volumes.get(1).closeAndWait();
+    // checkDirs() should ignore the second volume since it is closed.
+    volumeList.checkDirs();
+  }
+}
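
----------------------------------------------------------------------

For context on the behavior the moved test verifies: getNextVolume is
expected to skip any volume that has already been closed, retrying the
choosing policy until it can take a reference on a live volume, and
checkDirs() is likewise expected to ignore closed volumes. The sketch
below is a minimal illustration of that selection loop, under the
assumption (matching FsVolumeImpl) that taking a reference on a closed
volume fails with a ClosedChannelException; the Volume and VolumeChooser
types are hypothetical stand-ins, not the actual FsVolumeList code.

    import java.io.IOException;
    import java.nio.channels.ClosedChannelException;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical volume handle: taking a reference fails once closed.
    interface Volume {
      AutoCloseable obtainReference() throws ClosedChannelException;
    }

    class VolumeChooser {
      // Keep asking for a candidate; drop any candidate whose reference
      // can no longer be taken because the volume is closed.
      static AutoCloseable chooseLive(List<Volume> candidates)
          throws IOException {
        List<Volume> remaining = new ArrayList<>(candidates);
        while (!remaining.isEmpty()) {
          Volume v = remaining.get(0); // stand-in for round-robin policy
          try {
            return v.obtainReference(); // succeeds only for live volumes
          } catch (ClosedChannelException e) {
            remaining.remove(v); // closed volume: exclude it and retry
          }
        }
        throw new IOException("no live volume available");
      }
    }

Two details of the test setup are worth noting. First, setUp() sets
DFS_DATANODE_SCAN_PERIOD_HOURS_KEY to -1, which disables the DataNode
block scanner so it cannot interfere with the volumes under test.
Second, each getNextVolume call is wrapped in try-with-resources on the
returned FsVolumeReference, so the reference is released even when an
assertion throws. The rename itself simply brings the class in line with
the Test* naming convention used across the Hadoop test tree.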