Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/08/14 15:07:44 UTC
[hadoop] branch branch-3.1 updated: HDFS-14595. HDFS-11848 breaks API compatibility. Contributed by Siyao Meng.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new 224643a HDFS-14595. HDFS-11848 breaks API compatibility. Contributed by Siyao Meng.
224643a is described below
commit 224643a58c3bd28d29c8a695c3522b40860b6887
Author: Siyao Meng <sm...@cloudera.com>
AuthorDate: Wed Aug 14 07:24:22 2019 -0700
HDFS-14595. HDFS-11848 breaks API compatibility. Contributed by Siyao Meng.
Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
Reviewed-by: Ayush Saxena <ay...@apache.org>
(cherry picked from commit 3c0382f1b933b7acfe55081f5bad46f9fe05a14b)
(cherry picked from commit 136a97a74dbc12f05b88d0abda101690e7c727d9)
---
.../apache/hadoop/hdfs/DistributedFileSystem.java | 6 ++++
.../org/apache/hadoop/hdfs/client/HdfsAdmin.java | 6 ++++
.../hadoop/hdfs/TestDistributedFileSystem.java | 17 ++++++++++
.../java/org/apache/hadoop/hdfs/TestHdfsAdmin.java | 38 ++++++++++++++++++----
4 files changed, 60 insertions(+), 7 deletions(-)
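
For context, HDFS-11848 replaced the one-argument listOpenFiles(EnumSet<OpenFilesType>) overload with a two-argument (types, path) form, which broke callers compiled against the older signature; this patch re-adds the old overload as @Deprecated on both DistributedFileSystem and HdfsAdmin. The sketch below shows the kind of pre-HDFS-11848 caller that compiles again after this change. The class name and Configuration setup are illustrative only and assume a reachable HDFS cluster; they are not part of the patch.

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
    import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

    public class ListOpenFilesCompatSketch {
      public static void main(String[] args) throws IOException {
        // Illustrative setup: assumes fs.defaultFS points at a running HDFS cluster.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Pre-HDFS-11848 call shape: a single EnumSet argument, no path filter.
          // This is the overload the patch restores (now marked @Deprecated).
          RemoteIterator<OpenFileEntry> openFiles =
              dfs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
          while (openFiles.hasNext()) {
            System.out.println(openFiles.next().getFilePath());
          }
        }
      }
    }
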
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4cb20b3..11ff0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -3299,6 +3299,12 @@ public class DistributedFileSystem extends FileSystem
return dfs.listOpenFiles();
}
+ @Deprecated
+ public RemoteIterator<OpenFileEntry> listOpenFiles(
+ EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ return dfs.listOpenFiles(openFilesTypes);
+ }
+
public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
return dfs.listOpenFiles(openFilesTypes, path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 88044b9..27a53cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -616,6 +616,12 @@ public class HdfsAdmin {
return dfs.listOpenFiles();
}
+ @Deprecated
+ public RemoteIterator<OpenFileEntry> listOpenFiles(
+ EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ return dfs.listOpenFiles(openFilesTypes);
+ }
+
public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
return dfs.listOpenFiles(openFilesTypes, path);
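
HdfsAdmin gets the same restored overload. A minimal sketch of the old admin-side call next to the current two-argument form is below; it assumes the imports from the previous sketch plus org.apache.hadoop.hdfs.client.HdfsAdmin and org.apache.hadoop.hdfs.protocol.OpenFilesIterator, and a conf pointing at a running cluster. Per the TestHdfsAdmin change further down, both calls are expected to list the same open files when the default path filter is used.

    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

    // Restored pre-HDFS-11848 signature, now @Deprecated:
    RemoteIterator<OpenFileEntry> oldStyle =
        hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));

    // Current signature with an explicit path filter:
    RemoteIterator<OpenFileEntry> newStyle =
        hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
            OpenFilesIterator.FILTER_PATH_DEFAULT);
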
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index e20fcdd..9b977be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -86,6 +86,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -183,8 +185,10 @@ public class TestDistributedFileSystem {
* Tests DFSClient.close throws no ConcurrentModificationException if
* multiple files are open.
* Also tests that any cached sockets are closed. (HDFS-3359)
+ * Also tests deprecated listOpenFiles(EnumSet<>). (HDFS-14595)
*/
@Test
+ @SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
public void testDFSClose() throws Exception {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null;
@@ -196,6 +200,19 @@ public class TestDistributedFileSystem {
fileSys.create(new Path("/test/dfsclose/file-0"));
fileSys.create(new Path("/test/dfsclose/file-1"));
+ // Test listOpenFiles(EnumSet<>)
+ List<OpenFilesIterator.OpenFilesType> types = new ArrayList<>();
+ types.add(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES);
+ RemoteIterator<OpenFileEntry> listOpenFiles =
+ fileSys.listOpenFiles(EnumSet.copyOf(types));
+ assertTrue("Two files should be open", listOpenFiles.hasNext());
+ int countOpenFiles = 0;
+ while (listOpenFiles.hasNext()) {
+ listOpenFiles.next();
+ ++countOpenFiles;
+ }
+ assertEquals("Mismatch of open files count", 2, countOpenFiles);
+
// create another file, close it, and read it, so
// the client gets a socket in its SocketCache
Path p = new Path("/non-empty-file");
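
The test above exercises the deprecated overload directly; for comparison, the same listing written against the non-deprecated two-argument API would look roughly like the fragment below (a sketch only, reusing the test's fileSys variable; not part of the patch).

    // Same listing via the two-argument overload, using the default path filter.
    RemoteIterator<OpenFileEntry> viaNewApi =
        fileSys.listOpenFiles(
            EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES),
            OpenFilesIterator.FILTER_PATH_DEFAULT);
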
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index cc32a3c..9edd297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -233,6 +233,9 @@ public class TestHdfsAdmin {
closedFileSet.add(filePath);
}
verifyOpenFiles(closedFileSet, openFileMap);
+ // Verify again with the old listOpenFiles(EnumSet<>) API
+ // Just to verify old API's validity
+ verifyOpenFilesOld(closedFileSet, openFileMap);
openFileMap.putAll(
DFSTestUtil.createOpenFiles(fs, "open-file-1", numOpenFiles));
@@ -252,13 +255,10 @@ public class TestHdfsAdmin {
}
}
- private void verifyOpenFiles(HashSet<Path> closedFiles,
- HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
- HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
- HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
- RemoteIterator<OpenFileEntry> openFilesRemoteItr =
- hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
- OpenFilesIterator.FILTER_PATH_DEFAULT);
+ private void verifyOpenFilesHelper(
+ RemoteIterator<OpenFileEntry> openFilesRemoteItr,
+ HashSet<Path> closedFiles,
+ HashSet<Path> openFiles) throws IOException {
while (openFilesRemoteItr.hasNext()) {
String filePath = openFilesRemoteItr.next().getFilePath();
assertFalse(filePath + " should not be listed under open files!",
@@ -266,6 +266,30 @@ public class TestHdfsAdmin {
assertTrue(filePath + " is not listed under open files!",
openFiles.remove(new Path(filePath)));
}
+ }
+
+ private void verifyOpenFiles(HashSet<Path> closedFiles,
+ HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
+ HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+ HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
+ RemoteIterator<OpenFileEntry> openFilesRemoteItr =
+ hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
+ assertTrue("Not all open files are listed!", openFiles.isEmpty());
+ }
+
+ /**
+ * Using deprecated HdfsAdmin#listOpenFiles(EnumSet<>) to verify open files.
+ */
+ @SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
+ private void verifyOpenFilesOld(HashSet<Path> closedFiles,
+ HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
+ HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+ HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
+ RemoteIterator<OpenFileEntry> openFilesRemoteItr =
+ hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
assertTrue("Not all open files are listed!", openFiles.isEmpty());
}
}