Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2018/02/21 01:14:48 UTC
[02/10] hadoop git commit: HDFS-11848. Enhance dfsadmin listOpenFiles command to list files under a given path. Contributed by Yiqun Lin.
HDFS-11848. Enhance dfsadmin listOpenFiles command to list files under a given path. Contributed by Yiqun Lin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fbcd92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fbcd92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fbcd92
Branch: refs/heads/branch-3
Commit: d3fbcd92fe53192a319683b9ac72179cb28bd978
Parents: ee44783
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Jan 6 14:31:08 2018 +0800
Committer: Manoj Govindassamy <ma...@apache.org>
Committed: Fri Jan 12 13:52:27 2018 -0800
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 36 +++++++++-
.../hadoop/hdfs/DistributedFileSystem.java | 4 +-
.../apache/hadoop/hdfs/client/HdfsAdmin.java | 4 +-
.../hadoop/hdfs/protocol/ClientProtocol.java | 5 +-
.../hadoop/hdfs/protocol/OpenFilesIterator.java | 10 ++-
.../ClientNamenodeProtocolTranslatorPB.java | 8 ++-
.../src/main/proto/ClientNamenodeProtocol.proto | 1 +
...tNamenodeProtocolServerSideTranslatorPB.java | 2 +-
.../federation/router/RouterRpcServer.java | 6 +-
.../hdfs/server/namenode/FSNamesystem.java | 27 +++++---
.../hdfs/server/namenode/LeaseManager.java | 25 +++++--
.../hdfs/server/namenode/NameNodeRpcServer.java | 8 ++-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 23 +++++--
.../src/site/markdown/HDFSCommands.md | 4 +-
.../apache/hadoop/hdfs/TestDecommission.java | 38 ++++++++++-
.../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 4 +-
.../hdfs/server/namenode/TestLeaseManager.java | 8 ++-
.../hdfs/server/namenode/TestListOpenFiles.java | 20 ++++--
.../apache/hadoop/hdfs/tools/TestDFSAdmin.java | 69 +++++++++++++++++++-
19 files changed, 248 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 83c3b94..c20e2a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3030,11 +3030,26 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
@Deprecated
public RemoteIterator<OpenFileEntry> listOpenFiles() throws IOException {
checkOpen();
- return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
}
/**
- * Get a remote iterator to the open files list by type, managed by NameNode.
+ * Get a remote iterator to the open files list by path,
+ * managed by NameNode.
+ *
+ * @param path path to filter the open files.
+ * @throws IOException
+ */
+ public RemoteIterator<OpenFileEntry> listOpenFiles(String path)
+ throws IOException {
+ checkOpen();
+ return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
+ }
+
+ /**
+ * Get a remote iterator to the open files list by type,
+ * managed by NameNode.
*
* @param openFilesTypes
* @throws IOException
@@ -3042,6 +3057,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes) throws IOException {
checkOpen();
- return new OpenFilesIterator(namenode, tracer, openFilesTypes);
+ return listOpenFiles(openFilesTypes,
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+
+ /**
+ * Get a remote iterator to the open files list by type and path,
+ * managed by NameNode.
+ *
+ * @param openFilesTypes types to filter the open files.
+ * @param path path to filter the open files.
+ * @throws IOException
+ */
+ public RemoteIterator<OpenFileEntry> listOpenFiles(
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
+ checkOpen();
+ return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
}
}
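
All three DFSClient entry points now funnel into the new listOpenFiles(types, path) overload; the deprecated no-argument and type-only variants pass OpenFilesIterator.FILTER_PATH_DEFAULT ("/"), which matches every absolute path and so preserves the old list-everything behavior.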
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 54b428e..369a5bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2985,8 +2985,8 @@ public class DistributedFileSystem extends FileSystem
}
public RemoteIterator<OpenFileEntry> listOpenFiles(
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
- return dfs.listOpenFiles(openFilesTypes);
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
+ return dfs.listOpenFiles(openFilesTypes, path);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index e620039..2c0659a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -659,8 +659,8 @@ public class HdfsAdmin {
}
public RemoteIterator<OpenFileEntry> listOpenFiles(
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
- return dfs.listOpenFiles(openFilesTypes);
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
+ return dfs.listOpenFiles(openFilesTypes, path);
}
}
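
For a concrete picture of the new client-facing API, below is a minimal sketch of listing open files under a path through HdfsAdmin. The cluster URI and the filter path are illustrative assumptions, not values from this commit:

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
    import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

    public class ListOpenFilesUnderPath {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed NameNode URI; replace with the actual cluster address.
        HdfsAdmin admin =
            new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);
        // List open files whose full path starts with /tmp/files.
        RemoteIterator<OpenFileEntry> it = admin.listOpenFiles(
            EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/tmp/files");
        while (it.hasNext()) {
          OpenFileEntry entry = it.next();
          System.out.println(entry.getFilePath());
        }
      }
    }

Like the deprecated variants, this requires HDFS superuser privilege, since the NameNode calls checkSuperuserPrivilege() before building each batch.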
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 80e053c..24f0321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1680,10 +1680,11 @@ public interface ClientProtocol {
* the list across batches is not atomic.
*
* @param prevId the cursor INode id.
- * @param openFilesTypes types to filter the open files
+ * @param openFilesTypes types to filter the open files.
+ * @param path path to filter the open files.
* @throws IOException
*/
@Idempotent
BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException;
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/OpenFilesIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/OpenFilesIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/OpenFilesIterator.java
index d113d65..c2b3781 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/OpenFilesIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/OpenFilesIterator.java
@@ -37,6 +37,9 @@ import org.apache.htrace.core.Tracer;
public class OpenFilesIterator extends
BatchedRemoteIterator<Long, OpenFileEntry> {
+ /** Default path filter that matches every path. */
+ public static final String FILTER_PATH_DEFAULT = "/";
+
/**
* Open file types to filter the results.
*/
@@ -67,20 +70,23 @@ public class OpenFilesIterator extends
private final ClientProtocol namenode;
private final Tracer tracer;
private final EnumSet<OpenFilesType> types;
+ /** List files filtered by given path. */
+ private String path;
public OpenFilesIterator(ClientProtocol namenode, Tracer tracer,
- EnumSet<OpenFilesType> types) {
+ EnumSet<OpenFilesType> types, String path) {
super(HdfsConstants.GRANDFATHER_INODE_ID);
this.namenode = namenode;
this.tracer = tracer;
this.types = types;
+ this.path = path;
}
@Override
public BatchedEntries<OpenFileEntry> makeRequest(Long prevId)
throws IOException {
try (TraceScope ignored = tracer.newScope("listOpenFiles")) {
- return namenode.listOpenFiles(prevId, types);
+ return namenode.listOpenFiles(prevId, types, path);
}
}
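
The iterator inherits its paging from BatchedRemoteIterator: GRANDFATHER_INODE_ID seeds the cursor, and each exhausted batch triggers another makeRequest, so the type and path filters are re-applied server-side on every batch. A simplified sketch of that loop, with illustrative field names rather than Hadoop's exact internals:

    // Simplified paging loop; 'batch', 'index', and 'prevKey' are
    // illustrative names for the state BatchedRemoteIterator keeps.
    OpenFileEntry fetchNext() throws IOException {
      if (batch == null || index >= batch.size()) {
        batch = makeRequest(prevKey); // re-sends types + path each RPC
        index = 0;
      }
      OpenFileEntry entry = batch.get(index++);
      prevKey = entry.getId();        // cursor: last INode id returned
      return entry;
    }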
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index c297302..b843e29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1851,17 +1852,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
@Override
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
throws IOException {
- return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
}
@Override
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
ListOpenFilesRequestProto.Builder req =
ListOpenFilesRequestProto.newBuilder().setId(prevId);
if (openFilesTypes != null) {
req.addAllTypes(PBHelperClient.convertOpenFileTypes(openFilesTypes));
}
+ req.setPath(path);
+
try {
ListOpenFilesResponseProto response =
rpcProxy.listOpenFiles(null, req.build());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 216821a..5c30f2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -785,6 +785,7 @@ enum OpenFilesTypeProto {
message ListOpenFilesRequestProto {
required int64 id = 1;
repeated OpenFilesTypeProto types = 2;
+ optional string path = 3;
}
message OpenFilesBatchResponseProto {
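
Making path an optional proto2 field keeps the request wire-compatible: an older client simply omits it, req.getPath() then returns the empty string on the server, and the StringUtils.isEmpty(path) checks in FSNamesystem and LeaseManager below treat an empty filter exactly like the default "/", so nothing is filtered out.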
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index d34032d..d31007b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -1811,7 +1811,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
EnumSet<OpenFilesType> openFilesTypes =
PBHelperClient.convertOpenFileTypes(req.getTypesList());
BatchedEntries<OpenFileEntry> entries = server.listOpenFiles(req.getId(),
- openFilesTypes);
+ openFilesTypes, req.getPath());
ListOpenFilesResponseProto.Builder builder =
ListOpenFilesResponseProto.newBuilder();
builder.setHasMore(entries.hasMore());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d3d5959..4c317a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -1918,12 +1919,13 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
@Override
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
throws IOException {
- return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
}
@Override
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
checkOperation(OperationCategory.READ, false);
return null;
}
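
Note that the router-based federation implementation remains a stub: after the operation-category check it still returns null, so path-filtered listing is only actually served by the NameNode RPC server in this change.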
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 97423cb..ae57599 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1748,11 +1748,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* TODO: HDFS-12969 - to report open files by type.
*
* @param prevId the cursor INode id.
- * @param openFilesTypes
+ * @param openFilesTypes types to filter the open files.
+ * @param path path to filter the open files.
* @throws IOException
*/
BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
final String operationName = "listOpenFiles";
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
@@ -1761,10 +1762,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
try {
checkOperation(OperationCategory.READ);
if(openFilesTypes.contains(OpenFilesType.ALL_OPEN_FILES)) {
- batchedListEntries = leaseManager.getUnderConstructionFiles(prevId);
+ batchedListEntries = leaseManager.getUnderConstructionFiles(prevId,
+ path);
} else {
if(openFilesTypes.contains(OpenFilesType.BLOCKING_DECOMMISSION)) {
- batchedListEntries = getFilesBlockingDecom(prevId);
+ batchedListEntries = getFilesBlockingDecom(prevId, path);
} else {
throw new IllegalArgumentException("Unknown OpenFileType: "
+ openFilesTypes);
@@ -1780,7 +1782,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return batchedListEntries;
}
- public BatchedListEntries<OpenFileEntry> getFilesBlockingDecom(long prevId) {
+ public BatchedListEntries<OpenFileEntry> getFilesBlockingDecom(long prevId,
+ String path) {
assert hasReadLock();
final List<OpenFileEntry> openFileEntries = Lists.newArrayList();
LightWeightHashSet<Long> openFileIds = new LightWeightHashSet<>();
@@ -1798,10 +1801,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
Preconditions.checkState(ucFile instanceof INodeFile);
openFileIds.add(ucFileId);
INodeFile inodeFile = ucFile.asFile();
- openFileEntries.add(new OpenFileEntry(
- inodeFile.getId(), inodeFile.getFullPathName(),
- inodeFile.getFileUnderConstructionFeature().getClientName(),
- inodeFile.getFileUnderConstructionFeature().getClientMachine()));
+
+ String fullPathName = inodeFile.getFullPathName();
+ if (org.apache.commons.lang.StringUtils.isEmpty(path)
+ || fullPathName.startsWith(path)) {
+ openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
+ inodeFile.getFullPathName(),
+ inodeFile.getFileUnderConstructionFeature().getClientName(),
+ inodeFile.getFileUnderConstructionFeature().getClientMachine()));
+ }
+
if (openFileIds.size() >= this.maxListOpenFilesResponses) {
return new BatchedListEntries<>(openFileEntries, true);
}
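
One behavioral detail worth calling out: both here and in LeaseManager the filter is a plain string-prefix match on the file's full path, not a directory-boundary match. A standalone illustration (not Hadoop code):

    public class PrefixMatchDemo {
      public static void main(String[] args) {
        String filter = "/tmp/files/a";
        // Files under the directory match, as expected...
        System.out.println("/tmp/files/a/f1".startsWith(filter)); // true
        // ...but so do sibling paths that merely share the prefix.
        System.out.println("/tmp/files/abc".startsWith(filter));  // also true
      }
    }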
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 45699cb..29b4fe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -39,10 +39,12 @@ import java.util.concurrent.Future;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.Daemon;
@@ -258,6 +260,12 @@ public class LeaseManager {
return iipSet;
}
+ public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
+ final long prevId) throws IOException {
+ return getUnderConstructionFiles(prevId,
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+
/**
* Get a batch of under construction files from the currently active leases.
* File INodeID is the cursor used to fetch new batch of results and the
@@ -270,7 +278,7 @@ public class LeaseManager {
* @throws IOException
*/
public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
- final long prevId) throws IOException {
+ final long prevId, final String path) throws IOException {
assert fsnamesystem.hasReadLock();
SortedMap<Long, Lease> remainingLeases;
synchronized (this) {
@@ -283,6 +291,7 @@ public class LeaseManager {
Lists.newArrayListWithExpectedSize(numResponses);
int count = 0;
+ String fullPathName = null;
for (Long inodeId: inodeIds) {
final INodeFile inodeFile =
fsnamesystem.getFSDirectory().getInode(inodeId).asFile();
@@ -291,11 +300,15 @@ public class LeaseManager {
+ " is not under construction but has lease.");
continue;
}
- openFileEntries.add(new OpenFileEntry(
- inodeFile.getId(), inodeFile.getFullPathName(),
- inodeFile.getFileUnderConstructionFeature().getClientName(),
- inodeFile.getFileUnderConstructionFeature().getClientMachine()));
- count++;
+
+ fullPathName = inodeFile.getFullPathName();
+ if (StringUtils.isEmpty(path) || fullPathName.startsWith(path)) {
+ openFileEntries.add(new OpenFileEntry(inodeFile.getId(), fullPathName,
+ inodeFile.getFileUnderConstructionFeature().getClientName(),
+ inodeFile.getFileUnderConstructionFeature().getClientMachine()));
+ count++;
+ }
+
if (count >= numResponses) {
break;
}
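
Since count is now incremented only for entries that survive the path filter, each batch still returns up to numResponses matching files; the trade-off is that a narrow filter can make the server walk many non-matching leases under the FSNamesystem read lock before a batch fills.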
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b44aaf1..432df34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -114,6 +114,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -1324,14 +1325,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
@Override // ClientProtocol
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
throws IOException {
- return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
}
@Override // ClientProtocol
public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
checkNNStartup();
- return namesystem.listOpenFiles(prevId, openFilesTypes);
+ return namesystem.listOpenFiles(prevId, openFilesTypes, path);
}
@Override // ClientProtocol
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 7f79b1c..a3e1bc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -464,7 +464,7 @@ public class DFSAdmin extends FsShell {
"\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
"\t[-metasave filename]\n" +
"\t[-triggerBlockReport [-incremental] <datanode_host:ipc_port>]\n" +
- "\t[-listOpenFiles [-blockingDecommission]]\n" +
+ "\t[-listOpenFiles [-blockingDecommission] [-path <path>]]\n" +
"\t[-help [cmd]]\n";
/**
@@ -918,16 +918,29 @@ public class DFSAdmin extends FsShell {
* @param argv
*/
public int listOpenFiles(String[] argv) throws IOException {
+ String path = null;
List<OpenFilesType> types = new ArrayList<>();
if (argv != null) {
List<String> args = new ArrayList<>(Arrays.asList(argv));
if (StringUtils.popOption("-blockingDecommission", args)) {
types.add(OpenFilesType.BLOCKING_DECOMMISSION);
}
+
+ path = StringUtils.popOptionWithArgument("-path", args);
}
if (types.isEmpty()) {
types.add(OpenFilesType.ALL_OPEN_FILES);
}
+
+ if (path != null) {
+ path = path.trim();
+ if (path.length() == 0) {
+ path = OpenFilesIterator.FILTER_PATH_DEFAULT;
+ }
+ } else {
+ path = OpenFilesIterator.FILTER_PATH_DEFAULT;
+ }
+
EnumSet<OpenFilesType> openFilesTypes = EnumSet.copyOf(types);
DistributedFileSystem dfs = getDFS();
@@ -941,9 +954,9 @@ public class DFSAdmin extends FsShell {
dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class,
UserGroupInformation.getCurrentUser(), false);
openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(),
- FsTracer.get(dfsConf), openFilesTypes);
+ FsTracer.get(dfsConf), openFilesTypes, path);
} else {
- openFilesRemoteIterator = dfs.listOpenFiles(openFilesTypes);
+ openFilesRemoteIterator = dfs.listOpenFiles(openFilesTypes, path);
}
printOpenFiles(openFilesRemoteIterator);
return 0;
@@ -1982,7 +1995,7 @@ public class DFSAdmin extends FsShell {
+ " [-triggerBlockReport [-incremental] <datanode_host:ipc_port>]");
} else if ("-listOpenFiles".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-listOpenFiles [-blockingDecommission]]");
+ + " [-listOpenFiles [-blockingDecommission] [-path <path>]]");
} else {
System.err.println("Usage: hdfs dfsadmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
@@ -2137,7 +2150,7 @@ public class DFSAdmin extends FsShell {
return exitCode;
}
} else if ("-listOpenFiles".equals(cmd)) {
- if ((argv.length != 1) && (argv.length != 2)) {
+ if ((argv.length > 4)) {
printUsage(cmd);
return exitCode;
}
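
Two parsing details worth noting in the hunk above: an empty or whitespace-only -path argument is quietly normalized to the default "/" filter rather than rejected, and the usage check now only rejects more than four tokens, the count needed for the longest valid form, -listOpenFiles -blockingDecommission -path <path>.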
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index db983e2..03661f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -370,7 +370,7 @@ Usage:
hdfs dfsadmin [-getDatanodeInfo <datanode_host:ipc_port>]
hdfs dfsadmin [-metasave filename]
hdfs dfsadmin [-triggerBlockReport [-incremental] <datanode_host:ipc_port>]
- hdfs dfsadmin [-listOpenFiles]
+ hdfs dfsadmin [-listOpenFiles [-blockingDecommission] [-path <path>]]
hdfs dfsadmin [-help [cmd]]
| COMMAND\_OPTION | Description |
@@ -407,7 +407,7 @@ Usage:
| `-getDatanodeInfo` \<datanode\_host:ipc\_port\> | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
| `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following<br/>1. Datanodes heart beating with Namenode<br/>2. Blocks waiting to be replicated<br/>3. Blocks currently being replicated<br/>4. Blocks waiting to be deleted |
| `-triggerBlockReport` `[-incremental]` \<datanode\_host:ipc\_port\> | Trigger a block report for the given datanode. If 'incremental' is specified, it will be an incremental block report; otherwise, it will be a full block report. |
-| `-listOpenFiles` `[-blockingDecommission]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. |
+| `-listOpenFiles` `[-blockingDecommission]` `[-path <path>]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. The open files list is filtered by the given type and path. |
| `-help` [cmd] | Displays help for the given command or all commands if none is specified. |
Runs an HDFS dfsadmin client.
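
As a usage illustration (the path is an example, not a default), the enhanced command can be invoked as:

    hdfs dfsadmin -listOpenFiles
    hdfs dfsadmin -listOpenFiles -blockingDecommission
    hdfs dfsadmin -listOpenFiles -path /tmp/files/a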
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index d82025c..c0a595b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -710,13 +710,49 @@ public class TestDecommission extends AdminStatesBaseTest {
@Override
public Boolean get() {
try {
+ boolean result1 = false;
+ boolean result2 = false;
toolOut.reset();
assertEquals(0, ToolRunner.run(dfsAdmin,
new String[]{"-listOpenFiles", "-blockingDecommission"}));
toolOut.flush();
- return verifyOpenFilesListing(
+ result1 = verifyOpenFilesListing(
"dfsadmin -listOpenFiles -blockingDecommission",
closedFileSet, openFilesMap, toolOut, maxOpenFiles);
+
+ // test -blockingDecommission with option -path
+ if (openFilesMap.size() > 0) {
+ String firstOpenFile = null;
+ // Construct a new open-file and close-file map.
+ // Pick the first open file into new open-file map, remaining
+ // open files move into close-files map.
+ HashMap<Path, FSDataOutputStream> newOpenFilesMap =
+ new HashMap<>();
+ HashSet<Path> newClosedFileSet = new HashSet<>();
+ for (Map.Entry<Path, FSDataOutputStream> entry : openFilesMap
+ .entrySet()) {
+ if (firstOpenFile == null) {
+ newOpenFilesMap.put(entry.getKey(), entry.getValue());
+ firstOpenFile = entry.getKey().toString();
+ } else {
+ newClosedFileSet.add(entry.getKey());
+ }
+ }
+
+ toolOut.reset();
+ assertEquals(0,
+ ToolRunner.run(dfsAdmin, new String[] {"-listOpenFiles",
+ "-blockingDecommission", "-path", firstOpenFile}));
+ toolOut.flush();
+ result2 = verifyOpenFilesListing(
+ "dfsadmin -listOpenFiles -blockingDecommission -path"
+ + firstOpenFile,
+ newClosedFileSet, newOpenFilesMap, toolOut, 1);
+ } else {
+ result2 = true;
+ }
+
+ return result1 && result2;
} catch (Exception e) {
LOG.warn("Unexpected exception: " + e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index 3cb10bf..cc32a3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.junit.After;
@@ -256,7 +257,8 @@ public class TestHdfsAdmin {
HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
RemoteIterator<OpenFileEntry> openFilesRemoteItr =
- hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
while (openFilesRemoteItr.hasNext()) {
String filePath = openFilesRemoteItr.next().getFilePath();
assertFalse(filePath + " should not be listed under open files!",
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 0a8da4b..ccd908b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -405,8 +406,11 @@ public class TestLeaseManager {
leaseManager.getINodeWithLeases(ancestorDirectory).size());
assertEquals(iNodeIdWithLeaseCount,
leaseManager.getUnderConstructionFiles(0).size());
- assertEquals(0, (fsNamesystem.getFilesBlockingDecom(0) == null ?
- 0 : fsNamesystem.getFilesBlockingDecom(0).size()));
+ assertEquals(0,
+ (fsNamesystem.getFilesBlockingDecom(0,
+ OpenFilesIterator.FILTER_PATH_DEFAULT) == null ? 0
+ : fsNamesystem.getFilesBlockingDecom(0,
+ OpenFilesIterator.FILTER_PATH_DEFAULT).size()));
}
private Map<String, INode> createINodeTree(INodeDirectory parentDir,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index cfee7ba..70550d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -97,11 +98,13 @@ public class TestListOpenFiles {
verifyOpenFiles(openFiles);
BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries =
- nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
assertTrue("Open files list should be empty!",
openFileEntryBatchedEntries.size() == 0);
BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries =
- nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION));
+ nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
assertTrue("Open files list blocking decommission should be empty!",
openFilesBlockingDecomEntries.size() == 0);
@@ -128,15 +131,16 @@ public class TestListOpenFiles {
}
private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles,
- EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+ EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
HashSet<Path> remainingFiles = new HashSet<>(openFiles.keySet());
OpenFileEntry lastEntry = null;
BatchedEntries<OpenFileEntry> batchedEntries;
do {
if (lastEntry == null) {
- batchedEntries = nnRpc.listOpenFiles(0, openFilesTypes);
+ batchedEntries = nnRpc.listOpenFiles(0, openFilesTypes, path);
} else {
- batchedEntries = nnRpc.listOpenFiles(lastEntry.getId(), openFilesTypes);
+ batchedEntries = nnRpc.listOpenFiles(lastEntry.getId(),
+ openFilesTypes, path);
}
assertTrue("Incorrect open files list size!",
batchedEntries.size() <= BATCH_SIZE);
@@ -154,9 +158,11 @@ public class TestListOpenFiles {
private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles)
throws IOException {
- verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+ verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
verifyOpenFiles(new HashMap<>(),
- EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION));
+ EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
}
private Set<Path> createFiles(FileSystem fileSystem, String fileNamePrefix,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fbcd92/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 6a01de2..7237c88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -725,6 +725,67 @@ public class TestDFSAdmin {
new String[]{"-listOpenFiles"}));
verifyOpenFilesListing(closedFileSet, openFilesMap);
}
+
+ // test -listOpenFiles command with option <path>
+ openFilesMap.clear();
+ Path file;
+ HashMap<Path, FSDataOutputStream> openFiles1 = new HashMap<>();
+ HashMap<Path, FSDataOutputStream> openFiles2 = new HashMap<>();
+ for (int i = 0; i < numOpenFiles; i++) {
+ if (i % 2 == 0) {
+ file = new Path(new Path("/tmp/files/a"), "open-file-" + i);
+ } else {
+ file = new Path(new Path("/tmp/files/b"), "open-file-" + i);
+ }
+
+ DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
+ FSDataOutputStream outputStream = fs.append(file);
+
+ if (i % 2 == 0) {
+ openFiles1.put(file, outputStream);
+ } else {
+ openFiles2.put(file, outputStream);
+ }
+ openFilesMap.put(file, outputStream);
+ }
+
+ resetStream();
+ // list all open files
+ assertEquals(0,
+ ToolRunner.run(dfsAdmin, new String[] {"-listOpenFiles"}));
+ verifyOpenFilesListing(null, openFilesMap);
+
+ resetStream();
+ // list open files under directory path /tmp/files/a
+ assertEquals(0, ToolRunner.run(dfsAdmin,
+ new String[] {"-listOpenFiles", "-path", "/tmp/files/a"}));
+ verifyOpenFilesListing(null, openFiles1);
+
+ resetStream();
+ // list open files without input path
+ assertEquals(-1, ToolRunner.run(dfsAdmin,
+ new String[] {"-listOpenFiles", "-path"}));
+ // verify the error
+ String outStr = scanIntoString(err);
+ assertTrue(outStr.contains("listOpenFiles: option"
+ + " -path requires 1 argument"));
+
+ resetStream();
+ // list open files with empty path
+ assertEquals(0, ToolRunner.run(dfsAdmin,
+ new String[] {"-listOpenFiles", "-path", ""}));
+ // all the open files will be listed
+ verifyOpenFilesListing(null, openFilesMap);
+
+ resetStream();
+ // list invalid path file
+ assertEquals(0, ToolRunner.run(dfsAdmin,
+ new String[] {"-listOpenFiles", "-path", "/invalid_path"}));
+ outStr = scanIntoString(out);
+ for (Path openFilePath : openFilesMap.keySet()) {
+ assertThat(outStr, not(containsString(openFilePath.toString())));
+ }
+ DFSTestUtil.closeOpenFiles(openFilesMap, openFilesMap.size());
}
}
@@ -732,9 +793,13 @@ public class TestDFSAdmin {
HashMap<Path, FSDataOutputStream> openFilesMap) {
final String outStr = scanIntoString(out);
LOG.info("dfsadmin -listOpenFiles output: \n" + out);
- for (Path closedFilePath : closedFileSet) {
- assertThat(outStr, not(containsString(closedFilePath.toString() + "\n")));
+ if (closedFileSet != null) {
+ for (Path closedFilePath : closedFileSet) {
+ assertThat(outStr,
+ not(containsString(closedFilePath.toString() + "\n")));
+ }
}
+
for (Path openFilePath : openFilesMap.keySet()) {
assertThat(outStr, is(containsString(openFilePath.toString() + "\n")));
}