Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/12/02 18:41:48 UTC
svn commit: r1547122 [1/5] - in
/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
hadoop-hdfs/dev-support/ h...
Author: arp
Date: Mon Dec 2 17:41:44 2013
New Revision: 1547122
URL: http://svn.apache.org/r1547122
Log:
Merging r1544666 through r1547120 from trunk to branch HDFS-2832
Added:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
- copied unchanged from r1547120, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
- copied unchanged from r1547120, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
- copied unchanged from r1547120, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
- copied unchanged from r1547120, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
- copied unchanged from r1547120, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
Removed:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
Modified:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1544666-1547120
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Mon Dec 2 17:41:44 2013
@@ -708,15 +708,28 @@ class OpenFileCtx {
}
return response;
}
-
+
+ /**
+ * Check the commit status for the given offset.
+ * @param commitOffset the offset to commit
+ * @param channel the channel on which to return the response
+ * @param xid the xid of the commit request
+ * @param preOpAttr the pre-operation file attributes
+ * @param fromRead whether the commit is triggered by a read request
+ * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
+ * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
+ */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
- Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
- // Keep stream active
- updateLastAccessTime();
+ Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
+ if (!fromRead) {
+ Preconditions.checkState(channel != null && preOpAttr != null);
+ // Keep stream active
+ updateLastAccessTime();
+ }
Preconditions.checkState(commitOffset >= 0);
COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
- preOpAttr);
+ preOpAttr, fromRead);
if (LOG.isDebugEnabled()) {
LOG.debug("Got commit status: " + ret.name());
}
@@ -743,14 +756,10 @@ class OpenFileCtx {
}
return ret;
}
-
- /**
- * return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
- * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
- */
+
@VisibleForTesting
synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
- Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+ Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
if (!activeState) {
if (pendingWrites.isEmpty()) {
return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
@@ -767,9 +776,11 @@ class OpenFileCtx {
if (commitOffset > 0) {
if (commitOffset > flushed) {
- CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
- preOpAttr);
- pendingCommits.put(commitOffset, commitCtx);
+ if (!fromRead) {
+ CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
+ preOpAttr);
+ pendingCommits.put(commitOffset, commitCtx);
+ }
return COMMIT_STATUS.COMMIT_WAIT;
} else {
return COMMIT_STATUS.COMMIT_DO_SYNC;
@@ -784,11 +795,13 @@ class OpenFileCtx {
// do a sync here though the output stream might be closed.
return COMMIT_STATUS.COMMIT_FINISHED;
} else {
- // Insert commit
- long maxOffset = key.getKey().getMax() - 1;
- Preconditions.checkState(maxOffset > 0);
- CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
- pendingCommits.put(maxOffset, commitCtx);
+ if (!fromRead) {
+ // Insert commit
+ long maxOffset = key.getKey().getMax() - 1;
+ Preconditions.checkState(maxOffset > 0);
+ CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
+ pendingCommits.put(maxOffset, commitCtx);
+ }
return COMMIT_STATUS.COMMIT_WAIT;
}
}
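For orientation, a minimal sketch of the two call patterns this change introduces (names taken from the diff above; the surrounding variables are assumed to be in scope). A COMMIT request supplies a live channel and pre-op attributes and may enqueue a pending commit; a read-triggered check passes fromRead=true with null channel and attributes and never enqueues or blocks:

    // Commit path: channel and preOpAttr are required; a pending commit
    // may be queued while data is still being flushed.
    COMMIT_STATUS fromCommit =
        ctx.checkCommit(dfsClient, commitOffset, channel, xid, preOpAttr, false);

    // Read path: no channel or pre-op attributes; COMMIT_WAIT is returned
    // without queuing anything, so the read itself is never blocked.
    COMMIT_STATUS beforeRead =
        ctx.checkCommit(dfsClient, commitOffset, null, 0, null, true);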
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Mon Dec 2 17:41:44 2013
@@ -628,6 +628,14 @@ public class RpcProgramNfs3 extends RpcP
}
}
+ // In case there is buffered data for the same file, flush it. This can be
+ // optimized later by reading from the cache.
+ int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
+ if (ret != Nfs3Status.NFS3_OK) {
+ LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
+ + ". Read may not get most recent data.");
+ }
+
try {
int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
byte[] readbuffer = new byte[buffSize];
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Mon Dec 2 17:41:44 2013
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
-import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -41,11 +40,9 @@ import org.apache.hadoop.nfs.nfs3.respon
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.hadoop.util.Daemon;
import org.jboss.netty.channel.Channel;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
/**
* Manage the writes and responds asynchronously.
@@ -207,6 +204,51 @@ public class WriteManager {
return;
}
+ // Do a possible commit before a read request, in case there is buffered
+ // data inside DFSClient that has been flushed but not yet synced.
+ int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
+ long commitOffset) {
+ int status;
+ OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
+
+ if (openFileCtx == null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
+ + " commitOffset=" + commitOffset
+ + ". Return success in this case.");
+ }
+ status = Nfs3Status.NFS3_OK;
+
+ } else {
+ COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
+ null, 0, null, true);
+ switch (ret) {
+ case COMMIT_FINISHED:
+ case COMMIT_INACTIVE_CTX:
+ status = Nfs3Status.NFS3_OK;
+ break;
+ case COMMIT_INACTIVE_WITH_PENDING_WRITE:
+ case COMMIT_ERROR:
+ status = Nfs3Status.NFS3ERR_IO;
+ break;
+ case COMMIT_WAIT:
+ /**
+ * This should rarely happen, e.g., when a read request arrives before
+ * DFSClient has managed to flush data to the DN, or when prerequisite
+ * writes are not yet available. Don't wait, since we don't want to
+ * block the read.
+ */
+ status = Nfs3Status.NFS3ERR_JUKEBOX;
+ break;
+ default:
+ LOG.error("Should not get commit return code:" + ret.name());
+ throw new RuntimeException("Should not get commit return code:"
+ + ret.name());
+ }
+ }
+ return status;
+ }
+
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
int status;
@@ -219,9 +261,8 @@ public class WriteManager {
} else {
COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
- channel, xid, preOpAttr);
+ channel, xid, preOpAttr, false);
switch (ret) {
- case COMMIT_DO_SYNC:
case COMMIT_FINISHED:
case COMMIT_INACTIVE_CTX:
status = Nfs3Status.NFS3_OK;
@@ -234,6 +275,7 @@ public class WriteManager {
// Do nothing. Commit is async now.
return;
default:
+ LOG.error("Should not get commit return code:" + ret.name());
throw new RuntimeException("Should not get commit return code:"
+ ret.name());
}
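A hedged sketch of the contract commitBeforeRead gives its caller, using the status constants from the switch above (variable names assumed): NFS3_OK means the read will see data as fresh as the gateway can make it, NFS3ERR_JUKEBOX means a commit is still pending and the client should retry, and NFS3ERR_IO surfaces a failed or inactive stream:

    int status = writeManager.commitBeforeRead(dfsClient, fileHandle, offset + count);
    if (status == Nfs3Status.NFS3ERR_JUKEBOX) {
      // Commit still in flight; an NFS client is expected to retry shortly.
    } else if (status != Nfs3Status.NFS3_OK) {
      // NFS3ERR_IO: inactive context with pending writes, or a commit error;
      // the read may observe stale data.
    }

RpcProgramNfs3 (above) chooses to log a warning and proceed with the read in the non-OK cases rather than failing the request.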
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Mon Dec 2 17:41:44 2013
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -26,6 +27,7 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.concurrent.ConcurrentNavigableMap;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -39,6 +41,7 @@ import org.apache.hadoop.nfs.nfs3.IdUser
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
@@ -47,6 +50,7 @@ import org.apache.hadoop.nfs.nfs3.respon
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.jboss.netty.channel.Channel;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
@@ -139,32 +143,33 @@ public class TestWrites {
// Test inactive open file context
ctx.setActiveStatusForTest(false);
- ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+ Channel ch = Mockito.mock(Channel.class);
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
- ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long) 10);
- COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr);
+ COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
// Do_SYNC state will be updated to FINISHED after data sync
- ret = ctx.checkCommit(dfsClient, 5, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
- status = ctx.checkCommitInternal(10, null, 1, attr);
+ status = ctx.checkCommitInternal(10, ch, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
- ret = ctx.checkCommit(dfsClient, 10, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
.getPendingCommitsForTest();
Assert.assertTrue(commits.size() == 0);
- ret = ctx.checkCommit(dfsClient, 11, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
long key = commits.firstKey();
@@ -173,7 +178,7 @@ public class TestWrites {
// Test request with zero commit offset
commits.remove(new Long(11));
// There is one pending write [5,10]
- ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
key = commits.firstKey();
@@ -181,10 +186,79 @@ public class TestWrites {
// Empty pending writes
ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
- ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
+ @Test
+ // Validate all the commit check return codes in OpenFileCtx.COMMIT_STATUS,
+ // which include COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
+ // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
+ public void testCheckCommitFromRead() throws IOException {
+ DFSClient dfsClient = Mockito.mock(DFSClient.class);
+ Nfs3FileAttributes attr = new Nfs3FileAttributes();
+ HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
+ Mockito.when(fos.getPos()).thenReturn((long) 0);
+
+ OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
+ new IdUserGroup());
+
+ FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
+ COMMIT_STATUS ret;
+ WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
+ assertTrue(wm.addOpenFileStream(h, ctx));
+
+ // Test inactive open file context
+ ctx.setActiveStatusForTest(false);
+ Channel ch = Mockito.mock(Channel.class);
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
+ assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+
+ ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+ new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
+ assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
+
+ // Test request with non zero commit offset
+ ctx.setActiveStatusForTest(true);
+ Mockito.when(fos.getPos()).thenReturn((long) 10);
+ COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
+ assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
+ // Do_SYNC state will be updated to FINISHED after data sync
+ ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+ assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
+
+ status = ctx.checkCommitInternal(10, ch, 1, attr, true);
+ assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
+ ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+ assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
+
+ ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
+ .getPendingCommitsForTest();
+ assertTrue(commits.size() == 0);
+ ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
+ assertEquals(0, commits.size()); // commit triggered by read doesn't wait
+ assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11));
+
+ // Test request with zero commit offset
+ // There is one pending write [5,10]
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
+ assertEquals(0, commits.size());
+ assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
+
+ // Empty pending writes
+ ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+ assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+ }
+
private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
throws InterruptedException {
int waitedTime = 0;
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Dec 2 17:41:44 2013
@@ -212,6 +212,22 @@ Trunk (Unreleased)
and INodeFileUnderConstructionWithSnapshot with FileUnderConstructionFeature.
(jing9 via szetszwo)
+ HDFS-5538. URLConnectionFactory should pick up the SSL related configuration
+ by default. (Haohui Mai via jing9)
+
+ HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
+ with DirectoryWithQuotaFeature. (szetszwo)
+
+ HDFS-5556. Add some more NameNode cache statistics, cache pool stats
+ (cmccabe)
+
+ HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui
+ Mai via jing9)
+
+ HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo)
+
+ HDFS-5430. Support TTL on CacheDirectives. (wang)
+
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@@ -399,6 +415,12 @@ Trunk (Unreleased)
HDFS-5543. Fix narrow race condition in TestPathBasedCacheRequests
(cmccabe)
+ HDFS-5565. CacheAdmin help should match against non-dashed commands
+ (wang via cmccabe)
+
+ HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out
+ native mlock. (Colin McCabe and Akira Ajisaka via wang)
+
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -536,6 +558,12 @@ Release 2.3.0 - UNRELEASED
HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
+ HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain
+ text instead of HTML. (Haohui Mai via jing9)
+
+ HDFS-5581. NameNodeFsck should use only one instance of
+ BlockPlacementPolicy. (vinay via cmccabe)
+
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -612,6 +640,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5552. Fix wrong information of "Cluster summay" in dfshealth.html.
(Haohui Mai via jing9)
+ HDFS-5533. Symlink delete/create should be treated as DELETE/CREATE in snapshot diff
+ report. (Binglin Chang via jing9)
+
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -634,6 +665,8 @@ Release 2.2.1 - UNRELEASED
HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
+ HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
@@ -727,6 +760,13 @@ Release 2.2.1 - UNRELEASED
HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
+ HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
+
+ HDFS-5577. NFS user guide update (brandonli)
+
+ HDFS-5563. NFS gateway should commit the buffered data when read request comes
+ after write to the same file (brandonli)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
@@ -4018,6 +4058,8 @@ Release 0.23.10 - UNRELEASED
HDFS-4329. DFSShell issues with directories with spaces in name (Cristina
L. Abad via jeagles)
+ HDFS-5526. Datanode cannot roll back to previous layout version (kihwal)
+
Release 0.23.9 - 2013-07-08
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Mon Dec 2 17:41:44 2013
@@ -352,6 +352,11 @@
<Method name="getReplication" />
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
</Match>
+ <Match>
+ <Class name="org.apache.hadoop.hdfs.protocol.CacheDirective" />
+ <Method name="insertInternal" />
+ <Bug pattern="BC_UNCONFIRMED_CAST" />
+ </Match>
<!-- These two are used for shutting down and kicking the CRMon, do not need strong sync -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1544666-1547120
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Mon Dec 2 17:41:44 2013
@@ -109,6 +109,7 @@ import org.apache.hadoop.hdfs.client.Cli
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -2358,7 +2359,7 @@ public class DFSClient implements java.i
}
}
- public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+ public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
checkOpen();
try {
return namenode.listCachePools("");
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Mon Dec 2 17:41:44 2013
@@ -38,12 +38,15 @@ import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.SecureRandom;
+import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
+import java.util.Date;
import java.util.HashSet;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -75,6 +78,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@@ -1427,4 +1431,79 @@ public class DFSUtil {
return (value == null || value.isEmpty()) ?
defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
}
-}
\ No newline at end of file
+
+ public static HttpServer.Builder loadSslConfToHttpServerBuilder(
+ HttpServer.Builder builder, Configuration sslConf) {
+ return builder
+ .needsClientAuth(
+ sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+ DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
+ .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
+ .keyStore(sslConf.get("ssl.server.keystore.location"),
+ sslConf.get("ssl.server.keystore.password"),
+ sslConf.get("ssl.server.keystore.type", "jks"))
+ .trustStore(sslConf.get("ssl.server.truststore.location"),
+ sslConf.get("ssl.server.truststore.password"),
+ sslConf.get("ssl.server.truststore.type", "jks"));
+ }
+
+ /**
+ * Converts a Date into an ISO-8601 formatted datetime string.
+ */
+ public static String dateToIso8601String(Date date) {
+ SimpleDateFormat df =
+ new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
+ return df.format(date);
+ }
+
+ /**
+ * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
+ */
+ public static String durationToString(long durationMs) {
+ Preconditions.checkArgument(durationMs >= 0, "Invalid negative duration");
+ // Chop off the milliseconds
+ long durationSec = durationMs / 1000;
+ final int secondsPerMinute = 60;
+ final int secondsPerHour = 60*60;
+ final int secondsPerDay = 60*60*24;
+ final long days = durationSec / secondsPerDay;
+ durationSec -= days * secondsPerDay;
+ final long hours = durationSec / secondsPerHour;
+ durationSec -= hours * secondsPerHour;
+ final long minutes = durationSec / secondsPerMinute;
+ durationSec -= minutes * secondsPerMinute;
+ final long seconds = durationSec;
+ return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds);
+ }
+
+ /**
+ * Converts a relative time string into a duration in milliseconds.
+ */
+ public static long parseRelativeTime(String relTime) throws IOException {
+ if (relTime.length() < 2) {
+ throw new IOException("Unable to parse relative time value of " + relTime
+ + ": too short");
+ }
+ String ttlString = relTime.substring(0, relTime.length()-1);
+ int ttl;
+ try {
+ ttl = Integer.parseInt(ttlString);
+ } catch (NumberFormatException e) {
+ throw new IOException("Unable to parse relative time value of " + relTime
+ + ": " + ttlString + " is not a number");
+ }
+ if (relTime.endsWith("s")) {
+ // pass
+ } else if (relTime.endsWith("m")) {
+ ttl *= 60;
+ } else if (relTime.endsWith("h")) {
+ ttl *= 60*60;
+ } else if (relTime.endsWith("d")) {
+ ttl *= 60*60*24;
+ } else {
+ throw new IOException("Unable to parse relative time value of " + relTime
+ + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
+ }
+ return ttl*1000;
+ }
+}
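A small round-trip sketch for the new helpers, with results worked out from the code above (the sketch itself is not part of the commit):

    // "30m" parses to 30 * 60 * 1000 milliseconds.
    long ttlMs = DFSUtil.parseRelativeTime("30m");    // 1800000
    // Rendered back in DDD:HH:MM:SS form.
    String pretty = DFSUtil.durationToString(ttlMs);  // "000:00:30:00"
    // Absolute times are formatted as ISO-8601.
    String iso = DFSUtil.dateToIso8601String(new java.util.Date());

Note that parseRelativeTime accepts s/m/h/d suffixes and rejects anything else with an IOException.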
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Dec 2 17:41:44 2013
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.client.Hdf
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -1713,12 +1714,12 @@ public class DistributedFileSystem exten
/**
* List all cache pools.
*
- * @return A remote iterator from which you can get CachePoolInfo objects.
+ * @return A remote iterator from which you can get CachePoolEntry objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
- public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+ public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools();
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java Mon Dec 2 17:41:44 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.RemoteIterat
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
@@ -213,12 +214,12 @@ public class HdfsAdmin {
/**
* List all cache pools.
*
- * @return A remote iterator from which you can get CachePoolInfo objects.
+ * @return A remote iterator from which you can get CachePoolEntry objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
- public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+ public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools();
}
}
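Both DistributedFileSystem and HdfsAdmin now hand back CachePoolEntry objects, which pair the static pool info with live stats. A usage sketch (CachePoolEntry's body is only copied from trunk in this commit, so the getInfo/getStats accessor names are an assumption):

    RemoteIterator<CachePoolEntry> it = hdfsAdmin.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      CachePoolInfo info = entry.getInfo();     // assumed accessor
      CachePoolStats stats = entry.getStats();  // assumed accessor
      System.out.println(info.getPoolName());
    }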
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java Mon Dec 2 17:41:44 2013
@@ -17,65 +17,94 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Date;
+
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
+import org.apache.hadoop.util.IntrusiveCollection;
+import org.apache.hadoop.util.IntrusiveCollection.Element;
import com.google.common.base.Preconditions;
/**
- * Represents an entry in the PathBasedCache on the NameNode.
+ * Namenode class that tracks state related to a cached path.
*
* This is an implementation class, not part of the public API.
*/
@InterfaceAudience.Private
-public final class CacheDirective {
- private final long entryId;
+public final class CacheDirective implements IntrusiveCollection.Element {
+ private final long id;
private final String path;
private final short replication;
- private final CachePool pool;
+ private CachePool pool;
+ private final long expiryTime;
+
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
+ private Element prev;
+ private Element next;
- public CacheDirective(long entryId, String path,
- short replication, CachePool pool) {
- Preconditions.checkArgument(entryId > 0);
- this.entryId = entryId;
+ public CacheDirective(long id, String path,
+ short replication, long expiryTime) {
+ Preconditions.checkArgument(id > 0);
+ this.id = id;
+ this.path = checkNotNull(path);
Preconditions.checkArgument(replication > 0);
- this.path = path;
- Preconditions.checkNotNull(pool);
this.replication = replication;
- Preconditions.checkNotNull(path);
- this.pool = pool;
+ this.expiryTime = expiryTime;
this.bytesNeeded = 0;
this.bytesCached = 0;
this.filesAffected = 0;
}
- public long getEntryId() {
- return entryId;
+ public long getId() {
+ return id;
}
public String getPath() {
return path;
}
- public CachePool getPool() {
- return pool;
- }
-
public short getReplication() {
return replication;
}
- public CacheDirectiveInfo toDirective() {
+ public CachePool getPool() {
+ return pool;
+ }
+
+ /**
+ * @return When this directive expires, in milliseconds since Unix epoch
+ */
+ public long getExpiryTime() {
+ return expiryTime;
+ }
+
+ /**
+ * @return When this directive expires, as an ISO-8601 formatted string.
+ */
+ public String getExpiryTimeString() {
+ return DFSUtil.dateToIso8601String(new Date(expiryTime));
+ }
+
+ /**
+ * Returns a {@link CacheDirectiveInfo} based on this CacheDirective.
+ * <p>
+ * This always sets an absolute expiry time, never a relative TTL.
+ */
+ public CacheDirectiveInfo toInfo() {
return new CacheDirectiveInfo.Builder().
- setId(entryId).
+ setId(id).
setPath(new Path(path)).
setReplication(replication).
setPool(pool.getPoolName()).
+ setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)).
build();
}
@@ -84,20 +113,22 @@ public final class CacheDirective {
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setFilesAffected(filesAffected).
+ setHasExpired(new Date().getTime() > expiryTime).
build();
}
public CacheDirectiveEntry toEntry() {
- return new CacheDirectiveEntry(toDirective(), toStats());
+ return new CacheDirectiveEntry(toInfo(), toStats());
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("{ entryId:").append(entryId).
+ builder.append("{ id:").append(id).
append(", path:").append(path).
append(", replication:").append(replication).
append(", pool:").append(pool).
+ append(", expiryTime: ").append(getExpiryTimeString()).
append(", bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", filesAffected:").append(filesAffected).
@@ -113,12 +144,12 @@ public final class CacheDirective {
return false;
}
CacheDirective other = (CacheDirective)o;
- return entryId == other.entryId;
+ return id == other.id;
}
@Override
public int hashCode() {
- return new HashCodeBuilder().append(entryId).toHashCode();
+ return new HashCodeBuilder().append(id).toHashCode();
}
public long getBytesNeeded() {
@@ -156,4 +187,55 @@ public final class CacheDirective {
public void incrementFilesAffected() {
this.filesAffected++;
}
+
+ @SuppressWarnings("unchecked")
+ @Override // IntrusiveCollection.Element
+ public void insertInternal(IntrusiveCollection<? extends Element> list,
+ Element prev, Element next) {
+ assert this.pool == null;
+ this.pool = ((CachePool.DirectiveList)list).getCachePool();
+ this.prev = prev;
+ this.next = next;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) {
+ assert list == pool.getDirectiveList();
+ this.prev = prev;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public void setNext(IntrusiveCollection<? extends Element> list, Element next) {
+ assert list == pool.getDirectiveList();
+ this.next = next;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public void removeInternal(IntrusiveCollection<? extends Element> list) {
+ assert list == pool.getDirectiveList();
+ this.pool = null;
+ this.prev = null;
+ this.next = null;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public Element getPrev(IntrusiveCollection<? extends Element> list) {
+ if (list != pool.getDirectiveList()) {
+ return null;
+ }
+ return this.prev;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public Element getNext(IntrusiveCollection<? extends Element> list) {
+ if (list != pool.getDirectiveList()) {
+ return null;
+ }
+ return this.next;
+ }
+
+ @Override // IntrusiveCollection.Element
+ public boolean isInList(IntrusiveCollection<? extends Element> list) {
+ return pool == null ? false : list == pool.getDirectiveList();
+ }
};
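The IntrusiveCollection.Element methods above let a directive sit directly in its pool's list, carrying its own prev/next pointers rather than being boxed in a per-entry wrapper node; insertInternal doubles as the point where the directive learns which pool owns it. A rough sketch of the pool-side list that the cast in insertInternal implies (assumed shape; CachePool's own code is not in this diff):

    public static class DirectiveList extends IntrusiveCollection<CacheDirective> {
      private final CachePool cachePool;
      DirectiveList(CachePool cachePool) { this.cachePool = cachePool; }
      public CachePool getCachePool() { return cachePool; }
    }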
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java Mon Dec 2 17:41:44 2013
@@ -17,11 +17,14 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import java.util.Date;
+
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
/**
* Describes a path-based cache directive.
@@ -37,6 +40,7 @@ public class CacheDirectiveInfo {
private Path path;
private Short replication;
private String pool;
+ private Expiration expiration;
/**
* Builds a new CacheDirectiveInfo populated with the set properties.
@@ -44,7 +48,7 @@ public class CacheDirectiveInfo {
* @return New CacheDirectiveInfo.
*/
public CacheDirectiveInfo build() {
- return new CacheDirectiveInfo(id, path, replication, pool);
+ return new CacheDirectiveInfo(id, path, replication, pool, expiration);
}
/**
@@ -62,6 +66,7 @@ public class CacheDirectiveInfo {
this.path = directive.getPath();
this.replication = directive.getReplication();
this.pool = directive.getPool();
+ this.expiration = directive.getExpiration();
}
/**
@@ -107,18 +112,134 @@ public class CacheDirectiveInfo {
this.pool = pool;
return this;
}
+
+ /**
+ * Sets when the CacheDirective should expire. A
+ * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
+ * relative expiration time.
+ *
+ * @param expiration when this CacheDirective should expire
+ * @return This builder, for call chaining
+ */
+ public Builder setExpiration(Expiration expiration) {
+ this.expiration = expiration;
+ return this;
+ }
+ }
+
+ /**
+ * Denotes a relative or absolute expiration time for a CacheDirective. Use
+ * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and
+ * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
+ * Expiration.
+ * <p>
+ * In either case, the server-side clock is used to determine when a
+ * CacheDirective expires.
+ */
+ public static class Expiration {
+
+ /** Denotes a CacheDirectiveInfo that never expires **/
+ public static final int EXPIRY_NEVER = -1;
+
+ /**
+ * Create a new relative Expiration.
+ *
+ * @param ms how long until the CacheDirective expires, in milliseconds
+ * @return A relative Expiration
+ */
+ public static Expiration newRelative(long ms) {
+ return new Expiration(ms, true);
+ }
+
+ /**
+ * Create a new absolute Expiration.
+ *
+ * @param date when the CacheDirective expires
+ * @return An absolute Expiration
+ */
+ public static Expiration newAbsolute(Date date) {
+ return new Expiration(date.getTime(), false);
+ }
+
+ /**
+ * Create a new absolute Expiration.
+ *
+ * @param ms when the CacheDirective expires, in milliseconds since the Unix
+ * epoch.
+ * @return An absolute Expiration
+ */
+ public static Expiration newAbsolute(long ms) {
+ return new Expiration(ms, false);
+ }
+
+ private final long ms;
+ private final boolean isRelative;
+
+ private Expiration(long ms, boolean isRelative) {
+ this.ms = ms;
+ this.isRelative = isRelative;
+ }
+
+ /**
+ * @return true if Expiration was specified as a relative duration, false if
+ * specified as an absolute time.
+ */
+ public boolean isRelative() {
+ return isRelative;
+ }
+
+ /**
+ * @return The raw underlying millisecond value, either a relative duration
+ * or an absolute time as milliseconds since the Unix epoch.
+ */
+ public long getMillis() {
+ return ms;
+ }
+
+ /**
+ * @return Expiration time as a {@link Date} object. This converts a
+ * relative Expiration into an absolute Date based on the local
+ * clock.
+ */
+ public Date getAbsoluteDate() {
+ return new Date(getAbsoluteMillis());
+ }
+
+ /**
+ * @return Expiration time in milliseconds from the Unix epoch. This
+ * converts a relative Expiration into an absolute time based on the
+ * local clock.
+ */
+ public long getAbsoluteMillis() {
+ if (!isRelative) {
+ return ms;
+ } else {
+ return new Date().getTime() + ms;
+ }
+ }
+
+ @Override
+ public String toString() {
+ if (isRelative) {
+ return DFSUtil.durationToString(ms);
+ }
+ return DFSUtil.dateToIso8601String(new Date(ms));
+ }
}
private final Long id;
private final Path path;
private final Short replication;
private final String pool;
+ private final Expiration expiration;
- CacheDirectiveInfo(Long id, Path path, Short replication, String pool) {
+ CacheDirectiveInfo(Long id, Path path, Short replication, String pool,
+ Expiration expiration) {
this.id = id;
this.path = path;
this.replication = replication;
this.pool = pool;
+ this.expiration = expiration;
}
/**
@@ -148,7 +269,14 @@ public class CacheDirectiveInfo {
public String getPool() {
return pool;
}
-
+
+ /**
+ * @return When this directive expires.
+ */
+ public Expiration getExpiration() {
+ return expiration;
+ }
+
@Override
public boolean equals(Object o) {
if (o == null) {
@@ -162,6 +290,7 @@ public class CacheDirectiveInfo {
append(getPath(), other.getPath()).
append(getReplication(), other.getReplication()).
append(getPool(), other.getPool()).
+ append(getExpiration(), other.getExpiration()).
isEquals();
}
@@ -171,6 +300,7 @@ public class CacheDirectiveInfo {
append(path).
append(replication).
append(pool).
+ append(expiration).
hashCode();
}
@@ -181,19 +311,23 @@ public class CacheDirectiveInfo {
String prefix = "";
if (id != null) {
builder.append(prefix).append("id: ").append(id);
- prefix = ",";
+ prefix = ", ";
}
if (path != null) {
builder.append(prefix).append("path: ").append(path);
- prefix = ",";
+ prefix = ", ";
}
if (replication != null) {
builder.append(prefix).append("replication: ").append(replication);
- prefix = ",";
+ prefix = ", ";
}
if (pool != null) {
builder.append(prefix).append("pool: ").append(pool);
- prefix = ",";
+ prefix = ", ";
+ }
+ if (expiration != null) {
+ builder.append(prefix).append("expiration: ").append(expiration);
+ prefix = ", ";
}
builder.append("}");
return builder.toString();
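[Editor's sketch, not part of the patch: the new Expiration support is exercised through the Builder. The path, pool name, and duration below are invented for illustration, and the chained setPath/setReplication setters are assumed from the rest of the Builder class.]

    import java.util.Date;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

    // Directive that should expire one hour after creation; per the class
    // javadoc, the server-side clock decides when expiry actually happens.
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/warm/dataset"))                      // hypothetical path
        .setReplication((short) 2)
        .setPool("warm-pool")                                    // hypothetical pool
        .setExpiration(Expiration.newRelative(60 * 60 * 1000L))  // 1 hour, relative
        .build();

    // An absolute expiration pins a wall-clock instant instead:
    Expiration absolute = Expiration.newAbsolute(new Date(1388534400000L));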
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java Mon Dec 2 17:41:44 2013
@@ -30,6 +30,7 @@ public class CacheDirectiveStats {
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
+ private boolean hasExpired;
/**
* Builds a new CacheDirectiveStats populated with the set properties.
@@ -37,7 +38,8 @@ public class CacheDirectiveStats {
* @return New CacheDirectiveStats.
*/
public CacheDirectiveStats build() {
- return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected);
+ return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected,
+ hasExpired);
}
/**
@@ -52,7 +54,7 @@ public class CacheDirectiveStats {
* @param bytesNeeded The bytes needed.
* @return This builder, for call chaining.
*/
- public Builder setBytesNeeded(Long bytesNeeded) {
+ public Builder setBytesNeeded(long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
}
@@ -63,7 +65,7 @@ public class CacheDirectiveStats {
* @param bytesCached The bytes cached.
* @return This builder, for call chaining.
*/
- public Builder setBytesCached(Long bytesCached) {
+ public Builder setBytesCached(long bytesCached) {
this.bytesCached = bytesCached;
return this;
}
@@ -74,44 +76,64 @@ public class CacheDirectiveStats {
* @param filesAffected The files affected.
* @return This builder, for call chaining.
*/
- public Builder setFilesAffected(Long filesAffected) {
+ public Builder setFilesAffected(long filesAffected) {
this.filesAffected = filesAffected;
return this;
}
+
+ /**
+ * Sets whether this directive has expired.
+ *
+ * @param hasExpired whether this directive has expired
+ * @return This builder, for call chaining.
+ */
+ public Builder setHasExpired(boolean hasExpired) {
+ this.hasExpired = hasExpired;
+ return this;
+ }
}
private final long bytesNeeded;
private final long bytesCached;
private final long filesAffected;
+ private final boolean hasExpired;
private CacheDirectiveStats(long bytesNeeded, long bytesCached,
- long filesAffected) {
+ long filesAffected, boolean hasExpired) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesAffected = filesAffected;
+ this.hasExpired = hasExpired;
}
/**
* @return The bytes needed.
*/
- public Long getBytesNeeded() {
+ public long getBytesNeeded() {
return bytesNeeded;
}
/**
* @return The bytes cached.
*/
- public Long getBytesCached() {
+ public long getBytesCached() {
return bytesCached;
}
/**
* @return The files affected.
*/
- public Long getFilesAffected() {
+ public long getFilesAffected() {
return filesAffected;
}
+ /**
+ * @return Whether this directive has expired.
+ */
+ public boolean hasExpired() {
+ return hasExpired;
+ }
+
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
@@ -119,6 +141,7 @@ public class CacheDirectiveStats {
builder.append("bytesNeeded: ").append(bytesNeeded);
builder.append(", ").append("bytesCached: ").append(bytesCached);
builder.append(", ").append("filesAffected: ").append(filesAffected);
+ builder.append(", ").append("hasExpired: ").append(hasExpired);
builder.append("}");
return builder.toString();
}
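[Editor's sketch, not part of the patch: with the setters now taking primitive long, a stats object builds without autoboxing. Values are invented.]

    CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
        .setBytesNeeded(1024L)
        .setBytesCached(512L)
        .setFilesAffected(3L)
        .setHasExpired(false)
        .build();
    // toString() yields roughly:
    // {bytesNeeded: 1024, bytesCached: 512, filesAffected: 3, hasExpired: false}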
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java Mon Dec 2 17:41:44 2013
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
@@ -150,7 +151,10 @@ public class CachePoolInfo {
public static void validate(CachePoolInfo info) throws IOException {
if (info == null) {
- throw new IOException("CachePoolInfo is null");
+ throw new InvalidRequestException("CachePoolInfo is null");
+ }
+ if ((info.getWeight() != null) && (info.getWeight() < 0)) {
+ throw new InvalidRequestException("CachePool weight is negative.");
}
validateName(info.poolName);
}
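[Editor's sketch, not part of the patch: callers of validate() now get a typed InvalidRequestException rather than a bare IOException. The pool name and weight are invented, and CachePoolInfo's chained setWeight setter is assumed.]

    CachePoolInfo info = new CachePoolInfo("research").setWeight(-1);
    try {
      CachePoolInfo.validate(info);
    } catch (InvalidRequestException e) {
      // rejected with "CachePool weight is negative."
    }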
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Mon Dec 2 17:41:44 2013
@@ -1179,6 +1179,6 @@ public interface ClientProtocol {
* @return A RemoteIterator which returns CachePool objects.
*/
@Idempotent
- public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
+ public RemoteIterator<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
}
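[Editor's sketch, not part of the patch: a client-side walk over the new entry type. Here namenode is an assumed ClientProtocol handle, and the empty string is assumed as the initial cursor.]

    RemoteIterator<CachePoolEntry> it = namenode.listCachePools("");
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      // Each entry pairs the static pool configuration with its live stats.
      System.out.println(entry.getInfo().getPoolName() + ": " + entry.getStats());
    }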
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Mon Dec 2 17:41:44 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterat
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -51,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -103,7 +106,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
@@ -1141,18 +1143,15 @@ public class ClientNamenodeProtocolServe
public ListCachePoolsResponseProto listCachePools(RpcController controller,
ListCachePoolsRequestProto request) throws ServiceException {
try {
- RemoteIterator<CachePoolInfo> iter =
+ RemoteIterator<CachePoolEntry> iter =
server.listCachePools(request.getPrevPoolName());
ListCachePoolsResponseProto.Builder responseBuilder =
ListCachePoolsResponseProto.newBuilder();
String prevPoolName = null;
while (iter.hasNext()) {
- CachePoolInfo pool = iter.next();
- ListCachePoolsResponseElementProto.Builder elemBuilder =
- ListCachePoolsResponseElementProto.newBuilder();
- elemBuilder.setInfo(PBHelper.convert(pool));
- responseBuilder.addElements(elemBuilder.build());
- prevPoolName = pool.getPoolName();
+ CachePoolEntry entry = iter.next();
+ responseBuilder.addEntries(PBHelper.convert(entry));
+ prevPoolName = entry.getInfo().getPoolName();
}
// fill in hasNext
if (prevPoolName == null) {
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Mon Dec 2 17:41:44 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -96,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1141,23 +1142,23 @@ public class ClientNamenodeProtocolTrans
}
}
- private static class BatchedCachePoolInfo
- implements BatchedEntries<CachePoolInfo> {
+ private static class BatchedCachePoolEntries
+ implements BatchedEntries<CachePoolEntry> {
private final ListCachePoolsResponseProto proto;
- public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) {
+ public BatchedCachePoolEntries(ListCachePoolsResponseProto proto) {
this.proto = proto;
}
@Override
- public CachePoolInfo get(int i) {
- ListCachePoolsResponseElementProto elem = proto.getElements(i);
- return PBHelper.convert(elem.getInfo());
+ public CachePoolEntry get(int i) {
+ CachePoolEntryProto elem = proto.getEntries(i);
+ return PBHelper.convert(elem);
}
@Override
public int size() {
- return proto.getElementsCount();
+ return proto.getEntriesCount();
}
@Override
@@ -1165,19 +1166,19 @@ public class ClientNamenodeProtocolTrans
return proto.getHasMore();
}
}
-
+
private class CachePoolIterator
- extends BatchedRemoteIterator<String, CachePoolInfo> {
+ extends BatchedRemoteIterator<String, CachePoolEntry> {
public CachePoolIterator(String prevKey) {
super(prevKey);
}
@Override
- public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
+ public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
throws IOException {
try {
- return new BatchedCachePoolInfo(
+ return new BatchedCachePoolEntries(
rpcProxy.listCachePools(null,
ListCachePoolsRequestProto.newBuilder().
setPrevPoolName(prevKey).build()));
@@ -1187,13 +1188,13 @@ public class ClientNamenodeProtocolTrans
}
@Override
- public String elementToPrevKey(CachePoolInfo element) {
- return element.getPoolName();
+ public String elementToPrevKey(CachePoolEntry entry) {
+ return entry.getInfo().getPoolName();
}
}
@Override
- public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
+ public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
throws IOException {
return new CachePoolIterator(prevKey);
}
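[Editor's sketch, not part of the patch: CachePoolIterator above follows BatchedRemoteIterator's paging contract, where makeRequest fetches the batch after a cursor and elementToPrevKey turns the last element served into the next cursor. A toy subclass of the same contract, assuming the BatchedListEntries helper nested in BatchedRemoteIterator:]

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.fs.BatchedRemoteIterator;

    class EmptyIterator extends BatchedRemoteIterator<String, String> {
      EmptyIterator() {
        super(""); // initial cursor
      }

      @Override
      public BatchedEntries<String> makeRequest(String prevKey) throws IOException {
        // A real client would issue an RPC here; this toy returns a final,
        // empty batch so iteration stops immediately.
        return new BatchedListEntries<String>(
            Collections.<String>emptyList(), false);
      }

      @Override
      public String elementToPrevKey(String element) {
        return element; // the last element served becomes the next cursor
      }
    }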
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Mon Dec 2 17:41:44 2013
@@ -39,7 +39,9 @@ import org.apache.hadoop.hdfs.StorageTyp
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -60,8 +62,11 @@ import org.apache.hadoop.hdfs.protocol.S
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
@@ -1698,6 +1703,9 @@ public class PBHelper {
if (info.getPool() != null) {
builder.setPool(info.getPool());
}
+ if (info.getExpiration() != null) {
+ builder.setExpiration(convert(info.getExpiration()));
+ }
return builder.build();
}
@@ -1718,15 +1726,35 @@ public class PBHelper {
if (proto.hasPool()) {
builder.setPool(proto.getPool());
}
+ if (proto.hasExpiration()) {
+ builder.setExpiration(convert(proto.getExpiration()));
+ }
return builder.build();
}
-
+
+ public static CacheDirectiveInfoExpirationProto convert(
+ CacheDirectiveInfo.Expiration expiration) {
+ return CacheDirectiveInfoExpirationProto.newBuilder()
+ .setIsRelative(expiration.isRelative())
+ .setMillis(expiration.getMillis())
+ .build();
+ }
+
+ public static CacheDirectiveInfo.Expiration convert(
+ CacheDirectiveInfoExpirationProto proto) {
+ if (proto.getIsRelative()) {
+ return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis());
+ }
+ return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis());
+ }
+
public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
CacheDirectiveStatsProto.Builder builder =
CacheDirectiveStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setFilesAffected(stats.getFilesAffected());
+ builder.setHasExpired(stats.hasExpired());
return builder.build();
}
@@ -1735,6 +1763,7 @@ public class PBHelper {
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setFilesAffected(proto.getFilesAffected());
+ builder.setHasExpired(proto.getHasExpired());
return builder.build();
}
@@ -1789,6 +1818,35 @@ public class PBHelper {
return info;
}
+ public static CachePoolStatsProto convert(CachePoolStats stats) {
+ CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
+ builder.setBytesNeeded(stats.getBytesNeeded());
+ builder.setBytesCached(stats.getBytesCached());
+ builder.setFilesAffected(stats.getFilesAffected());
+ return builder.build();
+ }
+
+ public static CachePoolStats convert(CachePoolStatsProto proto) {
+ CachePoolStats.Builder builder = new CachePoolStats.Builder();
+ builder.setBytesNeeded(proto.getBytesNeeded());
+ builder.setBytesCached(proto.getBytesCached());
+ builder.setFilesAffected(proto.getFilesAffected());
+ return builder.build();
+ }
+
+ public static CachePoolEntryProto convert(CachePoolEntry entry) {
+ CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder();
+ builder.setInfo(PBHelper.convert(entry.getInfo()));
+ builder.setStats(PBHelper.convert(entry.getStats()));
+ return builder.build();
+ }
+
+ public static CachePoolEntry convert(CachePoolEntryProto proto) {
+ CachePoolInfo info = PBHelper.convert(proto.getInfo());
+ CachePoolStats stats = PBHelper.convert(proto.getStats());
+ return new CachePoolEntry(info, stats);
+ }
+
public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
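[Editor's sketch, not part of the patch: the new expiration converters round-trip cleanly between the client type and its protobuf form. The duration is invented.]

    CacheDirectiveInfo.Expiration original =
        CacheDirectiveInfo.Expiration.newRelative(30 * 60 * 1000L); // 30 minutes
    CacheDirectiveInfoExpirationProto proto = PBHelper.convert(original);
    CacheDirectiveInfo.Expiration restored = PBHelper.convert(proto);
    // restored.isRelative() == original.isRelative()
    // restored.getMillis()  == original.getMillis()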
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java Mon Dec 2 17:41:44 2013
@@ -150,5 +150,5 @@ interface AsyncLogger {
* Append an HTML-formatted report for this logger's status to the provided
* StringBuilder. This is displayed on the NN web UI.
*/
- public void appendHtmlReport(StringBuilder sb);
+ public void appendReport(StringBuilder sb);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java Mon Dec 2 17:41:44 2013
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.qjournal.p
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.jasper.compiler.JspUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
@@ -177,17 +176,16 @@ class AsyncLoggerSet {
* state of the underlying loggers.
* @param sb the StringBuilder to append to
*/
- void appendHtmlReport(StringBuilder sb) {
- sb.append("<table class=\"storage\">");
- sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
- for (AsyncLogger l : loggers) {
- sb.append("<tr>");
- sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
- sb.append("<td>");
- l.appendHtmlReport(sb);
- sb.append("</td></tr>\n");
+ void appendReport(StringBuilder sb) {
+ for (int i = 0, len = loggers.size(); i < len; ++i) {
+ AsyncLogger l = loggers.get(i);
+ if (i != 0) {
+ sb.append(", ");
+ }
+ sb.append(l).append(" (");
+ l.appendReport(sb);
+ sb.append(")");
}
- sb.append("</table>");
}
/**
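[Editor's sketch, not part of the patch: with the HTML table gone, the set-level report is a single comma-separated line, each logger followed by its own status in parentheses. Addresses and txids below are invented.]

    StringBuilder sb = new StringBuilder();
    loggerSet.appendReport(sb); // loggerSet: an AsyncLoggerSet instance (assumed)
    // e.g. "192.168.1.10:8485 (Written txid 5000), 192.168.1.11:8485 (Written txid 5000)"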
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java Mon Dec 2 17:41:44 2013
@@ -569,7 +569,7 @@ public class IPCLoggerChannel implements
}
@Override
- public synchronized void appendHtmlReport(StringBuilder sb) {
+ public synchronized void appendReport(StringBuilder sb) {
sb.append("Written txid ").append(highestAckedTxId);
long behind = getLagTxns();
if (behind > 0) {