Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/10/29 01:49:23 UTC
svn commit: r1536572 [1/4] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/
src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/client/
src/main/java/org/apache/hadoop/hdfs/p...
Author: cmccabe
Date: Tue Oct 29 00:49:20 2013
New Revision: 1536572
URL: http://svn.apache.org/r1536572
Log:
Merge HDFS-4949 branch back into trunk
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
- copied unchanged from r1536569, hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs:r1509426-1536569
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 29 00:49:20 2013
@@ -123,7 +123,73 @@ Trunk (Unreleased)
HDFS-5041. Add the time of last heartbeat to dead server Web UI (Shinichi
Yamashita via brandonli)
+ HDFS-5049. Add JNI mlock support. (Andrew Wang via Colin Patrick McCabe)
+
+ HDFS-5051. Propagate cache status information from the DataNode to the
+ NameNode (Andrew Wang via Colin Patrick McCabe)
+
+ HDFS-5052. Add cacheRequest/uncacheRequest support to NameNode.
+ (contributed by Colin Patrick McCabe)
+
+ HDFS-5050. Add DataNode support for mlock and munlock
+ (Andrew Wang via Colin Patrick McCabe)
+
+ HDFS-5141. Add cache status information to datanode heartbeat.
+ (Contributed by Andrew Wang)
+
+ HDFS-5121. Add RPCs for creating and manipulating cache pools.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin
+ Patrick McCabe)
+
+ HDFS-5120. Add command-line support for manipulating cache pools.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5158. Add command-line support for manipulating cache directives.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5053. NameNode should invoke DataNode APIs to coordinate caching.
+ (Andrew Wang)
+
+ HDFS-5197. Document dfs.cachereport.intervalMsec in hdfs-default.xml.
+ (cnauroth)
+
+ HDFS-5213. Separate PathBasedCacheEntry and PathBasedCacheDirectiveWithId.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5236. Change PathBasedCacheDirective APIs to be a single value
+ rather than batch. (Contributed by Andrew Wang)
+
+ HDFS-5191. Revisit zero-copy API in FSDataInputStream to make it more
+ intuitive. (Contributed by Colin Patrick McCabe)
+
+ HDFS-5119. Persist CacheManager state in the edit log.
+ (Contributed by Andrew Wang)
+
+ HDFS-5190. Move cache pool related CLI commands to CacheAdmin.
+ (Contributed by Andrew Wang)
+
+ HDFS-5304. Expose if a block replica is cached in getFileBlockLocations.
+ (Contributed by Andrew Wang)
+
+ HDFS-5224. Refactor PathBasedCache* methods to use a Path rather than a
+ String. (cnauroth)
+
+ HDFS-5358. Add replication field to PathBasedCacheDirective.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5359. Allow LightWeightGSet#Iterator to remove elements.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5096. Automatically cache new data added to a cached path.
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5378. In CacheReport, don't send genstamp and length on the wire
+ (Contributed by Colin Patrick McCabe)
+
OPTIMIZATIONS
+ HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
BUG FIXES
HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
@@ -241,6 +307,53 @@ Trunk (Unreleased)
HDFS-4366. Block Replication Policy Implementation May Skip Higher-Priority
Blocks for Lower-Priority Blocks (Derek Dagit via kihwal)
+ HDFS-5169. hdfs.c: translateZCRException: null pointer deref when
+ translating some exceptions. (Contributed by Colin Patrick McCabe)
+
+ HDFS-5198. NameNodeRpcServer must not send back DNA_FINALIZE in reply to a
+ cache report. (Contributed by Colin Patrick McCabe)
+
+ HDFS-5195. Prevent passing null pointer to mlock and munlock. (cnauroth)
+
+ HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit
+ (Contributed by Colin Patrick McCabe)
+
+ HDFS-5210. Fix some failing unit tests on HDFS-4949 branch.
+ (Contributed by Andrew Wang)
+
+ HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth)
+
+ HDFS-5309. Fix failing caching unit tests. (Andrew Wang)
+
+ HDFS-5314. Do not expose CachePool type in AddCachePoolOp (Colin Patrick
+ McCabe)
+
+ HDFS-5348. Fix error message when dfs.datanode.max.locked.memory is
+ improperly configured. (Colin Patrick McCabe)
+
+ HDFS-5373. hdfs cacheadmin -addDirective short usage does not mention
+ -replication parameter. (cnauroth)
+
+ HDFS-5383. fix broken caching unit tests. (Andrew Wang)
+
+ HDFS-5388. Loading fsimage fails to find cache pools during namenode
+ startup. (Chris Nauroth via Colin Patrick McCabe)
+
+ HDFS-5203. Concurrent clients that add a cache directive on the same path
+ may prematurely uncache from each other. (Chris Nauroth via Colin Patrick
+ McCabe)
+
+ HDFS-5385. Caching RPCs are AtMostOnce, but do not persist client ID and
+ call ID to edit log. (Chris Nauroth via Colin Patrick McCabe)
+
+ HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949
+ branch. (Chris Nauroth via Andrew Wang)
+
+ HDFS-5405. Fix possible RetryCache hang for caching RPC handlers in
+ FSNamesystem. (wang)
+
+ HDFS-5419. Fixup test-patch.sh warnings on HDFS-4949 branch. (wang)
+
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Tue Oct 29 00:49:20 2013
@@ -346,4 +346,22 @@
<Method name="create" />
<Bug pattern="UL_UNRELEASED_LOCK" />
</Match>
+ <!-- Manually verified to be okay, we want to throw away the top bit here -->
+ <Match>
+ <Class name="org.apache.hadoop.hdfs.server.namenode.CachedBlock" />
+ <Method name="getReplication" />
+ <Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
+ </Match>
+ <!-- These two are used for shutting down and kicking the CRMon, do not need strong sync -->
+ <Match>
+ <Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />
+ <Field name="shutdown" />
+ <Bug pattern="IS2_INCONSISTENT_SYNC" />
+ </Match>
+ <Match>
+ <Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />
+ <Field name="rescanImmediately" />
+ <Bug pattern="IS2_INCONSISTENT_SYNC" />
+ </Match>
+
</FindBugsFilter>
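For context, FindBugs raises ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT when the int result of an unsigned right shift is cast down to a narrower type, silently dropping the high bits; the exclusion above records that CachedBlock#getReplication does this on purpose. A hypothetical Java illustration of the flagged pattern (the field layout here is invented for illustration, not CachedBlock's actual encoding):

    // Pack a replication factor above a one-bit mark flag (illustrative only).
    int replicationAndMark = (3 << 1) | 1;                  // replication = 3, mark = 1
    // The unsigned right shift drops the mark bit; the narrowing cast back to
    // short discards any remaining high bits -- exactly what FindBugs warns
    // about, and exactly what is intended here.
    short replication = (short) (replicationAndMark >>> 1); // == 3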
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Tue Oct 29 00:49:20 2013
@@ -59,6 +59,7 @@ function print_usage(){
echo " Use -help to see options"
echo " portmap run a portmap service"
echo " nfs3 run an NFS version 3 gateway"
+ echo " cacheadmin configure the HDFS cache"
echo ""
echo "Most commands print help when invoked w/o parameters."
}
@@ -155,6 +156,8 @@ elif [ "$COMMAND" = "portmap" ] ; then
CLASS=org.apache.hadoop.portmap.Portmap
elif [ "$COMMAND" = "nfs3" ] ; then
CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+elif [ "$COMMAND" = "cacheadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
else
CLASS="$COMMAND"
fi
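The new cacheadmin subcommand dispatches to org.apache.hadoop.hdfs.tools.CacheAdmin, added in this merge. A minimal sketch of driving the same class programmatically, assuming CacheAdmin follows the Configured/Tool convention used by other HDFS command-line tools (an assumption; the class body is not shown in this excerpt):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.CacheAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class CacheAdminLauncher {
      public static void main(String[] args) throws Exception {
        // Equivalent to `hdfs cacheadmin <args>`; assumes CacheAdmin implements Tool.
        System.exit(ToolRunner.run(new Configuration(), new CacheAdmin(), args));
      }
    }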
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1509426-1536569
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Oct 29 00:49:20 2013
@@ -98,6 +98,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32C
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -107,6 +108,7 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.hdfs.client.ClientMmapManager;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -115,6 +117,8 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -2286,7 +2290,73 @@ public class DFSClient implements java.i
throw re.unwrapRemoteException();
}
}
+
+ public PathBasedCacheDescriptor addPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException {
+ checkOpen();
+ try {
+ return namenode.addPathBasedCacheDirective(directive);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+ public void removePathBasedCacheDescriptor(long id)
+ throws IOException {
+ checkOpen();
+ try {
+ namenode.removePathBasedCacheDescriptor(id);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
+ public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
+ String pool, String path) throws IOException {
+ checkOpen();
+ try {
+ return namenode.listPathBasedCacheDescriptors(0, pool, path);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
+ public void addCachePool(CachePoolInfo info) throws IOException {
+ checkOpen();
+ try {
+ namenode.addCachePool(info);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
+ public void modifyCachePool(CachePoolInfo info) throws IOException {
+ checkOpen();
+ try {
+ namenode.modifyCachePool(info);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
+ public void removeCachePool(String poolName) throws IOException {
+ checkOpen();
+ try {
+ namenode.removeCachePool(poolName);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
+ public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+ checkOpen();
+ try {
+ return namenode.listCachePools("");
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException();
+ }
+ }
+
/**
* Save namespace image.
*
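Each of the new DFSClient cache methods above follows one idiom: checkOpen() rejects calls on a closed client, and RemoteException#unwrapRemoteException() converts the RPC-transported exception back into the type the server originally threw. A minimal caller-side sketch (the single-argument CachePoolInfo constructor is assumed, since that class body is not part of this excerpt):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    static void addPool(DFSClient dfsClient) {
      try {
        // CachePoolInfo(String) is an assumed constructor.
        dfsClient.addCachePool(new CachePoolInfo("pool1"));
      } catch (IOException e) {
        // Already unwrapped by DFSClient: e carries the server-side exception
        // type rather than a raw RemoteException.
        System.err.println("addCachePool failed: " + e.getMessage());
      }
    }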
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Oct 29 00:49:20 2013
@@ -99,6 +99,12 @@ public class DFSConfigKeys extends Commo
public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT = false;
public static final String DFS_DATANODE_USE_DN_HOSTNAME = "dfs.datanode.use.datanode.hostname";
public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
+ public static final String DFS_DATANODE_MAX_LOCKED_MEMORY_KEY = "dfs.datanode.max.locked.memory";
+ public static final long DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT = 0;
+ public static final String DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY = "dfs.datanode.fsdatasetcache.max.threads.per.volume";
+ public static final int DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT = 4;
+ public static final String DFS_NAMENODE_CACHING_ENABLED_KEY = "dfs.namenode.caching.enabled";
+ public static final boolean DFS_NAMENODE_CACHING_ENABLED_DEFAULT = false;
public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
@@ -197,6 +203,16 @@ public class DFSConfigKeys extends Commo
public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
+ public static final String DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES =
+ "dfs.namenode.list.cache.pools.num.responses";
+ public static final int DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100;
+ public static final String DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES =
+ "dfs.namenode.list.cache.descriptors.num.responses";
+ public static final int DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT = 100;
+ public static final String DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS =
+ "dfs.namenode.path.based.cache.refresh.interval.ms";
+ public static final long DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT = 300000L;
+
// Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
@@ -362,6 +378,8 @@ public class DFSConfigKeys extends Commo
public static final long DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 60 * 60 * 1000;
public static final String DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
+ public static final String DFS_CACHEREPORT_INTERVAL_MSEC_KEY = "dfs.cachereport.intervalMsec";
+ public static final long DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
public static final String DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
public static final int DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
public static final String DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
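A minimal sketch of setting the new caching keys on a Configuration, using only constants added above (the values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    Configuration conf = new Configuration();
    // Enable path-based caching on the NameNode (defaults to false above).
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
    // Let each DataNode mlock up to 64 MiB of block data (defaults to 0 above).
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 64L * 1024 * 1024);
    // Send cache reports every 10 seconds (the default shown above).
    conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 10 * 1000L);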
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Tue Oct 29 00:49:20 2013
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -66,6 +67,8 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -1579,5 +1582,113 @@ public class DistributedFileSystem exten
}
}.resolve(this, absF);
}
+
+ /**
+ * Add a new PathBasedCacheDirective.
+ *
+ * @param directive A PathBasedCacheDirective to add
+ * @return PathBasedCacheDescriptor associated with the added directive
+ * @throws IOException if the directive could not be added
+ */
+ public PathBasedCacheDescriptor addPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException {
+ Path path = new Path(getPathName(fixRelativePart(directive.getPath()))).
+ makeQualified(getUri(), getWorkingDirectory());
+ return dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+ setPath(path).
+ setReplication(directive.getReplication()).
+ setPool(directive.getPool()).
+ build());
+ }
+
+ /**
+ * Remove a PathBasedCacheDescriptor.
+ *
+ * @param descriptor PathBasedCacheDescriptor to remove
+ * @throws IOException if the descriptor could not be removed
+ */
+ public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor)
+ throws IOException {
+ dfs.removePathBasedCacheDescriptor(descriptor.getEntryId());
+ }
+ /**
+ * List the set of cached paths of a cache pool. Incrementally fetches results
+ * from the server.
+ *
+ * @param pool The cache pool to list, or null to list all pools.
+ * @param path The path name to list, or null to list all paths.
+ * @return A RemoteIterator which returns PathBasedCacheDescriptor objects.
+ */
+ public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
+ String pool, final Path path) throws IOException {
+ String pathName = path != null ? getPathName(fixRelativePart(path)) : null;
+ final RemoteIterator<PathBasedCacheDescriptor> iter =
+ dfs.listPathBasedCacheDescriptors(pool, pathName);
+ return new RemoteIterator<PathBasedCacheDescriptor>() {
+ @Override
+ public boolean hasNext() throws IOException {
+ return iter.hasNext();
+ }
+
+ @Override
+ public PathBasedCacheDescriptor next() throws IOException {
+ PathBasedCacheDescriptor desc = iter.next();
+ Path qualPath = desc.getPath().makeQualified(getUri(), path);
+ return new PathBasedCacheDescriptor(desc.getEntryId(), qualPath,
+ desc.getReplication(), desc.getPool());
+ }
+ };
+ }
+
+ /**
+ * Add a cache pool.
+ *
+ * @param info
+ * The request to add a cache pool.
+ * @throws IOException
+ * If the request could not be completed.
+ */
+ public void addCachePool(CachePoolInfo info) throws IOException {
+ CachePoolInfo.validate(info);
+ dfs.addCachePool(info);
+ }
+
+ /**
+ * Modify an existing cache pool.
+ *
+ * @param info
+ * The request to modify a cache pool.
+ * @throws IOException
+ * If the request could not be completed.
+ */
+ public void modifyCachePool(CachePoolInfo info) throws IOException {
+ CachePoolInfo.validate(info);
+ dfs.modifyCachePool(info);
+ }
+
+ /**
+ * Remove a cache pool.
+ *
+ * @param poolName
+ * Name of the cache pool to remove.
+ * @throws IOException
+ * if the cache pool did not exist, or could not be removed.
+ */
+ public void removeCachePool(String poolName) throws IOException {
+ CachePoolInfo.validateName(poolName);
+ dfs.removeCachePool(poolName);
+ }
+
+ /**
+ * List all cache pools.
+ *
+ * @return A remote iterator from which you can get CachePoolInfo objects.
+ * Requests will be made as needed.
+ * @throws IOException
+ * If there was an error listing cache pools.
+ */
+ public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+ return dfs.listCachePools();
+ }
}
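Taken together, the methods above give DistributedFileSystem an end-to-end caching workflow: create a pool, add a directive, list the resulting descriptors, remove one. A minimal sketch under two assumptions not confirmed by this excerpt, namely that CachePoolInfo has a single-argument pool-name constructor and that the caller already holds an open DistributedFileSystem:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

    static void cachingDemo(DistributedFileSystem dfs) throws IOException {
      dfs.addCachePool(new CachePoolInfo("pool1"));       // assumed constructor
      PathBasedCacheDescriptor desc = dfs.addPathBasedCacheDirective(
          new PathBasedCacheDirective.Builder()           // Builder usage as in the diff above
              .setPath(new Path("/hot/data"))
              .setReplication((short) 2)
              .setPool("pool1")
              .build());
      RemoteIterator<PathBasedCacheDescriptor> it =
          dfs.listPathBasedCacheDescriptors("pool1", null); // null path = all paths
      while (it.hasNext()) {
        System.out.println(it.next().getPath());
      }
      dfs.removePathBasedCacheDescriptor(desc);
    }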
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Oct 29 00:49:20 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.InvalidPathE
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1093,5 +1094,79 @@ public interface ClientProtocol {
@Idempotent
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException;
-}
+ /**
+ * Add a PathBasedCache entry to the CacheManager.
+ *
+ * @param directive A PathBasedCacheDirective to be added
+ * @return A PathBasedCacheDescriptor associated with the added directive
+ * @throws IOException if the directive could not be added
+ */
+ @AtMostOnce
+ public PathBasedCacheDescriptor addPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException;
+
+ /**
+ * Remove a PathBasedCacheDescriptor from the CacheManager.
+ *
+ * @param id The entry ID of the PathBasedCacheDescriptor to remove
+ * @throws IOException if the cache descriptor could not be removed
+ */
+ @AtMostOnce
+ public void removePathBasedCacheDescriptor(Long id) throws IOException;
+
+ /**
+ * List the set of cached paths of a cache pool. Incrementally fetches results
+ * from the server.
+ *
+ * @param prevId The last listed entry ID, or -1 if this is the first call to
+ * listPathBasedCacheDescriptors.
+ * @param pool The cache pool to list, or null to list all pools.
+ * @param path The path name to list, or null to list all paths.
+ * @return A RemoteIterator which returns PathBasedCacheDescriptor objects.
+ */
+ @Idempotent
+ public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
+ long prevId, String pool, String path) throws IOException;
+
+ /**
+ * Add a new cache pool.
+ *
+ * @param info Description of the new cache pool
+ * @throws IOException If the request could not be completed.
+ */
+ @AtMostOnce
+ public void addCachePool(CachePoolInfo info) throws IOException;
+
+ /**
+ * Modify an existing cache pool.
+ *
+ * @param req
+ * The request to modify a cache pool.
+ * @throws IOException
+ * If the request could not be completed.
+ */
+ @AtMostOnce
+ public void modifyCachePool(CachePoolInfo req) throws IOException;
+
+ /**
+ * Remove a cache pool.
+ *
+ * @param pool name of the cache pool to remove.
+ * @throws IOException if the cache pool did not exist, or could not be
+ * removed.
+ */
+ @AtMostOnce
+ public void removeCachePool(String pool) throws IOException;
+
+ /**
+ * List the set of cache pools. Incrementally fetches results from the server.
+ *
+ * @param prevPool name of the last pool listed, or the empty string if this is
+ * the first invocation of listCachePools
+ * @return A RemoteIterator which returns CachePool objects.
+ */
+ @Idempotent
+ public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
+ throws IOException;
+}
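Both listing RPCs above are built for incremental fetching: the caller threads through the last entry ID (or pool name) it has seen, so a listing can be resumed without replaying earlier results. A hypothetical driving loop against any ClientProtocol proxy, using -1 for the first call as the javadoc above specifies:

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;

    static void listPool(ClientProtocol namenode) throws IOException {
      RemoteIterator<PathBasedCacheDescriptor> iter =
          namenode.listPathBasedCacheDescriptors(-1, "pool1", null);
      long lastId = -1;
      while (iter.hasNext()) {
        PathBasedCacheDescriptor d = iter.next();
        lastId = d.getEntryId(); // resume point: pass as prevId to continue later
        System.out.println(d.getPath() + " in pool " + d.getPool());
      }
    }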
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Tue Oct 29 00:49:20 2013
@@ -44,6 +44,8 @@ public class DatanodeInfo extends Datano
private long dfsUsed;
private long remaining;
private long blockPoolUsed;
+ private long cacheCapacity;
+ private long cacheUsed;
private long lastUpdate;
private int xceiverCount;
private String location = NetworkTopology.DEFAULT_RACK;
@@ -82,6 +84,8 @@ public class DatanodeInfo extends Datano
this.dfsUsed = from.getDfsUsed();
this.remaining = from.getRemaining();
this.blockPoolUsed = from.getBlockPoolUsed();
+ this.cacheCapacity = from.getCacheCapacity();
+ this.cacheUsed = from.getCacheUsed();
this.lastUpdate = from.getLastUpdate();
this.xceiverCount = from.getXceiverCount();
this.location = from.getNetworkLocation();
@@ -94,6 +98,8 @@ public class DatanodeInfo extends Datano
this.dfsUsed = 0L;
this.remaining = 0L;
this.blockPoolUsed = 0L;
+ this.cacheCapacity = 0L;
+ this.cacheUsed = 0L;
this.lastUpdate = 0L;
this.xceiverCount = 0;
this.adminState = null;
@@ -106,12 +112,14 @@ public class DatanodeInfo extends Datano
public DatanodeInfo(DatanodeID nodeID, String location,
final long capacity, final long dfsUsed, final long remaining,
- final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+ final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
+ final long lastUpdate, final int xceiverCount,
final AdminStates adminState) {
this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(),
nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
- lastUpdate, xceiverCount, location, adminState);
+ cacheCapacity, cacheUsed, lastUpdate, xceiverCount, location,
+ adminState);
}
/** Constructor */
@@ -119,7 +127,8 @@ public class DatanodeInfo extends Datano
final String storageID, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
- final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+ final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
+ final long lastUpdate, final int xceiverCount,
final String networkLocation, final AdminStates adminState) {
super(ipAddr, hostName, storageID, xferPort, infoPort,
infoSecurePort, ipcPort);
@@ -127,6 +136,8 @@ public class DatanodeInfo extends Datano
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = blockPoolUsed;
+ this.cacheCapacity = cacheCapacity;
+ this.cacheUsed = cacheUsed;
this.lastUpdate = lastUpdate;
this.xceiverCount = xceiverCount;
this.location = networkLocation;
@@ -172,6 +183,42 @@ public class DatanodeInfo extends Datano
return DFSUtil.getPercentRemaining(remaining, capacity);
}
+ /**
+ * @return Amount of cache capacity in bytes
+ */
+ public long getCacheCapacity() {
+ return cacheCapacity;
+ }
+
+ /**
+ * @return Amount of cache used in bytes
+ */
+ public long getCacheUsed() {
+ return cacheUsed;
+ }
+
+ /**
+ * @return Cache used as a percentage of the datanode's total cache capacity
+ */
+ public float getCacheUsedPercent() {
+ return DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
+ }
+
+ /**
+ * @return Amount of cache remaining in bytes
+ */
+ public long getCacheRemaining() {
+ return cacheCapacity - cacheUsed;
+ }
+
+ /**
+ * @return Cache remaining as a percentage of the datanode's total cache
+ * capacity
+ */
+ public float getCacheRemainingPercent() {
+ return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
+ }
+
/** The time when this information was accurate. */
public long getLastUpdate() { return lastUpdate; }
@@ -198,6 +245,16 @@ public class DatanodeInfo extends Datano
this.blockPoolUsed = bpUsed;
}
+ /** Sets cache capacity. */
+ public void setCacheCapacity(long cacheCapacity) {
+ this.cacheCapacity = cacheCapacity;
+ }
+
+ /** Sets cache used. */
+ public void setCacheUsed(long cacheUsed) {
+ this.cacheUsed = cacheUsed;
+ }
+
/** Sets time when this information was accurate. */
public void setLastUpdate(long lastUpdate) {
this.lastUpdate = lastUpdate;
@@ -227,6 +284,11 @@ public class DatanodeInfo extends Datano
long nonDFSUsed = getNonDfsUsed();
float usedPercent = getDfsUsedPercent();
float remainingPercent = getRemainingPercent();
+ long cc = getCacheCapacity();
+ long cr = getCacheRemaining();
+ long cu = getCacheUsed();
+ float cacheUsedPercent = getCacheUsedPercent();
+ float cacheRemainingPercent = getCacheRemainingPercent();
String lookupName = NetUtils.getHostNameOfIP(getName());
buffer.append("Name: "+ getName());
@@ -253,6 +315,12 @@ public class DatanodeInfo extends Datano
buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
+ buffer.append("Configured Cache Capacity: "+c+" ("+StringUtils.byteDesc(cc)+")"+"\n");
+ buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(u)+")"+"\n");
+ buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(r)+")"+"\n");
+ buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
+ buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
+
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
@@ -263,6 +331,9 @@ public class DatanodeInfo extends Datano
long c = getCapacity();
long r = getRemaining();
long u = getDfsUsed();
+ long cc = getCacheCapacity();
+ long cr = getCacheRemaining();
+ long cu = getCacheUsed();
buffer.append(getName());
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append(" "+location);
@@ -278,6 +349,10 @@ public class DatanodeInfo extends Datano
buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
buffer.append(" " + percent2String(u/(double)c));
buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
+ buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
+ buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
+ buffer.append(" " + percent2String(cu/(double)cc));
+ buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
buffer.append(" " + new Date(lastUpdate));
return buffer.toString();
}
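The derived getters added above reduce to simple arithmetic over the two new fields (the percentages are computed via DFSUtil.getPercentUsed and DFSUtil.getPercentRemaining). A worked example with illustrative values:

    long cacheCapacity = 4L * 1024 * 1024;  // e.g. dfs.datanode.max.locked.memory = 4 MiB
    long cacheUsed     = 1L * 1024 * 1024;  // 1 MiB of block data currently mlocked

    long  cacheRemaining        = cacheCapacity - cacheUsed;               // 3 MiB
    float cacheUsedPercent      = 100.0f * cacheUsed / cacheCapacity;      // 25.0
    float cacheRemainingPercent = 100.0f * cacheRemaining / cacheCapacity; // 75.0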
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Tue Oct 29 00:49:20 2013
@@ -106,7 +106,8 @@ public class LayoutVersion {
SEQUENTIAL_BLOCK_ID(-46, "Allocate block IDs sequentially and store " +
"block IDs in the edits log and image files"),
EDITLOG_SUPPORT_RETRYCACHE(-47, "Record ClientId and CallId in editlog to "
- + "enable rebuilding retry cache in case of HA failover");
+ + "enable rebuilding retry cache in case of HA failover"),
+ CACHING(-48, "Support for cache pools and path-based caching");
final int lv;
final int ancestorLV;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java Tue Oct 29 00:49:20 2013
@@ -17,15 +17,21 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import java.util.List;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
/**
* Associates a block with the Datanodes that contain its replicas
* and other block metadata (E.g. the file offset associated with this
- * block, whether it is corrupt, security token, etc).
+ * block, whether it is corrupt, whether a location is cached in memory,
+ * the security token, etc.).
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -39,9 +45,16 @@ public class LocatedBlock {
// their locations are not part of this object
private boolean corrupt;
private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
+ /**
+ * List of cached datanode locations
+ */
+ private DatanodeInfo[] cachedLocs;
+
+ // Used when there are no locations
+ private static final DatanodeInfo[] EMPTY_LOCS = new DatanodeInfo[0];
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
- this(b, locs, -1, false); // startOffset is unknown
+ this(b, locs, -1); // startOffset is unknown
}
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset) {
@@ -50,14 +63,26 @@ public class LocatedBlock {
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset,
boolean corrupt) {
+ this(b, locs, startOffset, corrupt, EMPTY_LOCS);
+ }
+
+ public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset,
+ boolean corrupt, DatanodeInfo[] cachedLocs) {
this.b = b;
this.offset = startOffset;
this.corrupt = corrupt;
if (locs==null) {
- this.locs = new DatanodeInfo[0];
+ this.locs = EMPTY_LOCS;
} else {
this.locs = locs;
}
+ Preconditions.checkArgument(cachedLocs != null,
+ "cachedLocs should not be null, use a different constructor");
+ if (cachedLocs.length == 0) {
+ this.cachedLocs = EMPTY_LOCS;
+ } else {
+ this.cachedLocs = cachedLocs;
+ }
}
public Token<BlockTokenIdentifier> getBlockToken() {
@@ -96,6 +121,36 @@ public class LocatedBlock {
return this.corrupt;
}
+ /**
+ * Add the location of a cached replica of the block.
+ *
+ * @param loc the datanode holding the cached replica
+ */
+ public void addCachedLoc(DatanodeInfo loc) {
+ List<DatanodeInfo> cachedList = Lists.newArrayList(cachedLocs);
+ if (cachedList.contains(loc)) {
+ return;
+ }
+ // Try to re-use a DatanodeInfo already in locs
+ for (int i=0; i<locs.length; i++) {
+ if (locs[i].equals(loc)) {
+ cachedList.add(locs[i]);
+ cachedLocs = cachedList.toArray(cachedLocs);
+ return;
+ }
+ }
+ // Not present in locs, add it and go
+ cachedList.add(loc);
+ cachedLocs = cachedList.toArray(cachedLocs);
+ }
+
+ /**
+ * @return Datanodes with a cached block replica
+ */
+ public DatanodeInfo[] getCachedLocations() {
+ return cachedLocs;
+ }
+
@Override
public String toString() {
return getClass().getSimpleName() + "{" + b
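A minimal sketch of the new cached-location bookkeeping (block and datanode construction are left to the caller, since those constructors are verbose): addCachedLoc de-duplicates, and re-uses the DatanodeInfo object already held in locs when the cached replica's node is also a regular location:

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    static void cachedLocDemo(ExtendedBlock block, DatanodeInfo dnA, DatanodeInfo dnB) {
      LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] { dnA, dnB });
      lb.addCachedLoc(dnA); // dnA is already in locs, so that object is re-used
      lb.addCachedLoc(dnA); // duplicate: ignored
      for (DatanodeInfo dn : lb.getCachedLocations()) {
        System.out.println("cached on " + dn); // prints dnA only
      }
    }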
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Tue Oct 29 00:49:20 2013
@@ -25,18 +25,32 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -92,16 +106,29 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@@ -151,6 +178,9 @@ import org.apache.hadoop.security.proto.
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.token.Token;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.primitives.Shorts;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -1003,5 +1033,183 @@ public class ClientNamenodeProtocolServe
throw new ServiceException(e);
}
}
+
+ @Override
+ public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective(
+ RpcController controller, AddPathBasedCacheDirectiveRequestProto request)
+ throws ServiceException {
+ try {
+ PathBasedCacheDirectiveProto proto = request.getDirective();
+ if (StringUtils.isEmpty(proto.getPath())) {
+ throw new EmptyPathError();
+ }
+ PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
+ setPath(new Path(proto.getPath())).
+ setReplication(Shorts.checkedCast(proto.getReplication())).
+ setPool(proto.getPool()).
+ build();
+ PathBasedCacheDescriptor descriptor =
+ server.addPathBasedCacheDirective(directive);
+ AddPathBasedCacheDirectiveResponseProto.Builder builder =
+ AddPathBasedCacheDirectiveResponseProto.newBuilder();
+ builder.setDescriptorId(descriptor.getEntryId());
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RemovePathBasedCacheDescriptorResponseProto removePathBasedCacheDescriptor(
+ RpcController controller,
+ RemovePathBasedCacheDescriptorRequestProto request)
+ throws ServiceException {
+ try {
+ server.removePathBasedCacheDescriptor(request.getDescriptorId());
+ RemovePathBasedCacheDescriptorResponseProto.Builder builder =
+ RemovePathBasedCacheDescriptorResponseProto.newBuilder();
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public ListPathBasedCacheDescriptorsResponseProto listPathBasedCacheDescriptors(
+ RpcController controller, ListPathBasedCacheDescriptorsRequestProto request)
+ throws ServiceException {
+ try {
+ RemoteIterator<PathBasedCacheDescriptor> iter =
+ server.listPathBasedCacheDescriptors(request.getPrevId(),
+ request.hasPool() ? request.getPool() : null,
+ request.hasPath() ? request.getPath() : null);
+ ListPathBasedCacheDescriptorsResponseProto.Builder builder =
+ ListPathBasedCacheDescriptorsResponseProto.newBuilder();
+ long prevId = 0;
+ while (iter.hasNext()) {
+ PathBasedCacheDescriptor directive = iter.next();
+ builder.addElements(
+ ListPathBasedCacheDescriptorsElementProto.newBuilder().
+ setId(directive.getEntryId()).
+ setPath(directive.getPath().toUri().getPath()).
+ setReplication(directive.getReplication()).
+ setPool(directive.getPool()));
+ prevId = directive.getEntryId();
+ }
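+ // Decide hasMore by re-running the listing from the last id returned;
+ // if that second pass yields anything, the client should fetch another batch.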
+ if (prevId == 0) {
+ builder.setHasMore(false);
+ } else {
+ iter = server.listPathBasedCacheDescriptors(prevId,
+ request.hasPool() ? request.getPool() : null,
+ request.hasPath() ? request.getPath() : null);
+ builder.setHasMore(iter.hasNext());
+ }
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public AddCachePoolResponseProto addCachePool(RpcController controller,
+ AddCachePoolRequestProto request) throws ServiceException {
+ try {
+ CachePoolInfo info =
+ new CachePoolInfo(request.getPoolName());
+ if (request.hasOwnerName()) {
+ info.setOwnerName(request.getOwnerName());
+ }
+ if (request.hasGroupName()) {
+ info.setGroupName(request.getGroupName());
+ }
+ if (request.hasMode()) {
+ info.setMode(new FsPermission((short)request.getMode()));
+ }
+ if (request.hasWeight()) {
+ info.setWeight(request.getWeight());
+ }
+ server.addCachePool(info);
+ return AddCachePoolResponseProto.newBuilder().build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
+ ModifyCachePoolRequestProto request) throws ServiceException {
+ try {
+ CachePoolInfo info =
+ new CachePoolInfo(request.getPoolName());
+ if (request.hasOwnerName()) {
+ info.setOwnerName(request.getOwnerName());
+ }
+ if (request.hasGroupName()) {
+ info.setGroupName(request.getGroupName());
+ }
+ if (request.hasMode()) {
+ info.setMode(new FsPermission((short)request.getMode()));
+ }
+ if (request.hasWeight()) {
+ info.setWeight(request.getWeight());
+ }
+ server.modifyCachePool(info);
+ return ModifyCachePoolResponseProto.newBuilder().build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RemoveCachePoolResponseProto removeCachePool(RpcController controller,
+ RemoveCachePoolRequestProto request) throws ServiceException {
+ try {
+ server.removeCachePool(request.getPoolName());
+ return RemoveCachePoolResponseProto.newBuilder().build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public ListCachePoolsResponseProto listCachePools(RpcController controller,
+ ListCachePoolsRequestProto request) throws ServiceException {
+ try {
+ RemoteIterator<CachePoolInfo> iter =
+ server.listCachePools(request.getPrevPoolName());
+ ListCachePoolsResponseProto.Builder responseBuilder =
+ ListCachePoolsResponseProto.newBuilder();
+ String prevPoolName = null;
+ while (iter.hasNext()) {
+ CachePoolInfo pool = iter.next();
+ ListCachePoolsResponseElementProto.Builder elemBuilder =
+ ListCachePoolsResponseElementProto.newBuilder();
+ elemBuilder.setPoolName(pool.getPoolName());
+ if (pool.getOwnerName() != null) {
+ elemBuilder.setOwnerName(pool.getOwnerName());
+ }
+ if (pool.getGroupName() != null) {
+ elemBuilder.setGroupName(pool.getGroupName());
+ }
+ if (pool.getMode() != null) {
+ elemBuilder.setMode(pool.getMode().toShort());
+ }
+ if (pool.getWeight() != null) {
+ elemBuilder.setWeight(pool.getWeight());
+ }
+ responseBuilder.addElements(elemBuilder.build());
+ prevPoolName = pool.getPoolName();
+ }
+ // fill in hasMore
+ if (prevPoolName == null) {
+ responseBuilder.setHasMore(false);
+ } else {
+ iter = server.listCachePools(prevPoolName);
+ responseBuilder.setHasMore(iter.hasNext());
+ }
+ return responseBuilder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
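
These server-side handlers pair with the client translator below; between
them, a caller works purely in terms of ClientProtocol. A hedged sketch of
what that looks like once both sides are in place (the namenode proxy and the
initial empty prevKey are assumptions for illustration; the chained
CachePoolInfo setters are as used elsewhere in this patch, and fields left
unset keep their server-side defaults):

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    class CachePoolAdminSketch {
      static void createAndList(ClientProtocol namenode) throws IOException {
        // Create a pool; owner, mode, and weight are all optional.
        namenode.addCachePool(new CachePoolInfo("analytics")
            .setOwnerName("hdfs")
            .setMode(new FsPermission((short) 0755))
            .setWeight(100));
        // Page through every pool, starting before the first name.
        RemoteIterator<CachePoolInfo> it = namenode.listCachePools("");
        while (it.hasNext()) {
          System.out.println(it.next().getPoolName());
        }
      }
    }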
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Tue Oct 29 00:49:20 2013
@@ -24,15 +24,20 @@ import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -46,10 +51,15 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
@@ -87,11 +97,21 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -128,6 +148,7 @@ import org.apache.hadoop.security.proto.
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.token.Token;
+import com.google.common.primitives.Shorts;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
@@ -982,4 +1003,231 @@ public class ClientNamenodeProtocolTrans
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public PathBasedCacheDescriptor addPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException {
+ try {
+ AddPathBasedCacheDirectiveRequestProto.Builder builder =
+ AddPathBasedCacheDirectiveRequestProto.newBuilder();
+ builder.setDirective(PathBasedCacheDirectiveProto.newBuilder()
+ .setPath(directive.getPath().toUri().getPath())
+ .setReplication(directive.getReplication())
+ .setPool(directive.getPool())
+ .build());
+ AddPathBasedCacheDirectiveResponseProto result =
+ rpcProxy.addPathBasedCacheDirective(null, builder.build());
+ return new PathBasedCacheDescriptor(result.getDescriptorId(),
+ directive.getPath(), directive.getReplication(),
+ directive.getPool());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void removePathBasedCacheDescriptor(Long id)
+ throws IOException {
+ try {
+ RemovePathBasedCacheDescriptorRequestProto.Builder builder =
+ RemovePathBasedCacheDescriptorRequestProto.newBuilder();
+ builder.setDescriptorId(id);
+ rpcProxy.removePathBasedCacheDescriptor(null, builder.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ private static class BatchedPathBasedCacheEntries
+ implements BatchedEntries<PathBasedCacheDescriptor> {
+ private ListPathBasedCacheDescriptorsResponseProto response;
+
+ BatchedPathBasedCacheEntries(ListPathBasedCacheDescriptorsResponseProto response) {
+ this.response = response;
+ }
+
+ @Override
+ public PathBasedCacheDescriptor get(int i) {
+ ListPathBasedCacheDescriptorsElementProto elementProto =
+ response.getElements(i);
+ return new PathBasedCacheDescriptor(elementProto.getId(),
+ new Path(elementProto.getPath()),
+ Shorts.checkedCast(elementProto.getReplication()),
+ elementProto.getPool());
+ }
+
+ @Override
+ public int size() {
+ return response.getElementsCount();
+ }
+
+ @Override
+ public boolean hasMore() {
+ return response.getHasMore();
+ }
+ }
+
+ private class PathBasedCacheEntriesIterator
+ extends BatchedRemoteIterator<Long, PathBasedCacheDescriptor> {
+ private final String pool;
+ private final String path;
+
+ public PathBasedCacheEntriesIterator(long prevKey, String pool, String path) {
+ super(prevKey);
+ this.pool = pool;
+ this.path = path;
+ }
+
+ @Override
+ public BatchedEntries<PathBasedCacheDescriptor> makeRequest(
+ Long nextKey) throws IOException {
+ ListPathBasedCacheDescriptorsResponseProto response;
+ try {
+ ListPathBasedCacheDescriptorsRequestProto.Builder builder =
+ ListPathBasedCacheDescriptorsRequestProto.newBuilder().setPrevId(nextKey);
+ if (pool != null) {
+ builder.setPool(pool);
+ }
+ if (path != null) {
+ builder.setPath(path);
+ }
+ ListPathBasedCacheDescriptorsRequestProto req = builder.build();
+ response = rpcProxy.listPathBasedCacheDescriptors(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ return new BatchedPathBasedCacheEntries(response);
+ }
+
+ @Override
+ public Long elementToPrevKey(PathBasedCacheDescriptor element) {
+ return element.getEntryId();
+ }
+ }
+
+ @Override
+ public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(long prevId,
+ String pool, String path) throws IOException {
+ return new PathBasedCacheEntriesIterator(prevId, pool, path);
+ }
+
+ @Override
+ public void addCachePool(CachePoolInfo info) throws IOException {
+ AddCachePoolRequestProto.Builder builder =
+ AddCachePoolRequestProto.newBuilder();
+ builder.setPoolName(info.getPoolName());
+ if (info.getOwnerName() != null) {
+ builder.setOwnerName(info.getOwnerName());
+ }
+ if (info.getGroupName() != null) {
+ builder.setGroupName(info.getGroupName());
+ }
+ if (info.getMode() != null) {
+ builder.setMode(info.getMode().toShort());
+ }
+ if (info.getWeight() != null) {
+ builder.setWeight(info.getWeight());
+ }
+ try {
+ rpcProxy.addCachePool(null, builder.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void modifyCachePool(CachePoolInfo req) throws IOException {
+ ModifyCachePoolRequestProto.Builder builder =
+ ModifyCachePoolRequestProto.newBuilder();
+ builder.setPoolName(req.getPoolName());
+ if (req.getOwnerName() != null) {
+ builder.setOwnerName(req.getOwnerName());
+ }
+ if (req.getGroupName() != null) {
+ builder.setGroupName(req.getGroupName());
+ }
+ if (req.getMode() != null) {
+ builder.setMode(req.getMode().toShort());
+ }
+ if (req.getWeight() != null) {
+ builder.setWeight(req.getWeight());
+ }
+ try {
+ rpcProxy.modifyCachePool(null, builder.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void removeCachePool(String cachePoolName) throws IOException {
+ try {
+ rpcProxy.removeCachePool(null,
+ RemoveCachePoolRequestProto.newBuilder().
+ setPoolName(cachePoolName).build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ private static class BatchedPathDirectiveEntries
+ implements BatchedEntries<CachePoolInfo> {
+ private final ListCachePoolsResponseProto proto;
+
+ public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
+ this.proto = proto;
+ }
+
+ @Override
+ public CachePoolInfo get(int i) {
+ ListCachePoolsResponseElementProto elem = proto.getElements(i);
+ return new CachePoolInfo(elem.getPoolName()).
+ setOwnerName(elem.getOwnerName()).
+ setGroupName(elem.getGroupName()).
+ setMode(new FsPermission((short)elem.getMode())).
+ setWeight(elem.getWeight());
+ }
+
+ @Override
+ public int size() {
+ return proto.getElementsCount();
+ }
+
+ @Override
+ public boolean hasMore() {
+ return proto.getHasMore();
+ }
+ }
+
+ private class CachePoolIterator
+ extends BatchedRemoteIterator<String, CachePoolInfo> {
+
+ public CachePoolIterator(String prevKey) {
+ super(prevKey);
+ }
+
+ @Override
+ public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
+ throws IOException {
+ try {
+ return new BatchedPathDirectiveEntries(
+ rpcProxy.listCachePools(null,
+ ListCachePoolsRequestProto.newBuilder().
+ setPrevPoolName(prevKey).build()));
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public String elementToPrevKey(CachePoolInfo element) {
+ return element.getPoolName();
+ }
+ }
+
+ @Override
+ public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
+ throws IOException {
+ return new CachePoolIterator(prevKey);
+ }
}
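
Both iterators above lean on the same BatchedRemoteIterator contract: the
subclass fetches one batch per makeRequest() call, and elementToPrevKey()
tells the base class where the next batch starts, so callers see a plain
RemoteIterator. A toy, self-contained sketch of that contract (the in-memory
paging is invented for illustration; only the interface methods exercised in
this patch - get, size, hasMore, makeRequest, elementToPrevKey - are assumed):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.BatchedRemoteIterator;
    import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;

    class PoolNamePager extends BatchedRemoteIterator<String, String> {
      private static final int BATCH = 2;
      private final List<String> names =
          Arrays.asList("pool1", "pool2", "pool3", "pool4", "pool5");

      PoolNamePager() {
        super(""); // start before the first name
      }

      @Override
      public BatchedEntries<String> makeRequest(String prevKey)
          throws IOException {
        // Emulate one RPC: return up to BATCH names after prevKey.
        int start = prevKey.isEmpty() ? 0 : names.indexOf(prevKey) + 1;
        final List<String> page =
            names.subList(start, Math.min(start + BATCH, names.size()));
        final boolean more = start + BATCH < names.size();
        return new BatchedEntries<String>() {
          @Override public String get(int i) { return page.get(i); }
          @Override public int size() { return page.size(); }
          @Override public boolean hasMore() { return more; }
        };
      }

      @Override
      public String elementToPrevKey(String element) {
        return element;
      }
    }

Iteration then goes through the inherited hasNext()/next(), exactly as the
NameNode-backed iterators are consumed by listPathBasedCacheDescriptors and
listCachePools above.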
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Tue Oct 29 00:49:20 2013
@@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
@@ -36,6 +37,8 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
@@ -152,8 +155,9 @@ public class DatanodeProtocolClientSideT
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
- StorageReport[] reports, int xmitsInProgress, int xceiverCount,
- int failedVolumes) throws IOException {
+ StorageReport[] reports, long dnCacheCapacity, long dnCacheUsed,
+ int xmitsInProgress, int xceiverCount, int failedVolumes)
+ throws IOException {
HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration))
.setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
@@ -161,7 +165,12 @@ public class DatanodeProtocolClientSideT
for (StorageReport r : reports) {
builder.addReports(PBHelper.convert(r));
}
-
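+ // Both cache fields are optional in the heartbeat proto; a value of 0 is
+ // left unset rather than sent explicitly.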
+ if (dnCacheCapacity != 0) {
+ builder.setDnCacheCapacity(dnCacheCapacity);
+ }
+ if (dnCacheUsed != 0) {
+ builder.setDnCacheUsed(dnCacheUsed);
+ }
HeartbeatResponseProto resp;
try {
resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
@@ -203,6 +212,29 @@ public class DatanodeProtocolClientSideT
}
@Override
+ public DatanodeCommand cacheReport(DatanodeRegistration registration,
+ String poolId, List<Long> blockIds) throws IOException {
+ CacheReportRequestProto.Builder builder =
+ CacheReportRequestProto.newBuilder()
+ .setRegistration(PBHelper.convert(registration))
+ .setBlockPoolId(poolId);
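+ // The cache report body is just the ids of blocks cached in this block pool.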
+ for (Long blockId : blockIds) {
+ builder.addBlocks(blockId);
+ }
+
+ CacheReportResponseProto resp;
+ try {
+ resp = rpcProxy.cacheReport(NULL_CONTROLLER, builder.build());
+ } catch (ServiceException se) {
+ throw ProtobufHelper.getRemoteException(se);
+ }
+ if (resp.hasCmd()) {
+ return PBHelper.convert(resp.getCmd());
+ }
+ return null;
+ }
+
+ @Override
public void blockReceivedAndDeleted(DatanodeRegistration registration,
String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
throws IOException {