Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/04/29 20:16:38 UTC

svn commit: r1097905 [1/14] - in /hadoop/hdfs/trunk: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/ja...

Author: suresh
Date: Fri Apr 29 18:16:32 2011
New Revision: 1097905

URL: http://svn.apache.org/viewvc?rev=1097905&view=rev
Log:
HDFS-1052. HDFS Federation - Merge of umbrella jira changes from HDFS-1052 branch into trunk.


Added:
    hadoop/hdfs/trunk/bin/distribute-exclude.sh
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/bin/distribute-exclude.sh
    hadoop/hdfs/trunk/bin/refresh-namenodes.sh
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/bin/refresh-namenodes.sh
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/DaemonFactory.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/util/DaemonFactory.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
      - copied unchanged from r1095512, hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
Removed:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataXceiver.java
Modified:
    hadoop/hdfs/trunk/   (props changed)
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/bin/hdfs
    hadoop/hdfs/trunk/bin/start-dfs.sh
    hadoop/hdfs/trunk/bin/stop-dfs.sh
    hadoop/hdfs/trunk/build.xml   (props changed)
    hadoop/hdfs/trunk/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
    hadoop/hdfs/trunk/src/java/   (props changed)
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicasMap.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/   (props changed)
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
    hadoop/hdfs/trunk/src/webapps/datanode/   (props changed)
    hadoop/hdfs/trunk/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/trunk/src/webapps/hdfs/dfsnodelist.jsp
    hadoop/hdfs/trunk/src/webapps/hdfs/nn_browsedfscontent.jsp
    hadoop/hdfs/trunk/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 29 18:16:32 2011
@@ -1,3 +1,4 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
+/hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Apr 29 18:16:32 2011
@@ -11,20 +11,253 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    HDFS-1365. Federation: propose ClusterID and BlockPoolID format 
+    (Tanping via boryas)
+
+    HDFS-1394. Federation: modify -format option for namenode to generate 
+    new blockpool id and accept newcluster (boryas)
+
+    HDFS-1400. Federation: DataTransferProtocol uses ExtendedBlockPool to 
+    include BlockPoolID in the protocol. (suresh)
+
+    HDFS-1428. Federation: add cluster ID and block pool ID into 
+    NameNode web UI. (Tanping via boryas)
+
+    HDFS-1450. Federation: Introduce block pool ID into FSDatasetInterface.
+    (suresh)
+
+    HDFS-1632. Federation: data node storage structure changes and
+    introduce block pool storage. (Tanping via suresh)
+
+    HDFS-1634. Federation: Convert single threaded DataNode into 
+    per BlockPool thread model.(boryas)
+
+    HDFS-1637. Federation: FSDataset in Datanode should be created after 
+    initial handshake with namenode. (boryas and jitendra)
+
+    HDFS-1653. Federation: Block received message from datanode sends invalid 
+    DatanodeRegistration. (Tanping via suresh)
+
+    HDFS-1645. Federation: DatanodeCommand.Finalize needs to include 
+    BlockPoolId.  (suresh)
+
+    HDFS-1638. Federation: DataNode.handleDiskError needs to inform 
+    ALL namenodes if a disk failed (boryas)
+
+    HDFS-1647. Federation: Multiple namenode configuration. (jitendra)
+
+    HDFS-1639. Federation: Add block pool management to FSDataset. (suresh)
+
+    HDFS-1648. Federation: Only DataStorage must be locked using in_use.lock 
+    and no locks must be associated with BlockPoolStorage. (Tanping via suresh)
+
+    HDFS-1641. Federation: Datanode fields that are no longer used should 
+    be removed (boryas)
+
+    HDFS-1642. Federation: add Datanode.getDNRegistration(String bpid) 
+    method  (boryas)
+
+    HDFS-1643. Federation: remove namenode argument from DataNode 
+    constructor (boryas)
+
+    HDFS-1657. Federation: Tests that corrupt block files fail due to changed 
+    file path in federation. (suresh)
+
+    HDFS-1661. Federation: Remove unnecessary TODO:FEDERATION comments.
+    (jitendra)
+
+    HDFS-1660. Federation: Datanode doesn't start with two namenodes (boryas)
+
+    HDFS-1650. Federation: TestReplication fails. (Tanping via suresh)
+
+    HDFS-1651. Federation: Tests fail due to null pointer exception in 
+    Datanode#shutdown() method. (Tanping via suresh)
+
+    HDFS-1649. Federation: Datanode command to refresh namenode list at 
+    the datanode. (jitendra)
+
+    HDFS-1646. Federation: MiniDFSCluster#waitActive() waits forever 
+    with the introduction of BPOfferService in datanode. (suresh)
+
+    HDFS-1659. Federation: BPOfferService exits after one iteration 
+    incorrectly.  (Tanping via suresh)
+
+    HDFS-1654. Federation: Fix TestDFSUpgrade and TestDFSRollback failures.
+    (suresh)
+    
+    HDFS-1668. Federation: Datanodes send block pool usage information 
+    to the namenode in heartbeat. (suresh)
+
+    HDFS-1669. Federation: Fix TestHftpFileSystem failure. (suresh)
+
+    HDFS-1670. Federation: remove dnRegistration from Datanode (boryas)
+
+    HDFS-1662. Federation: fix unit test case, TestCheckpoint 
+    and TestDataNodeMXBean (tanping via boryas)
+
+    HDFS-1671. Federation: shutdown in DataNode should be able to 
+    shutdown individual BP threads as well as the whole DN (boryas).
+
+    HDFS-1663. Federation: Rename getPoolId() everywhere to 
+    getBlockPoolId() (tanping via boryas)
+
+    HDFS-1652. Federation: Add support for multiple namenodes in MiniDFSCluster.
+    (suresh)
+
+    HDFS-1672. Federation: refactor stopDatanode(name) to work 
+    with multiple Block Pools (boryas)
+
+    HDFS-1687. Federation: DirectoryScanner changes for 
+    federation (Matt Foley via boryas)
+
+    HDFS-1626. Make BLOCK_INVALIDATE_LIMIT configurable. (szetszwo)
+
+    HDFS-1655. Federation: DataBlockScanner should scan blocks for 
+    all the block pools. (jitendra)
+
+    HDFS-1664. Federation: Add block pool storage usage to Namenode WebUI.
+    (Tanping via suresh)
+
+    HDFS-1674. Federation: Rename BlockPool class to BlockPoolSlice. 
+    (jghoman, Tanping via suresh)
+
+    HDFS-1673. Federation: Datanode changes to track block token secret per 
+    namenode. (suresh)
+
+    HDFS-1677. Federation: Fix TestFsck and TestListCorruptFileBlocks 
+    failures. (Tanping via suresh)
+
+    HDFS-1678. Federation: Remove unnecessary #getBlockpool() 
+    for NameNodeMXBean in FSNameSystem. (Tanping via Suresh)
+
+    HDFS-1688. Federation: Fix failures in fault injection tests,
+    TestDiskError, TestDatanodeRestart and TestDFSStartupVersions. (suresh)
+
+    HDFS-1696. Federation: when build version doesn't match - 
+    datanode should wait (keep connecting) until NN comes up 
+    with the right version (boryas)
+
+    HDFS-1681. Balancer: support per pool and per node policies. (szetszwo)
+
+    HDFS-1695. Federation: Fix testOIV and TestDatanodeUtils 
+    (jhoman and tanping via boryas)
+
+    HDFS-1699. Federation: Fix failure of TestBlockReport.
+    (Matt Foley via suresh)
+
+    HDFS-1698. Federation: TestOverReplicatedBlocks and TestWriteToReplica 
+    failing. (jhoman and jitendra)
+
+    HDFS-1701. Federation: Fix TestHeartbeatHandling.
+    (Erik Steffl and Tanping Wang via suresh)
+
+    HDFS-1693. Federation: Fix TestDFSStorageStateRecovery failure. (suresh)
+
+    HDFS-1694. Federation: SimulatedFSDataset changes to work with
+    federation and multiple block pools. (suresh)
+
+    HDFS-1689. Federation: Configuration for namenodes. (suresh and jitendra)
+
+    HDFS-1682. Change Balancer CLI for multiple namenodes and balancing
+    policy.  (szetszwo)
+
+    HDFS-1697. Federation: fix TestBlockRecovery (boryas)
+
+    HDFS-1702. Federation: fix TestBackupNode and TestRefreshNamenodes
+    failures. (suresh)
+
+    HDFS-1706. Federation: TestFileAppend2, TestFileAppend3 and 
+    TestBlockTokenWithDFS failing. (jitendra)
+
+    HDFS-1704. Federation: Add a tool that lists namenodes, secondary and
+    backup from configuration file. (suresh)
+
+    HDFS-1711. Federation: create method for updating machine name in 
+    DataNode.java (boryas)
+
+    HDFS-1712. Federation: when looking up datanode we should use machineName 
+    (in testOverReplicatedBlocks) (boryas)
+
+    HDFS-1709. Federation: Error "nnaddr url param is null" when clicking on a 
+    node from NN Live Node Link. (jitendra)
+
+    HDFS-1714. Federation: refactor upgrade object in DataNode (boryas) 
+
+    HDFS-1715. Federation: warning/error not generated when datanode sees 
+    inconsistent/different Cluster ID between namenodes (boryas)
+
+    HDFS-1716. Federation: Add decommission tests for federated namenodes.
+    (suresh)
+
+    HDFS-1713. Federation: Prevent DataBlockScanner from running in tight loop.
+    (jitendra)
+
+    HDFS-1721. Federation: Configuration for principal names should not be 
+    namenode specific. (jitendra)
+
+    HDFS-1717. Federation: FSDataset volumeMap access is not synchronized
+    correctly. (suresh)
+
+    HDFS-1722. Federation: Add flag to MiniDFSCluster to differentiate between
+    federation and non-federation modes. (boryas via suresh)
+
+    HDFS-1718. Federation: MiniDFSCluster#waitActive() bug causes some tests
+    to fail. (suresh)
+
+    HDFS-1719. Federation: Fix TestDFSRemove that fails intermittently.
+    (suresh)
+
+    HDFS-1720. Federation: FSVolumeSet volumes is not synchronized correctly.
+    (suresh)
+
+    HDFS-1700. Federation: fsck needs to work with federation changes.
+    (Matt Foley via suresh)
+
     HDFS-1482. Add listCorruptFileBlocks to DistributedFileSystem.
     (Patrick Kling via hairong)
 
     HDFS-1448. Add a new tool Offline Edits Viewer (oev).  (Erik Steffl
     via szetszwo)
 
-    HDFS-1626. Make BLOCK_INVALIDATE_LIMIT configurable. (szetszwo)
+    HDFS-1735. Federation: merge FSImage change in federation to
+    FSImage+NNStorage refactoring in trunk. (suresh)
+
+    HDFS-1737. Federation: Update the layout version for federation
+    changes. (suresh)
+
+    HDFS-1744. Federation: Add new layout version to offline image viewer
+    and edits viewer. (suresh)
+
+    HDFS-1745. Federation: Fix fault injection test failures. (suresh)
+
+    HDFS-1746. Federation: TestFileAppend3 fails intermittently. (jitendra)
+
+    HDFS-1703. Improve start/stop scripts and add decommission tool for
+    federation. (Tanping Wang, Erik Steffl via suresh)
+
+    HDFS-1749. Federation: TestListCorruptFileBlocks failing in federation 
+    branch. (jitendra)
+
+    HDFS-1754. Federation: testFsck fails. (boryas)
+
+    HDFS-1755. Federation: The BPOfferService must always connect to namenode as 
+    the login user. (jitendra)
 
     HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
 
+    HDFS-1791. Federation: Add command to delete block pool directories 
+    from a datanode. (jitendra)
+
     HDFS-1761. Add a new DataTransferProtocol operation, Op.TRANSFER_BLOCK,
     for transferring RBW/Finalized with acknowledgement and without using RPC.
     (szetszwo)
 
+    HDFS-1813. Federation: Authentication using BlockToken in RPC to datanode 
+               fails. (jitendra)
+
     HDFS_1630. Support fsedits checksum. (hairong)
 
     HDFS-1606. Provide a stronger data guarantee in the write pipeline by
@@ -41,10 +274,19 @@ Trunk (unreleased changes)
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
 
-    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
+    HDFS-1628. Display full path in AccessControlException.  (John George
+    via szetszwo)
+
+    HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
+
+    HDFS-1683. Test Balancer with multiple NameNodes.  (szetszwo)
+
+    HDFS-1547. Improve decommission mechanism. (suresh)
 
-    HDFS-1518. Wrong description in FSNamesystem's javadoc. 
-    (Jingguo Yao via eli)
+    HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
+    and "dfs.hosts.exlude". (Erik Steffl via suresh)
+
+    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
 
     HDFS-1506. Refactor fsimage loading code. (hairong)
 
@@ -59,14 +301,6 @@ Trunk (unreleased changes)
     HDFS-1539. A config option for the datanode to fsycn a block file
     when block is completely written. (dhruba)
 
-    HDFS-1547. Improve decommission mechanism. (suresh)
-
-    HDFS-1586. Add InterfaceAudience and InterfaceStability annotations to 
-    MiniDFSCluster. (suresh)
-
-    HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
-    and "dfs.hosts.exlude". (Erik Steffl via suresh)
-
     HDFS-1335. HDFS side change of HADDOP-6904: RPC compatibility. (hairong)
 
     HDFS-1557. Separate Storage from FSImage. (Ivan Kelly via jitendra)
@@ -76,9 +310,6 @@ Trunk (unreleased changes)
     HDFS-1629. Add a method to BlockPlacementPolicy for keeping the chosen
     nodes in the output array.  (szetszwo)
 
-    HDFS-1628. Display full path in AccessControlException.  (John George
-    via szetszwo)
-
     HDFS-1731. Allow using a file to exclude certain tests from build (todd)
 
     HDFS-1736. Remove the dependency from DatanodeJspHelper to FsShell.
@@ -155,6 +386,21 @@ Trunk (unreleased changes)
 
   BUG FIXES
 
+    HDFS-1449. Fix test failures - ExtendedBlock must return 
+    block file name in #getBlockName(). (suresh)
+
+    HDFS-1680. Fix TestBalancer. (szetszwo)
+
+    HDFS-1705. Balancer command throws NullPointerException. (suresh via
+    szetszwo)
+
+    HDFS-1559. Add missing UGM overrides to TestRefreshUserMappings
+    (Todd Lipcon via eli)
+
+    HDFS-1585. Fix build after HDFS-1547 (todd)
+
+    HDFS-1684. Balancer cannot start with multiple namenodes.  (szetszwo)
+
     HDFS-1516. mvn-install is broken after 0.22 branch creation. (cos)
 
     HDFS-1360. TestBlockRecovery should bind ephemeral ports.
@@ -162,9 +408,6 @@ Trunk (unreleased changes)
 
     HDFS-1551. Fix pom templates dependency list (gkesavan)
 
-    HDFS-1559. Add missing UGM overrides to TestRefreshUserMappings
-    (Todd Lipcon via eli)
-
     HDFS-1509. A savenamespace command writes the fsimage and edits into
     all configured directories. (dhruba)
 
@@ -173,27 +416,25 @@ Trunk (unreleased changes)
 
     HDFS-1463. Accesstime of a file is not updated in safeMode. (dhruba)
 
-    HDFS-1585. Fix build after HDFS-1547 (todd)
-
     HDFS-863. Potential deadlock in TestOverReplicatedBlocks. 
     (Ken Goodhope via jghoman)
 
-    HDFS-1610. Fix TestClientProtocolWithDelegationToken and TestBlockToken
-    on trunk after HADOOP-6904 (todd)
-
     HDFS-1607. Fix referenced to misspelled method name getProtocolSigature
     (todd)
 
+    HDFS-1610. Fix TestClientProtocolWithDelegationToken and TestBlockToken
+    on trunk after HADOOP-6904 (todd)
+
     HDFS-1600. Fix release audit warnings on trunk. (todd)
 
     HDFS-1691. Remove a duplicated static initializer for reading default
     configurations in DFSck.  (Alexey Diomin via szetszwo)
 
+    HDFS-1748. Balancer utilization classification is incomplete.  (szetszwo)
+
     HDFS-1738. change hdfs jmxget to return an empty string instead of 
     null when an attribute value is not available (tanping vi boryas)
 
-    HDFS-1748. Balancer utilization classification is incomplete.  (szetszwo)
-
     HDFS-1757. Don't compile fuse-dfs by default. (eli)
 
     HDFS-1770. TestFiRename fails due to invalid block size. (eli)
@@ -267,6 +508,8 @@ Release 0.22.0 - Unreleased
     HDFS piggyback block locations to each file status when listing a
     directory.  (hairong)
 
+    HDFS-1359. Add BlockPoolID to Block. (suresh)
+
     HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. (shv)
 
     HDFS-1435. Provide an option to store fsimage compressed. (hairong)
@@ -395,9 +638,6 @@ Release 0.22.0 - Unreleased
 
     HDFS-1426. Remove unused method BlockInfo#listCount. (hairong)
 
-    HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.
-    (jghoman)
-
     HDFS-1472. Allow programmatic access to fsck output.
     (Ramkumar Vadali via dhruba)
 
@@ -455,9 +695,15 @@ Release 0.22.0 - Unreleased
 
     HDFS-1582. Remove auto-generated native build files. (rvs via eli)
 
+    HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.
+    (jghoman)
+
     HDFS-1861. Rename dfs.datanode.max.xcievers and bump its default value.
     (eli)
 
+    HDFS-1052. HDFS Federation - Merge of umbrella jira changes from
+    HDFS-1052 branch into trunk.
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
@@ -594,6 +840,9 @@ Release 0.22.0 - Unreleased
     HDFS-1357. HFTP traffic served by DataNode shouldn't use service port 
     on NameNode. (Kan Zhang via jghoman)
 
+    HDFS-1419. HDFS Federation: Three test cases need minor modification after 
+    the new block id change (Tanping Wang via suresh)
+
     HDFS-96. HDFS supports blocks larger than 2 GB.
     (Patrick Kling via dhruba)
 
@@ -610,6 +859,9 @@ Release 0.22.0 - Unreleased
     HDFS-1498. FSDirectory#unprotectedConcat calls setModificationTime 
     on a file. (eli)
 
+    HDFS-1625. Ignore disk space values in TestDataNodeMXBean.  (szetszwo)
+
+Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
     HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 
@@ -620,15 +872,12 @@ Release 0.22.0 - Unreleased
     HDFS-1487. FSDirectory.removeBlock() should update diskspace count 
     of the block owner node (Zhong Wang via eli).
 
-    HDFS-1001. DataXceiver and BlockReader disagree on when to send/recv
-    CHECKSUM_OK. (bc Wong via eli)
+    HDFS-1467. Append pipeline never succeeds with more than one replica.
+    (Todd Lipcon via eli)
 
     HDFS-1167. New property for local conf directory in system-test-hdfs.xml
     file. (Vinay Thota via cos)
 
-    HDFS-1467. Append pipeline never succeeds with more than one replica.
-    (Todd Lipcon via eli)
-
     HDFS-1503. TestSaveNamespace fails. (Todd Lipcon via cos)
 
     HDFS-1524. Image loader should make sure to read every byte in image file.
@@ -666,11 +915,6 @@ Release 0.22.0 - Unreleased
     HDFS-884. DataNode throws IOException if all data directories are 
     unavailable. (Steve Loughran and shv)
 
-    HDFS-1572. Checkpointer should trigger checkpoint with specified period.
-    (jghoman)
-
-    HDFS-1561. BackupNode listens on the default host. (shv)
-
     HDFS-1591. HDFS part of HADOOP-6642. (Chris Douglas, Po Cheung via shv)
 
     HDFS-900. Corrupt replicas are not processed correctly in block report (shv)
@@ -688,7 +932,8 @@ Release 0.22.0 - Unreleased
 
     HDFS-981. test-contrib fails due to test-cactus failure (cos)
 
-    HDFS-1625. Ignore disk space values in TestDataNodeMXBean.  (szetszwo)
+    HDFS-1001. DataXceiver and BlockReader disagree on when to send/recv
+    CHECKSUM_OK. (bc Wong via eli)
 
     HDFS-1781. Fix the path for jsvc in bin/hdfs.  (John George via szetszwo)
 
@@ -711,8 +956,6 @@ Release 0.22.0 - Unreleased
 
 Release 0.21.1 - Unreleased
 
-  IMPROVEMENTS
-
     HDFS-1411. Correct backup node startup command in hdfs user guide.
     (Ching-Shen Chen via shv)
 
@@ -736,6 +979,17 @@ Release 0.21.1 - Unreleased
     HDFS-1292. Allow artifacts to be published to the staging Apache Nexus
     Maven Repository.  (Giridharan Kesavan via tomwhite)
 
+    HDFS-1552. Remove java5 dependencies from build. (cos) 
+
+    HDFS-1189. Quota counts missed between clear quota and set quota.
+    (John George via szetszwo)
+
+    HDFS-1665. Balancer misuses dfs.heartbeat.interval as milliseconds.
+    (szetszwo)
+
+    HDFS-1728. SecondaryNameNode.checkpointSize is in bytes but not in MB.
+    (szetszwo)
+
     HDFS-1206. TestFiHFlush fails intermittently. (cos)
 
     HDFS-1548. Fault-injection tests are executed multiple times if invoked
@@ -749,14 +1003,6 @@ Release 0.21.1 - Unreleased
     block placement and checkpoint/backup node features.  (Joe Crobak
     via szetszwo)
 
-    HDFS-1189. Quota counts missed between clear quota and set quota.
-    (John George via szetszwo)
-
-    HDFS-1665. Balancer misuses dfs.heartbeat.interval as milliseconds.
-    (szetszwo)
-
-    HDFS-1728. SecondaryNameNode.checkpointSize is in bytes but not in MB.
-    (szetszwo)
 
     HDFS-1596. Replace fs.checkpoint.* with dfs.namenode.checkpoint.*
     in documentations.  (Harsh J Chouraria via szetszwo)
@@ -810,6 +1056,8 @@ Release 0.21.0 - 2010-08-13
     error message on the screen when cat a directory or a 
     non-existent file. (hairong)
 
+    HDFS-1439. HDFS Federation: Fix compilation error in TestFiHftp. (suresh)
+
   NEW FEATURES
 
     HDFS-1134. Large-scale Automated Framework. (cos)

Modified: hadoop/hdfs/trunk/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/bin/hdfs?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/bin/hdfs (original)
+++ hadoop/hdfs/trunk/bin/hdfs Fri Apr 29 18:16:32 2011
@@ -34,6 +34,7 @@ function print_usage(){
   echo "  oiv                  apply the offline fsimage viewer to an fsimage"
   echo "  oev                  apply the offline edits viewer to an edits file"
   echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  getconf              get config values from configuration"
   echo "						Use -help to see options"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -94,6 +95,8 @@ elif [ "$COMMAND" = "oev" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
 elif [ "$COMMAND" = "fetchdt" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetConf
 else
   echo $COMMAND - invalid command
   print_usage
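
The getconf subcommand wired in above is what the revised start-dfs.sh and
stop-dfs.sh below rely on to discover which hosts run namenodes and secondary
namenodes. A minimal sketch of the expected invocation, with illustrative
hostnames (the 0.0.0.0 fallback for unconfigured secondary namenodes follows
the comments in those scripts):

    $ bin/hdfs getconf -namenodes
    nn1.example.com nn2.example.com
    $ bin/hdfs getconf -secondarynamenodes
    0.0.0.0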

Modified: hadoop/hdfs/trunk/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/bin/start-dfs.sh?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/bin/start-dfs.sh (original)
+++ hadoop/hdfs/trunk/bin/start-dfs.sh Fri Apr 29 18:16:32 2011
@@ -25,17 +25,17 @@ usage="Usage: start-dfs.sh [-upgrade|-ro
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin/hdfs-config.sh"
 
 # get arguments
 if [ $# -ge 1 ]; then
-	nameStartOpt=$1
+	nameStartOpt="$1"
 	shift
-	case $nameStartOpt in
+	case "$nameStartOpt" in
 	  (-upgrade)
 	  	;;
 	  (-rollback) 
-	  	dataStartOpt=$nameStartOpt
+	  	dataStartOpt="$nameStartOpt"
 	  	;;
 	  (*)
 		  echo $usage
@@ -44,14 +44,50 @@ if [ $# -ge 1 ]; then
 	esac
 fi
 
-# start dfs daemons
-# start namenode after datanodes, to minimize time namenode is up w/o data
-# note: datanodes will log connection errors until namenode starts
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start namenode $nameStartOpt
-#
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -namenodes)
+
+echo "Starting namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" start namenode $nameStartOpt
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to start secure cluster, skipping datanodes. Run start-secure-dns.sh as root to complete startup."
+  echo \
+    "Attempting to start secure cluster, skipping datanodes. " \
+    "Run start-secure-dns.sh as root to complete startup."
+else
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" start datanode $dataStartOpt
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if there are no secondary namenodes configured it returns
+# 0.0.0.0 or empty string
+SECONDARY_NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot start secondary namenodes."
 else
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" start secondarynamenode
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs start secondarynamenode
+
+# eof

Modified: hadoop/hdfs/trunk/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/bin/stop-dfs.sh?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/bin/stop-dfs.sh (original)
+++ hadoop/hdfs/trunk/bin/stop-dfs.sh Fri Apr 29 18:16:32 2011
@@ -15,18 +15,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-# Stop hadoop DFS daemons.  Run this on master node.
-
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
 . "$bin"/hdfs-config.sh
 
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop namenode
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to stop secure cluster, skipping datanodes. Run stop-secure-dns.sh as root to complete shutdown."
+  echo \
+    "Attempting to stop secure cluster, skipping datanodes. " \
+    "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" stop datanode
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs stop secondarynamenode
\ No newline at end of file
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if there are no secondary namenodes configured it returns
+# 0.0.0.0 or empty string
+SECONDARY_NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot stop secondary namenodes."
+else
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" stop secondarynamenode
+fi
+
+# eof

Propchange: hadoop/hdfs/trunk/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 29 18:16:32 2011
@@ -1,4 +1,5 @@
 /hadoop/core/branches/branch-0.19/hdfs/build.xml:713112
 /hadoop/core/trunk/build.xml:779102
+/hadoop/hdfs/branches/HDFS-1052/build.xml:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487

Propchange: hadoop/hdfs/trunk/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 29 18:16:32 2011
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
+/hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512

Propchange: hadoop/hdfs/trunk/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 29 18:16:32 2011
@@ -1,4 +1,5 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/hdfsproxy:713112
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
+/hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487

Modified: hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java (original)
+++ hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java Fri Apr 29 18:16:32 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * A HTTPS/SSL proxy to HDFS, implementing certificate based access control.
@@ -69,7 +70,7 @@ public class HdfsProxy {
 
     this.server = new ProxyHttpServer(sslAddr, sslConf);
     this.server.setAttribute("proxy.https.port", server.getPort());
-    this.server.setAttribute("name.node.address", nnAddr);
+    this.server.setAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY, nnAddr);
     this.server.setAttribute(JspHelper.CURRENT_CONF, new HdfsConfiguration());
     this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
     this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);

Modified: hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java (original)
+++ hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java Fri Apr 29 18:16:32 2011
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfsproxy;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** {@inheritDoc} */
@@ -44,10 +46,15 @@ public class ProxyFileDataServlet extend
     if (dt != null) {
       dtParam=JspHelper.getDelegationTokenUrlParam(dt);
     }
-
+    InetSocketAddress nnAddress = (InetSocketAddress) getServletContext()
+        .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+    String nnHostPort = nnAddress == null ? null : NameNode
+        .getHostPortString(nnAddress);
+    String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS,
+        nnHostPort);
     return new URI(request.getScheme(), null, request.getServerName(), request
         .getServerPort(), "/streamFile" + i.getFullName(parent),
-        "&ugi=" + ugi.getShortUserName() + dtParam, null);
+        "&ugi=" + ugi.getShortUserName() + dtParam + addrParam, null);
   }
 
   /** {@inheritDoc} */

Propchange: hadoop/hdfs/trunk/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 29 18:16:32 2011
@@ -1,4 +1,5 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
+/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java Fri Apr 29 18:16:32 2011
@@ -35,7 +35,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -317,13 +317,11 @@ public class BlockReader extends FSInput
     return bytesToRead;
   }
   
-  private BlockReader( String file, long blockId, DataInputStream in, 
-                       DataChecksum checksum, boolean verifyChecksum,
-                       long startOffset, long firstChunkOffset,
-                       long bytesToRead,
-                       Socket dnSock ) {
+  private BlockReader(String file, String bpid, long blockId,
+      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
+      long startOffset, long firstChunkOffset, long bytesToRead, Socket dnSock) {
     // Path is used only for printing block and file information in debug
-    super(new Path("/blk_" + blockId + ":of:" + file)/*too non path-like?*/,
+    super(new Path("/blk_" + blockId + ":" + bpid + ":of:"+ file)/*too non path-like?*/,
           1, verifyChecksum,
           checksum.getChecksumSize() > 0? checksum : null, 
           checksum.getBytesPerChecksum(),
@@ -349,7 +347,7 @@ public class BlockReader extends FSInput
   }
 
   public static BlockReader newBlockReader(Socket sock, String file,
-      Block block, Token<BlockTokenIdentifier> blockToken, 
+      ExtendedBlock block, Token<BlockTokenIdentifier> blockToken, 
       long startOffset, long len, int bufferSize) throws IOException {
     return newBlockReader(sock, file, block, blockToken, startOffset, len, bufferSize,
         true);
@@ -357,7 +355,7 @@ public class BlockReader extends FSInput
 
   /** Java Doc required */
   public static BlockReader newBlockReader( Socket sock, String file, 
-                                     Block block, 
+                                     ExtendedBlock block, 
                                      Token<BlockTokenIdentifier> blockToken,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum)
@@ -367,7 +365,7 @@ public class BlockReader extends FSInput
   }
 
   public static BlockReader newBlockReader( Socket sock, String file,
-                                     Block block, 
+                                     ExtendedBlock block, 
                                      Token<BlockTokenIdentifier> blockToken,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum,
@@ -394,14 +392,14 @@ public class BlockReader extends FSInput
             "Got access token error for OP_READ_BLOCK, self="
                 + sock.getLocalSocketAddress() + ", remote="
                 + sock.getRemoteSocketAddress() + ", for file " + file
-                + ", for block " + block.getBlockId() 
-                + "_" + block.getGenerationStamp());
+                + ", for pool " + block.getBlockPoolId() + " block " 
+                + block.getBlockId() + "_" + block.getGenerationStamp());
       } else {
         throw new IOException("Got error for OP_READ_BLOCK, self="
             + sock.getLocalSocketAddress() + ", remote="
             + sock.getRemoteSocketAddress() + ", for file " + file
-            + ", for block " + block.getBlockId() + "_" 
-            + block.getGenerationStamp());
+            + ", for pool " + block.getBlockPoolId() + " block " 
+            + block.getBlockId() + "_" + block.getGenerationStamp());
       }
     }
     DataChecksum checksum = DataChecksum.newDataChecksum( in );
@@ -417,8 +415,8 @@ public class BlockReader extends FSInput
                             startOffset + " for file " + file);
     }
 
-    return new BlockReader(file, block.getBlockId(), in, checksum,
-        verifyChecksum, startOffset, firstChunkOffset, len, sock);
+    return new BlockReader(file, block.getBlockPoolId(), block.getBlockId(),
+        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
   }
 
   @Override
@@ -453,9 +451,15 @@ public class BlockReader extends FSInput
     }
   }
   
-  // File name to print when accessing a block directory from servlets
+  /**
+   * File name to print when accessing a block directly (from servlets)
+   * @param s Address of the block location
+   * @param poolId Block pool ID of the block
+   * @param blockId Block ID of the block
+   * @return string that has a file name for debug purposes
+   */
   public static String getFileName(final InetSocketAddress s,
-      final long blockId) {
-    return s.toString() + ":" + blockId;
+      final String poolId, final long blockId) {
+    return s.toString() + ":" + poolId + ":" + blockId;
   }
 }
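
The BlockReader hunk above threads the block pool ID through the reader: the
private constructor now takes a bpid, the public factories accept ExtendedBlock
instead of Block, and getFileName() embeds the pool ID in the debug name. Below
is a minimal sketch of a caller using the new 8-argument factory; the socket,
path and buffer size are illustrative and not part of this patch.

  // Sketch only: open a reader for a block returned by the namenode.
  // Assumes dnSock is already connected to a datanode holding a replica.
  static BlockReader openReader(Socket dnSock, String src, LocatedBlock lb,
      int bufferSize) throws IOException {
    ExtendedBlock blk = lb.getBlock();           // now carries the block pool ID
    return BlockReader.newBlockReader(dnSock, src, blk, lb.getBlockToken(),
        0L, blk.getNumBytes(), bufferSize, true /* verifyChecksum */);
  }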

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Apr 29 18:16:32 2011
@@ -64,7 +64,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -197,7 +197,7 @@ public class DFSClient implements FSCons
       ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }
     UserGroupInformation ticket = UserGroupInformation
-        .createRemoteUser(locatedBlock.getBlock().toString());
+        .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
     ticket.addToken(locatedBlock.getBlockToken());
     return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
         ClientDatanodeProtocol.versionID, addr, ticket, conf, NetUtils
@@ -979,7 +979,7 @@ public class DFSClient implements FSCons
         refetchBlocks = false;
       }
       LocatedBlock lb = locatedblocks.get(i);
-      final Block block = lb.getBlock();
+      final ExtendedBlock block = lb.getBlock();
       final DatanodeInfo[] datanodes = lb.getLocations();
       
       //try each datanode location of the block
@@ -1589,7 +1589,7 @@ public class DFSClient implements FSCons
     /**
      * Returns the block containing the target position. 
      */
-    public Block getCurrentBlock() {
+    public ExtendedBlock getCurrentBlock() {
       return ((DFSInputStream)in).getCurrentBlock();
     }
 
@@ -1608,7 +1608,7 @@ public class DFSClient implements FSCons
     }
   }
 
-  void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) {
+  void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
     DatanodeInfo [] dnArr = { dn };
     LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
     reportChecksumFailure(file, lblocks);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Apr 29 18:16:32 2011
@@ -54,6 +54,7 @@ public class DFSConfigKeys extends Commo
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
   public static final String  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
@@ -260,6 +261,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
+  
+  public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
+  public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
   public static final String  DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
   public static final int     DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
   public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
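
The two federation keys and the client RPC address key added above are what the
DFSUtil changes further down use to enumerate namenodes: dfs.federation.nameservices
holds a comma-separated list of nameservice IDs, and per-namenode settings are the
generic keys suffixed with one of those IDs. A hedged sketch of a two-nameservice
configuration follows; the IDs ns1/ns2 and the host names are made up for
illustration.

  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
  conf.set(DFSUtil.getNameServiceIdKey(
      DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "nn1.example.com:8020");
  conf.set(DFSUtil.getNameServiceIdKey(
      DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "nn2.example.com:8020");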

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Fri Apr 29 18:16:32 2011
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -62,7 +62,7 @@ public class DFSInputStream extends FSIn
   private LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private DatanodeInfo currentNode = null;
-  private Block currentBlock = null;
+  private ExtendedBlock currentBlock = null;
   private long pos = 0;
   private long blockEnd = -1;
 
@@ -204,7 +204,7 @@ public class DFSInputStream extends FSIn
   /**
    * Returns the block containing the target position. 
    */
-  public Block getCurrentBlock() {
+  public ExtendedBlock getCurrentBlock() {
     return currentBlock;
   }
 
@@ -384,10 +384,10 @@ public class DFSInputStream extends FSIn
         s = dfsClient.socketFactory.createSocket();
         NetUtils.connect(s, targetAddr, dfsClient.socketTimeout);
         s.setSoTimeout(dfsClient.socketTimeout);
-        Block blk = targetBlock.getBlock();
+        ExtendedBlock blk = targetBlock.getBlock();
         Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
         
-        blockReader = BlockReader.newBlockReader(s, src, blk, 
+        blockReader = BlockReader.newBlockReader(s, src, blk,
             accessToken, 
             offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
             buffersize, verifyChecksum, dfsClient.clientName);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Apr 29 18:16:32 2011
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -282,7 +282,7 @@ class DFSOutputStream extends FSOutputSu
   //
   class DataStreamer extends Daemon {
     private volatile boolean streamerClosed = false;
-    private Block block; // its length is number of bytes acked
+    private ExtendedBlock block; // its length is number of bytes acked
     private Token<BlockTokenIdentifier> accessToken;
     private DataOutputStream blockStream;
     private DataInputStream blockReplyStream;
@@ -929,8 +929,8 @@ class DFSOutputStream extends FSOutputSu
 
       if (success) {
         // update pipeline at the namenode
-        Block newBlock = new Block(
-            block.getBlockId(), block.getNumBytes(), newGS);
+        ExtendedBlock newBlock = new ExtendedBlock(
+            block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
         dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock, nodes);
         // update client side generation stamp
         block = newBlock;
@@ -1015,8 +1015,8 @@ class DFSOutputStream extends FSOutputSu
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
-        DataTransferProtocol.Sender.opWriteBlock(out, block, nodes.length,
-            recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
+        DataTransferProtocol.Sender.opWriteBlock(out, block,
+            nodes.length, recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
             block.getNumBytes(), bytesSent, dfsClient.clientName, null, nodes,
             accessToken);
         checksum.writeHeader(out);
@@ -1120,7 +1120,7 @@ class DFSOutputStream extends FSOutputSu
       } 
     }
 
-    Block getBlock() {
+    ExtendedBlock getBlock() {
       return block;
     }
 
@@ -1636,7 +1636,7 @@ class DFSOutputStream extends FSOutputSu
 
       flushInternal();             // flush all data to Datanodes
       // get last block before destroying the streamer
-      Block lastBlock = streamer.getBlock();
+      ExtendedBlock lastBlock = streamer.getBlock();
       closeThreads(false);
       completeFile(lastBlock);
       dfsClient.leasechecker.remove(src);
@@ -1647,7 +1647,7 @@ class DFSOutputStream extends FSOutputSu
 
   // should be called holding (this) lock since setTestFilename() may 
   // be called during unit tests
-  private void completeFile(Block last) throws IOException {
+  private void completeFile(ExtendedBlock last) throws IOException {
     long localstart = System.currentTimeMillis();
     boolean fileComplete = false;
     while (!fileComplete) {
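
In the pipeline-recovery hunk above, the updated block is rebuilt as an
ExtendedBlock so the block pool ID travels with the new generation stamp. A tiny
illustrative construction, with current and newGenerationStamp as hypothetical
local names:

  // Illustrative only: keep pool ID, block ID and acked length, bump the GS.
  ExtendedBlock renewed = new ExtendedBlock(current.getBlockPoolId(),
      current.getBlockId(), current.getNumBytes(), newGenerationStamp);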

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Apr 29 18:16:32 2011
@@ -18,20 +18,31 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.Comparator;
 import java.util.StringTokenizer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.security.UserGroupInformation;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 @InterfaceAudience.Private
 public class DFSUtil {
+  
   /**
    * Compartor for sorting DataNodeInfo[] based on decommissioned states.
    * Decommissioned nodes are moved to the end of the array on sorting with
@@ -245,6 +256,324 @@ public class DFSUtil {
     return blkLocations;
   }
 
+  /**
+   * Returns collection of nameservice Ids from the configuration.
+   * @param conf configuration
+   * @return collection of nameservice Ids
+   */
+  public static Collection<String> getNameServiceIds(Configuration conf) {
+    return conf.getStringCollection(DFS_FEDERATION_NAMESERVICES);
+  }
 
-}
+  /**
+   * Given a list of keys in the order of preference, returns a value
+   * for the key in the given order from the configuration.
+   * @param defaultValue default value to return, when key was not found
+   * @param keySuffix suffix to add to the key, if it is not null
+   * @param conf Configuration
+   * @param keys list of keys in the order of preference
+   * @return value of the key or default if a key was not found in configuration
+   */
+  private static String getConfValue(String defaultValue, String keySuffix,
+      Configuration conf, String... keys) {
+    String value = null;
+    for (String key : keys) {
+      if (keySuffix != null) {
+        key += "." + keySuffix;
+      }
+      value = conf.get(key);
+      if (value != null) {
+        break;
+      }
+    }
+    if (value == null) {
+      value = defaultValue;
+    }
+    return value;
+  }
+  
+  /**
+   * Returns list of InetSocketAddress for a given set of keys.
+   * @param conf configuration
+   * @param defaultAddress default address to return in case key is not found
+   * @param keys Set of keys to look for in the order of preference
+   * @return list of InetSocketAddress corresponding to the key
+   */
+  private static List<InetSocketAddress> getAddresses(Configuration conf,
+      String defaultAddress, String... keys) {
+    Collection<String> nameserviceIds = getNameServiceIds(conf);
+    List<InetSocketAddress> isas = new ArrayList<InetSocketAddress>();
+
+    // Configuration with a single namenode
+    if (nameserviceIds == null || nameserviceIds.isEmpty()) {
+      String address = getConfValue(defaultAddress, null, conf, keys);
+      if (address == null) {
+        return null;
+      }
+      isas.add(NetUtils.createSocketAddr(address));
+    } else {
+      // Get the namenodes for all the configured nameServiceIds
+      for (String nameserviceId : nameserviceIds) {
+        String address = getConfValue(null, nameserviceId, conf, keys);
+        if (address == null) {
+          return null;
+        }
+        isas.add(NetUtils.createSocketAddr(address));
+      }
+    }
+    return isas;
+  }
+  
+  /**
+   * Returns list of InetSocketAddress corresponding to  backup node rpc 
+   * addresses from the configuration.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddresses
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getBackupNodeAddresses(
+      Configuration conf) throws IOException {
+    List<InetSocketAddress> addressList = getAddresses(conf,
+        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: backup node address "
+          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
+    }
+    return addressList;
+  }
 
+  /**
+   * Returns list of InetSocketAddresses of corresponding to secondary namenode
+   * http addresses from the configuration.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddresses
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getSecondaryNameNodeAddresses(
+      Configuration conf) throws IOException {
+    List<InetSocketAddress> addressList = getAddresses(conf, null,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: secondary namenode address "
+          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
+    }
+    return addressList;
+  }
+
+  /**
+   * Returns list of InetSocketAddresses corresponding to namenodes from the
+   * configuration. Note this is to be used by datanodes to get the list of
+   * namenode addresses to talk to.
+   * 
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddress
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getNNServiceRpcAddresses(
+      Configuration conf) throws IOException {
+    // Use default address as fall back
+    String defaultAddress;
+    try {
+      defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+    } catch (IllegalArgumentException e) {
+      defaultAddress = null;
+    }
+    
+    List<InetSocketAddress> addressList = getAddresses(conf, defaultAddress,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: namenode address "
+          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
+          + DFS_NAMENODE_RPC_ADDRESS_KEY
+          + " is not configured.");
+    }
+    return addressList;
+  }
+  
+  /**
+   * Given the InetSocketAddress for any configured communication with a 
+   * namenode, this method returns the corresponding nameservice ID,
+   * by doing a reverse lookup on the list of nameservices until it
+   * finds a match.
+   * If null is returned, client should try {@link #isDefaultNamenodeAddress}
+   * to check pre-Federated configurations.
+   * Since the process of resolving URIs to Addresses is slightly expensive,
+   * this utility method should not be used in performance-critical routines.
+   * 
+   * @param conf - configuration
+   * @param address - InetSocketAddress for configured communication with NN.
+   *     Configured addresses are typically given as URIs, but we may have to
+   *     compare against a URI typed in by a human, or the server name may be
+   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
+   *     comparing URI substrings.
+   * @param keys - list of configured communication parameters that should
+   *     be checked for matches.  For example, to compare against RPC addresses,
+   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+   *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
+   *     not the NameServiceId-suffixed keys.
+   * @return nameserviceId, or null if no match found
+   */
+  public static String getNameServiceIdFromAddress(Configuration conf, 
+      InetSocketAddress address, String... keys) {
+    Collection<String> nameserviceIds = getNameServiceIds(conf);
+
+    // Configuration with a single namenode and no nameserviceId
+    if (nameserviceIds == null || nameserviceIds.isEmpty()) {
+      // client should try {@link isDefaultNamenodeAddress} instead
+      return null;
+    }
+    // Get the candidateAddresses for all the configured nameServiceIds
+    for (String nameserviceId : nameserviceIds) {
+      for (String key : keys) {
+        String candidateAddress = conf.get(
+            getNameServiceIdKey(key, nameserviceId));
+        if (candidateAddress != null
+            && address.equals(NetUtils.createSocketAddr(candidateAddress)))
+          return nameserviceId;
+      }
+    }
+    // didn't find a match
+    // client should try {@link isDefaultNamenodeAddress} instead
+    return null;
+  }
+
+  /**
+   * return HTTP server info from the configuration
+   * @param conf
+   * @param namenode - namenode address
+   * @return http server info
+   */
+  public static String getInfoServer(
+      InetSocketAddress namenode, Configuration conf) {
+    String httpAddress = null;
+    
+    String httpAddressKey = UserGroupInformation.isSecurityEnabled() ?
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY
+        : DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+    String httpAddressDefault = UserGroupInformation.isSecurityEnabled() ?
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT 
+        :DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+    
+    if(namenode != null) {
+      // if non-default namenode, try reverse look up 
+      // the nameServiceID if it is available
+      String nameServiceId = DFSUtil.getNameServiceIdFromAddress(
+          conf, namenode,
+          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+
+      if (nameServiceId != null) {
+        httpAddress = conf.get(DFSUtil.getNameServiceIdKey(
+            httpAddressKey, nameServiceId));
+      }
+    }
+    // else - Use non-federation style configuration
+    if (httpAddress == null) {
+      httpAddress = conf.get(httpAddressKey, httpAddressDefault);
+    }
+
+    return httpAddress;
+  }
+  
+  /**
+   * Given the InetSocketAddress for any configured communication with a 
+   * namenode, this method determines whether it is the configured
+   * communication channel for the "default" namenode.
+   * It does a reverse lookup on the list of default communication parameters
+   * to see if the given address matches any of them.
+   * Since the process of resolving URIs to Addresses is slightly expensive,
+   * this utility method should not be used in performance-critical routines.
+   * 
+   * @param conf - configuration
+   * @param address - InetSocketAddress for configured communication with NN.
+   *     Configured addresses are typically given as URIs, but we may have to
+   *     compare against a URI typed in by a human, or the server name may be
+   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
+   *     comparing URI substrings.
+   * @param keys - list of configured communication parameters that should
+   *     be checked for matches.  For example, to compare against RPC addresses,
+   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+   *     DFS_NAMENODE_RPC_ADDRESS_KEY
+   * @return - boolean confirmation if matched generic parameter
+   */
+  public static boolean isDefaultNamenodeAddress(Configuration conf,
+      InetSocketAddress address, String... keys) {
+    for (String key : keys) {
+      String candidateAddress = conf.get(key);
+      if (candidateAddress != null
+          && address.equals(NetUtils.createSocketAddr(candidateAddress)))
+        return true;
+    }
+    return false;
+  }
+  
+  /**
+   * @return key specific to a nameserviceId from a generic key
+   */
+  public static String getNameServiceIdKey(String key, String nameserviceId) {
+    return key + "." + nameserviceId;
+  }
+  
+  /**
+   * Sets the node specific setting into generic configuration key. Looks up
+   * value of "key.nameserviceId" and if found sets that value into generic key 
+   * in the conf. Note that this only modifies the runtime conf.
+   * 
+   * @param conf
+   *          Configuration object to lookup specific key and to set the value
+   *          to the key passed. Note the conf object is modified.
+   * @param nameserviceId
+   *          nameservice Id to construct the node specific key.
+   * @param keys
+   *          The key for which node specific value is looked up
+   */
+  public static void setGenericConf(Configuration conf,
+      String nameserviceId, String... keys) {
+    for (String key : keys) {
+      String value = conf.get(getNameServiceIdKey(key, nameserviceId));
+      if (value != null) {
+        conf.set(key, value);
+      }
+    }
+  }
+  
+  /**
+   * Returns the configured nameservice Id
+   * 
+   * @param conf
+   *          Configuration object to lookup the nameserviceId
+   * @return nameserviceId string from conf
+   */
+  public static String getNameServiceId(Configuration conf) {
+    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+  }
+  
+  /** Return used as percentage of capacity */
+  public static float getPercentUsed(long used, long capacity) {
+    return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
+  }
+  
+  /** Return remaining as percentage of capacity */
+  public static float getPercentRemaining(long remaining, long capacity) {
+    return capacity <= 0 ? 0 : ((float)remaining * 100.0f)/(float)capacity; 
+  }
+
+  /**
+   * @param address address of format host:port
+   * @return InetSocketAddress for the address
+   */
+  public static InetSocketAddress getSocketAddress(String address) {
+    int colon = address.indexOf(":");
+    if (colon < 0) {
+      return new InetSocketAddress(address, 0);
+    }
+    return new InetSocketAddress(address.substring(0, colon), 
+        Integer.parseInt(address.substring(colon + 1)));
+  }
+}
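
The new DFSUtil helpers give datanodes and tools one way to enumerate every
configured namenode and to map an address back to its nameservice ID. A rough
sketch, assuming a conf populated with the federation keys sketched earlier;
nothing below is part of this patch.

  List<InetSocketAddress> nnAddrs = DFSUtil.getNNServiceRpcAddresses(conf);
  for (InetSocketAddress addr : nnAddrs) {
    String nsId = DFSUtil.getNameServiceIdFromAddress(conf, addr,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    if (nsId == null && DFSUtil.isDefaultNamenodeAddress(conf, addr,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY)) {
      // pre-federation, single-namenode configuration
    }
  }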

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Apr 29 18:16:32 2011
@@ -46,9 +46,9 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
@@ -701,7 +701,7 @@ public class DistributedFileSystem exten
 
     // Find block in data stream.
     DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in;
-    Block dataBlock = dfsIn.getCurrentBlock();
+    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
     if (dataBlock == null) {
       LOG.error("Error: Current block in data stream is null! ");
       return false;
@@ -714,7 +714,7 @@ public class DistributedFileSystem exten
 
     // Find block in checksum stream
     DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
-    Block sumsBlock = dfsSums.getCurrentBlock();
+    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
     if (sumsBlock == null) {
       LOG.error("Error: Current block in checksum stream is null! ");
       return false;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java Fri Apr 29 18:16:32 2011
@@ -153,13 +153,23 @@ public class Block implements Writable, 
   /////////////////////////////////////
   // Writable
   /////////////////////////////////////
+  @Override // Writable
   public void write(DataOutput out) throws IOException {
+    writeHelper(out);
+  }
+
+  @Override // Writable
+  public void readFields(DataInput in) throws IOException {
+    readHelper(in);
+  }
+  
+  final void writeHelper(DataOutput out) throws IOException {
     out.writeLong(blockId);
     out.writeLong(numBytes);
     out.writeLong(generationStamp);
   }
-
-  public void readFields(DataInput in) throws IOException {
+  
+  final void readHelper(DataInput in) throws IOException {
     this.blockId = in.readLong();
     this.numBytes = in.readLong();
     this.generationStamp = in.readLong();
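
The Block refactor above moves the field serialization into final, package-private
writeHelper/readHelper methods so write() and readFields() can be overridden
without duplicating the three-long wire format. Purely illustrative, and not
necessarily how the copied ExtendedBlock is written: a subclass living in the
same org.apache.hadoop.hdfs.protocol package could prepend its own data and then
delegate.

  // Hypothetical subclass; the real ExtendedBlock lives in the copied file above.
  class PoolTaggedBlock extends Block {
    private String poolId = "";

    @Override
    public void write(DataOutput out) throws IOException {
      Text.writeString(out, poolId);   // illustrative extra field first
      writeHelper(out);                // then blockId, numBytes, generationStamp
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      poolId = Text.readString(in);
      readHelper(in);
    }
  }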

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java Fri Apr 29 18:16:32 2011
@@ -23,23 +23,47 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;
 
 /** An client-datanode protocol for block recovery
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
 @TokenInfo(BlockTokenSelector.class)
 public interface ClientDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);
 
   /**
-   * 6: recoverBlock() removed.
+   * 9: Added deleteBlockPool method
    */
-  public static final long versionID = 6L;
+  public static final long versionID = 9L;
 
   /** Return the visible length of a replica. */
-  long getReplicaVisibleLength(Block b) throws IOException;
+  long getReplicaVisibleLength(ExtendedBlock b) throws IOException;
+  
+  /**
+   * Refresh the list of federated namenodes from updated configuration
+   * Adds new namenodes and stops the deleted namenodes.
+   * 
+   * @throws IOException on error
+   **/
+  void refreshNamenodes() throws IOException;
+
+  /**
+   * Delete the block pool directory. If force is false it is deleted only if
+   * it is empty, otherwise it is deleted along with its contents.
+   * 
+   * @param bpid Blockpool id to be deleted.
+   * @param force If false blockpool directory is deleted only if it is empty 
+   *          i.e. if it doesn't contain any block files, otherwise it is 
+   *          deleted along with its contents.
+   * @throws IOException
+   */
+  void deleteBlockPool(String bpid, boolean force) throws IOException; 
 }
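
The protocol bump to version 9 adds two datanode admin calls for federation:
refreshNamenodes() re-reads the configuration, adding newly configured namenodes
and stopping deleted ones, and deleteBlockPool() drops a block pool's storage. A
hedged sketch of driving them over a plain RPC proxy; obtaining the proxy mirrors
the DFSClient hunk earlier, and dnAddr, bpid and conf are illustrative.

  ClientDatanodeProtocol dn = (ClientDatanodeProtocol) RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      dnAddr, conf);
  try {
    dn.refreshNamenodes();            // pick up added/removed nameservices
    dn.deleteBlockPool(bpid, false);  // force=false: only an empty pool dir is removed
  } finally {
    RPC.stopProxy(dn);
  }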