You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2016/02/24 21:03:32 UTC
[50/50] [abbrv] hadoop git commit: Merge changes from trunk
Merge changes from trunk
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b7df69d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b7df69d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b7df69d
Branch: refs/heads/HDFS-7240
Commit: 1b7df69d3774fa91eb9acfbea7667f2c9b538f2c
Parents: fac4633 def754e
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Feb 24 11:55:15 2016 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Feb 24 11:55:15 2016 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 122 +-
.../src/main/bin/hadoop-functions.sh | 1 +
.../hadoop-common/src/main/conf/hadoop-env.sh | 6 +
.../src/main/conf/ssl-server.xml.example | 10 +
.../org/apache/hadoop/conf/Reconfigurable.java | 8 +-
.../apache/hadoop/conf/ReconfigurableBase.java | 47 +-
.../hadoop/crypto/OpensslAesCtrCryptoCodec.java | 3 +
.../org/apache/hadoop/crypto/key/KeyShell.java | 2 +-
.../crypto/key/kms/KMSClientProvider.java | 14 +-
.../java/org/apache/hadoop/fs/CanUnbuffer.java | 2 +-
.../java/org/apache/hadoop/fs/FileSystem.java | 54 +-
.../org/apache/hadoop/fs/FilterFileSystem.java | 5 +-
.../java/org/apache/hadoop/fs/TrashPolicy.java | 2 +-
.../apache/hadoop/fs/TrashPolicyDefault.java | 9 +-
.../org/apache/hadoop/fs/shell/AclCommands.java | 12 +-
.../org/apache/hadoop/fs/shell/FsCommand.java | 2 +-
.../org/apache/hadoop/http/HttpServer2.java | 12 +
.../hadoop/io/compress/zlib/ZlibCompressor.java | 42 +-
.../rawcoder/AbstractRawErasureDecoder.java | 39 +-
.../io/erasurecode/rawcoder/RSRawDecoder.java | 11 +-
.../io/erasurecode/rawcoder/RSRawDecoder2.java | 176 +++
.../io/erasurecode/rawcoder/RSRawEncoder2.java | 76 +
.../rawcoder/RSRawErasureCoderFactory2.java | 37 +
.../erasurecode/rawcoder/RawErasureCoder.java | 2 +-
.../io/erasurecode/rawcoder/util/CoderUtil.java | 83 ++
.../io/erasurecode/rawcoder/util/DumpUtil.java | 18 +-
.../io/erasurecode/rawcoder/util/GF256.java | 339 +++++
.../io/erasurecode/rawcoder/util/RSUtil.java | 2 +-
.../io/erasurecode/rawcoder/util/RSUtil2.java | 172 +++
.../metrics2/sink/RollingFileSystemSink.java | 409 ++++--
.../hadoop/metrics2/source/JvmMetrics.java | 2 +-
.../net/NetworkTopologyWithNodeGroup.java | 7 +-
.../java/org/apache/hadoop/net/NodeBase.java | 9 +-
.../java/org/apache/hadoop/security/KDiag.java | 976 +++++++++++++
.../hadoop/security/UserGroupInformation.java | 36 +-
.../security/http/RestCsrfPreventionFilter.java | 218 ++-
.../security/ssl/FileBasedKeyStoresFactory.java | 2 +
.../apache/hadoop/security/ssl/SSLFactory.java | 3 +-
.../org/apache/hadoop/util/DataChecksum.java | 147 +-
.../org/apache/hadoop/util/JvmPauseMonitor.java | 2 +-
.../org/apache/hadoop/util/StringUtils.java | 1 +
.../hadoop/util/concurrent/ExecutorHelper.java | 67 +
.../hadoop/util/concurrent/HadoopExecutors.java | 96 ++
.../HadoopScheduledThreadPoolExecutor.java | 71 +
.../concurrent/HadoopThreadPoolExecutor.java | 92 ++
.../hadoop/util/concurrent/package-info.java | 26 +
.../src/main/resources/core-default.xml | 15 +-
...he.hadoop.application-classloader.properties | 1 +
.../src/site/markdown/CommandsManual.md | 14 +-
.../src/site/markdown/CredentialProviderAPI.md | 133 ++
.../src/site/markdown/FileSystemShell.md | 2 +-
.../hadoop-common/src/site/markdown/Metrics.md | 27 +-
.../src/site/markdown/SecureMode.md | 198 +++
.../apache/hadoop/conf/TestReconfiguration.java | 143 +-
.../fs/contract/localfs/LocalFSContract.java | 6 +-
.../hadoop/fs/shell/TestCopyPreserveFlag.java | 5 +-
.../java/org/apache/hadoop/fs/shell/TestLs.java | 2 +-
.../apache/hadoop/http/TestHttpCookieFlag.java | 5 +-
.../apache/hadoop/http/TestHttpServerLogs.java | 2 +-
.../apache/hadoop/http/TestSSLHttpServer.java | 199 ++-
.../zlib/TestZlibCompressorDecompressor.java | 25 +
.../hadoop/io/erasurecode/TestECSchema.java | 6 +
.../erasurecode/coder/TestRSErasureCoder.java | 4 +
.../io/erasurecode/coder/TestXORCoder.java | 5 +
.../io/erasurecode/rawcoder/TestRSRawCoder.java | 91 --
.../erasurecode/rawcoder/TestRSRawCoder2.java | 33 +
.../rawcoder/TestRSRawCoderBase.java | 109 +-
.../erasurecode/rawcoder/TestXORRawCoder.java | 1 -
.../sink/RollingFileSystemSinkTestBase.java | 87 +-
.../net/TestNetworkTopologyWithNodeGroup.java | 15 +-
.../org/apache/hadoop/security/TestKDiag.java | 226 +++
.../apache/hadoop/security/TestKDiagNoKDC.java | 123 ++
.../security/TestUserGroupInformation.java | 2 +-
.../http/TestRestCsrfPreventionFilter.java | 93 +-
.../hadoop/security/ssl/KeyStoreTestUtil.java | 81 +-
.../hadoop/util/Crc32PerformanceTest.java | 350 +++++
.../apache/hadoop/util/TestDataChecksum.java | 6 +
.../apache/hadoop/security/secure-hdfs-site.xml | 26 +
.../hadoop-kms/src/main/conf/kms-env.sh | 4 +
...rKeyGeneratorKeyProviderCryptoExtension.java | 10 +
.../key/kms/server/KMSExceptionsProvider.java | 2 +
.../hadoop/crypto/key/kms/server/KMSWebApp.java | 1 +
.../hadoop-kms/src/main/libexec/kms-config.sh | 1 +
.../hadoop-kms/src/main/tomcat/server.xml | 3 +-
.../src/main/tomcat/ssl-server.xml.conf | 1 +
.../hadoop-kms/src/site/markdown/index.md.vm | 78 +-
.../hadoop/crypto/key/kms/server/TestKMS.java | 21 +-
.../main/java/org/apache/hadoop/fs/Hdfs.java | 535 ++++++++
.../java/org/apache/hadoop/fs/SWebHdfs.java | 64 +
.../main/java/org/apache/hadoop/fs/WebHdfs.java | 63 +
.../main/java/org/apache/hadoop/fs/package.html | 26 +
.../java/org/apache/hadoop/hdfs/DFSClient.java | 1 +
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 28 +-
.../hadoop/hdfs/DFSStripedInputStream.java | 7 +-
.../hadoop/hdfs/DFSStripedOutputStream.java | 132 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 43 +-
.../hadoop/hdfs/DistributedFileSystem.java | 67 +-
.../hdfs/client/HdfsClientConfigKeys.java | 16 +
.../hadoop/hdfs/client/impl/DfsClientConf.java | 13 +
.../hadoop/hdfs/protocol/HdfsConstants.java | 2 +-
.../hdfs/server/protocol/DatanodeStorage.java | 1 +
.../hadoop/hdfs/util/StripedBlockUtil.java | 24 +
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 65 +-
...onfRefreshTokenBasedAccessTokenProvider.java | 8 +-
.../CredentialBasedAccessTokenProvider.java | 8 +-
.../src/main/proto/erasurecoding.proto | 4 +-
.../apache/hadoop/lib/servlet/ServerWebApp.java | 23 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 222 ++-
.../jdiff/Apache_Hadoop_HDFS_2.6.0.xml | 2 +-
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 +
.../main/java/org/apache/hadoop/fs/Hdfs.java | 535 --------
.../java/org/apache/hadoop/fs/SWebHdfs.java | 64 -
.../main/java/org/apache/hadoop/fs/WebHdfs.java | 63 -
.../main/java/org/apache/hadoop/fs/package.html | 26 -
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 36 +-
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 4 +-
.../java/org/apache/hadoop/hdfs/HAUtil.java | 27 +
.../apache/hadoop/hdfs/net/TcpPeerServer.java | 6 +-
.../DatanodeProtocolClientSideTranslatorPB.java | 5 +-
.../apache/hadoop/hdfs/protocolPB/PBHelper.java | 95 +-
.../hadoop/hdfs/server/balancer/Balancer.java | 11 +-
.../hdfs/server/blockmanagement/BlockInfo.java | 202 +--
.../blockmanagement/BlockInfoContiguous.java | 29 +-
.../blockmanagement/BlockInfoStriped.java | 30 +-
.../server/blockmanagement/BlockManager.java | 651 +++++----
.../BlockPlacementPolicyDefault.java | 12 +-
.../BlockPlacementPolicyRackFaultTolerant.java | 10 +-
.../BlockPlacementPolicyWithNodeGroup.java | 46 +
.../BlockPlacementStatusDefault.java | 10 +-
.../BlockPlacementStatusWithNodeGroup.java | 81 ++
.../BlockReconstructionWork.java | 116 ++
.../blockmanagement/BlockRecoveryWork.java | 111 --
.../BlockUnderConstructionFeature.java | 25 +
.../hdfs/server/blockmanagement/BlocksMap.java | 75 +-
.../CacheReplicationMonitor.java | 27 +-
.../blockmanagement/DatanodeDescriptor.java | 13 +-
.../server/blockmanagement/DatanodeManager.java | 21 +-
.../blockmanagement/DatanodeStorageInfo.java | 123 +-
.../blockmanagement/ErasureCodingWork.java | 111 +-
.../PendingReplicationBlocks.java | 9 +-
.../server/blockmanagement/ReplicationWork.java | 11 +-
.../hdfs/server/datanode/BPOfferService.java | 54 +-
.../hdfs/server/datanode/BPServiceActor.java | 276 +---
.../server/datanode/BlockPoolSliceStorage.java | 56 +-
.../server/datanode/BlockRecoveryWorker.java | 12 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 123 +-
.../hdfs/server/datanode/DataStorage.java | 187 ++-
.../hdfs/server/datanode/DataXceiver.java | 1 +
.../hdfs/server/datanode/DataXceiverServer.java | 2 +-
.../datanode/IncrementalBlockReportManager.java | 224 +++
.../server/datanode/SecureDataNodeStarter.java | 6 +-
.../hdfs/server/datanode/VolumeScanner.java | 7 +-
.../erasurecode/ErasureCodingWorker.java | 247 ++--
.../AvailableSpaceVolumeChoosingPolicy.java | 35 +-
.../RoundRobinVolumeChoosingPolicy.java | 53 +-
.../datanode/fsdataset/impl/BlockPoolSlice.java | 29 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 60 +-
.../datanode/fsdataset/impl/FsVolumeImpl.java | 69 +-
.../datanode/fsdataset/impl/FsVolumeList.java | 47 +-
.../datanode/fsdataset/impl/ReplicaMap.java | 71 +-
.../server/datanode/web/DatanodeHttpServer.java | 113 +-
.../web/PortUnificationServerHandler.java | 18 +-
.../web/RestCsrfPreventionFilterHandler.java | 137 ++
.../server/namenode/EncryptionZoneManager.java | 73 +-
.../hdfs/server/namenode/FSDirTruncateOp.java | 8 +-
.../hdfs/server/namenode/FSEditLogOp.java | 41 +-
.../hdfs/server/namenode/FSNamesystem.java | 23 +-
.../hadoop/hdfs/server/namenode/INode.java | 6 +-
.../hadoop/hdfs/server/namenode/INodeFile.java | 40 +-
.../hdfs/server/namenode/INodeReference.java | 5 +-
.../hadoop/hdfs/server/namenode/NameNode.java | 3 +-
.../server/namenode/NameNodeHttpServer.java | 26 +-
.../hadoop/hdfs/server/namenode/Namesystem.java | 23 -
.../ha/RequestHedgingProxyProvider.java | 41 +-
.../server/namenode/ha/StandbyCheckpointer.java | 7 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 11 +-
.../snapshot/FileWithSnapshotFeature.java | 2 +-
.../top/window/RollingWindowManager.java | 8 +-
.../protocol/BlockECReconstructionCommand.java | 148 ++
.../server/protocol/BlockECRecoveryCommand.java | 147 --
.../server/protocol/BlockReportContext.java | 10 +-
.../hdfs/server/protocol/DatanodeProtocol.java | 3 +-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 2 +-
.../tools/offlineImageViewer/FSImageLoader.java | 12 +-
.../IgnoreSnapshotException.java | 28 +
.../PBImageDelimitedTextWriter.java | 27 +-
.../offlineImageViewer/PBImageTextWriter.java | 90 +-
.../apache/hadoop/hdfs/util/FoldedTreeSet.java | 1285 +++++++++++++++++
.../org/apache/hadoop/hdfs/web/AuthFilter.java | 10 +-
.../org/apache/hadoop/hdfs/web/JsonUtil.java | 27 +-
.../StorageContainerManager.java | 5 +-
.../StorageContainerNameService.java | 21 -
.../src/main/proto/DatanodeProtocol.proto | 17 +-
.../src/main/resources/hdfs-default.xml | 89 +-
.../src/main/webapps/datanode/index.html | 2 +-
.../src/main/webapps/hdfs/dfshealth.html | 2 +-
.../src/main/webapps/hdfs/explorer.html | 18 +-
.../src/main/webapps/hdfs/explorer.js | 38 +
.../src/main/webapps/journal/index.html | 2 +-
.../src/main/webapps/secondary/status.html | 2 +-
.../src/main/webapps/static/rest-csrf.js | 91 ++
.../src/site/markdown/HDFSCommands.md | 2 +
.../src/site/markdown/TransparentEncryption.md | 64 +-
.../hadoop-hdfs/src/site/markdown/ViewFs.md | 8 +-
.../hadoop-hdfs/src/site/markdown/WebHDFS.md | 36 +
.../apache/hadoop/cli/TestErasureCodingCLI.java | 6 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +-
.../hdfs/ErasureCodeBenchmarkThroughput.java | 17 +-
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 22 +
.../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +-
.../TestClientProtocolForPipelineRecovery.java | 60 +-
.../hadoop/hdfs/TestDFSClientRetries.java | 8 +-
.../hadoop/hdfs/TestDFSStripedInputStream.java | 35 +-
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 5 +
.../TestDFSStripedOutputStreamWithFailure.java | 119 +-
.../apache/hadoop/hdfs/TestEncryptionZones.java | 60 +-
.../TestErasureCodeBenchmarkThroughput.java | 5 +
.../TestErasureCodingPolicyWithSnapshot.java | 8 +-
.../org/apache/hadoop/hdfs/TestFileAppend.java | 23 +-
.../org/apache/hadoop/hdfs/TestFileAppend2.java | 8 -
.../org/apache/hadoop/hdfs/TestFileAppend4.java | 5 -
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 5 +
.../org/apache/hadoop/hdfs/TestLargeBlock.java | 5 -
.../hadoop/hdfs/TestLeaseRecoveryStriped.java | 58 +-
.../hdfs/TestReadStripedFileWithDecoding.java | 29 +-
.../TestReadStripedFileWithMissingBlocks.java | 55 +-
.../hadoop/hdfs/TestReconstructStripedFile.java | 431 ++++++
.../hadoop/hdfs/TestRecoverStripedFile.java | 425 ------
.../apache/hadoop/hdfs/TestRollingUpgrade.java | 4 +-
.../hdfs/TestSafeModeWithStripedFile.java | 9 +-
.../hadoop/hdfs/TestWriteReadStripedFile.java | 5 +
.../hdfs/TestWriteStripedFileWithFailure.java | 7 +-
.../apache/hadoop/hdfs/UpgradeUtilities.java | 10 +
.../hdfs/protocol/TestBlockListAsLongs.java | 4 +-
.../hadoop/hdfs/protocolPB/TestPBHelper.java | 28 +-
.../blockmanagement/BlockManagerTestUtil.java | 2 +-
.../server/blockmanagement/TestBlockInfo.java | 88 --
.../blockmanagement/TestBlockInfoStriped.java | 5 +
.../blockmanagement/TestBlockManager.java | 75 +-
.../TestBlockManagerSafeMode.java | 8 +-
.../TestBlockTokenWithDFSStriped.java | 5 +
.../TestBlocksWithNotEnoughRacks.java | 12 +-
.../blockmanagement/TestPendingReplication.java | 9 +-
...constructStripedBlocksWithRackAwareness.java | 213 +++
.../TestReplicationPolicyWithNodeGroup.java | 102 +-
.../TestUnderReplicatedBlockQueues.java | 55 +-
.../server/datanode/SimulatedFSDataset.java | 5 +-
.../TestBlockHasMultipleReplicasOnSameDN.java | 20 +-
.../hdfs/server/datanode/TestBlockRecovery.java | 27 +-
.../hdfs/server/datanode/TestBlockScanner.java | 2 +-
.../hdfs/server/datanode/TestDataNodeECN.java | 6 +
.../datanode/TestDataNodeHotSwapVolumes.java | 47 +-
.../server/datanode/TestDataNodeMXBean.java | 55 +
.../datanode/TestDataNodeVolumeFailure.java | 25 +-
.../TestDataNodeVolumeFailureReporting.java | 9 +-
...TestDnRespectsBlockReportSplitThreshold.java | 4 +-
.../datanode/TestIncrementalBlockReports.java | 11 +-
.../TestNNHandlesBlockReportPerStorage.java | 3 +-
.../TestNNHandlesCombinedBlockReport.java | 2 +-
.../server/datanode/TestTriggerBlockReport.java | 12 +-
.../TestRoundRobinVolumeChoosingPolicy.java | 56 +
.../fsdataset/impl/TestFsDatasetImpl.java | 33 +
.../fsdataset/impl/TestFsVolumeList.java | 42 +-
.../hdfs/server/namenode/FSImageTestUtil.java | 33 +-
.../server/namenode/NNThroughputBenchmark.java | 65 +-
.../TestAddOverReplicatedStripedBlocks.java | 16 +-
.../server/namenode/TestAddStripedBlocks.java | 46 +-
.../server/namenode/TestCacheDirectives.java | 7 +-
.../hdfs/server/namenode/TestDeadDatanode.java | 2 +-
.../server/namenode/TestFSEditLogLoader.java | 8 +-
.../hdfs/server/namenode/TestFSImage.java | 18 +-
.../hadoop/hdfs/server/namenode/TestFsck.java | 23 +-
.../hdfs/server/namenode/TestINodeFile.java | 3 +-
.../namenode/TestNNThroughputBenchmark.java | 72 +-
.../TestNameNodeMetadataConsistency.java | 2 +
.../namenode/TestNestedEncryptionZones.java | 215 +++
.../namenode/TestQuotaWithStripedBlocks.java | 5 +
.../namenode/TestReconstructStripedBlocks.java | 236 ++++
.../namenode/TestRecoverStripedBlocks.java | 238 ----
.../server/namenode/TestStripedINodeFile.java | 5 +
.../server/namenode/ha/TestHAConfiguration.java | 23 +
.../namenode/ha/TestStandbyCheckpoints.java | 42 +
.../namenode/snapshot/TestSnapshotDeletion.java | 95 +-
.../shortcircuit/TestShortCircuitLocalRead.java | 73 +-
.../TestOfflineImageViewer.java | 36 +-
.../TestOfflineImageViewerForAcl.java | 36 +
.../hadoop/hdfs/util/FoldedTreeSetTest.java | 644 +++++++++
.../hadoop/hdfs/util/TestStripedBlockUtil.java | 5 +
.../apache/hadoop/hdfs/web/TestAuthFilter.java | 15 +
...TestWebHdfsWithRestCsrfPreventionFilter.java | 166 +++
.../sink/TestRollingFileSystemSinkWithHdfs.java | 286 ++++
...TestRollingFileSystemSinkWithSecureHdfs.java | 283 ++++
.../src/test/resources/testCryptoConf.xml | 8 +-
.../test/resources/testErasureCodingConf.xml | 4 +-
hadoop-mapreduce-project/CHANGES.txt | 62 +-
.../hadoop/mapred/LocalContainerLauncher.java | 4 +-
.../jobhistory/JobHistoryEventHandler.java | 6 +-
.../v2/app/commit/CommitterEventHandler.java | 3 +-
.../mapreduce/v2/app/job/impl/JobImpl.java | 3 +-
.../v2/app/launcher/ContainerLauncherImpl.java | 3 +-
.../mapred/LocalDistributedCacheManager.java | 5 +-
.../apache/hadoop/mapred/LocalJobRunner.java | 8 +-
.../mapreduce/v2/jobhistory/JHAdminConfig.java | 2 +-
.../hadoop-mapreduce-client-core/pom.xml | 5 +
.../org/apache/hadoop/mapred/JobClient.java | 8 +
.../hadoop/mapred/LocatedFileStatusFetcher.java | 4 +-
.../java/org/apache/hadoop/mapred/TaskLog.java | 31 +-
.../mapred/lib/MultithreadedMapRunner.java | 4 +-
.../apache/hadoop/mapreduce/JobSubmitter.java | 7 +-
.../mapreduce/jobhistory/HistoryViewer.java | 480 +------
.../jobhistory/HistoryViewerPrinter.java | 39 +
.../HumanReadableHistoryViewerPrinter.java | 471 +++++++
.../jobhistory/JSONHistoryViewerPrinter.java | 256 ++++
.../lib/input/UncompressedSplitLineReader.java | 7 +-
.../org/apache/hadoop/mapreduce/tools/CLI.java | 79 +-
.../src/main/resources/mapred-default.xml | 2 +-
.../src/site/markdown/EncryptedShuffle.md | 2 +-
.../src/site/markdown/MapReduceTutorial.md | 6 +-
.../src/site/markdown/MapredCommands.md | 4 +-
.../hadoop/mapred/TestFileInputFormat.java | 2 +-
.../hadoop/mapred/TestLineRecordReader.java | 53 +
.../jobhistory/TestHistoryViewerPrinter.java | 945 +++++++++++++
.../lib/input/TestFileInputFormat.java | 2 +-
.../lib/output/TestFileOutputCommitter.java | 4 +-
.../mapreduce/v2/hs/HistoryFileManager.java | 6 +-
.../hadoop/mapreduce/v2/hs/JobHistory.java | 3 +-
.../mapreduce/v2/hs/webapp/HsJobBlock.java | 2 +-
.../mapreduce/v2/hs/webapp/HsJobsBlock.java | 2 +-
.../mapreduce/v2/hs/webapp/dao/JobInfo.java | 12 +
.../mapreduce/v2/hs/webapp/dao/TestJobInfo.java | 26 +-
.../hadoop/mapred/ResourceMgrDelegate.java | 7 +
.../hadoop/mapred/TestClientRedirect.java | 8 +
.../hadoop/mapreduce/TestMRJobClient.java | 160 ++-
.../mapreduce/TimelineServicePerformance.java | 2 +-
.../apache/hadoop/mapred/ShuffleHandler.java | 6 +-
.../org/apache/hadoop/examples/pi/Util.java | 5 +-
hadoop-project/pom.xml | 13 +-
hadoop-project/src/site/site.xml | 2 +
.../org/apache/hadoop/fs/s3a/Constants.java | 3 +
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 267 ++--
.../src/site/markdown/tools/hadoop-aws/index.md | 54 +
.../hadoop/fs/s3a/TestS3AConfiguration.java | 151 +-
.../hadoop/fs/s3a/scale/S3AScaleTestBase.java | 11 +-
.../fs/s3a/scale/TestS3ADeleteManyFiles.java | 2 +-
.../fs/azure/AzureNativeFileSystemStore.java | 30 +
.../hadoop/fs/azure/NativeAzureFileSystem.java | 8 +-
.../hadoop/fs/azure/NativeFileSystemStore.java | 2 +
.../hadoop/fs/azure/SimpleKeyProvider.java | 16 +-
.../hadoop-azure/src/site/markdown/index.md | 52 +-
.../hadoop/fs/azure/MockStorageInterface.java | 33 +-
.../fs/azure/NativeAzureFileSystemBaseTest.java | 67 +
.../fs/azure/TestWasbUriAndConfiguration.java | 45 +-
.../org/apache/hadoop/tools/DistCpOptions.java | 10 +-
.../apache/hadoop/tools/TestOptionsParser.java | 15 +-
hadoop-tools/hadoop-kafka/pom.xml | 2 -
hadoop-yarn-project/CHANGES.txt | 121 +-
.../yarn/api/ApplicationClientProtocol.java | 49 +-
.../protocolrecords/ReservationListRequest.java | 228 +++
.../ReservationListResponse.java | 79 ++
.../hadoop/yarn/api/records/ReservationACL.java | 56 +
.../api/records/ReservationAllocationState.java | 191 +++
.../api/records/ResourceAllocationRequest.java | 123 ++
.../hadoop/yarn/conf/YarnConfiguration.java | 5 +
.../main/proto/applicationclient_protocol.proto | 1 +
.../src/main/proto/yarn_protos.proto | 17 +
.../src/main/proto/yarn_service_protos.proto | 12 +
.../applications/distributedshell/Client.java | 3 +-
.../hadoop/yarn/client/api/YarnClient.java | 49 +-
.../yarn/client/api/impl/AMRMClientImpl.java | 1 +
.../yarn/client/api/impl/YarnClientImpl.java | 10 +-
.../hadoop/yarn/client/cli/RMAdminCLI.java | 25 +-
.../apache/hadoop/yarn/client/cli/TopCLI.java | 18 +-
.../yarn/client/util/YarnClientUtils.java | 113 ++
.../hadoop/yarn/client/util/package-info.java | 20 +
.../yarn/client/api/impl/TestYarnClient.java | 173 +++
.../hadoop/yarn/client/cli/TestRMAdminCLI.java | 32 +-
.../hadoop/yarn/client/cli/TestYarnCLI.java | 9 +-
.../yarn/client/util/TestYarnClientUtils.java | 319 +++++
.../ApplicationClientProtocolPBClientImpl.java | 17 +
.../ApplicationClientProtocolPBServiceImpl.java | 24 +-
.../impl/pb/ReservationListRequestPBImpl.java | 178 +++
.../impl/pb/ReservationListResponsePBImpl.java | 157 +++
.../pb/ReservationAllocationStatePBImpl.java | 288 ++++
.../pb/ResourceAllocationRequestPBImpl.java | 188 +++
.../RequestHedgingRMFailoverProxyProvider.java | 4 +-
.../hadoop/yarn/webapp/hamlet/Hamlet.java | 11 +-
.../hadoop/yarn/webapp/util/WebAppUtils.java | 4 +-
.../src/main/resources/yarn-default.xml | 56 +-
.../hadoop/yarn/api/TestPBImplRecords.java | 22 +-
.../apache/hadoop/yarn/util/TestFSDownload.java | 15 +-
.../yarn/webapp/WebServicesTestUtils.java | 28 +
.../server/services/RegistryAdminService.java | 4 +-
.../hadoop/yarn/server/nodemanager/Context.java | 2 +
.../server/nodemanager/DeletionService.java | 35 +-
.../yarn/server/nodemanager/NodeManager.java | 10 +
.../containermanager/ContainerManagerImpl.java | 26 +-
.../container/ContainerImpl.java | 25 +-
.../launcher/ContainersLauncher.java | 4 +-
.../localizer/ContainerLocalizer.java | 4 +-
.../localizer/ResourceLocalizationService.java | 8 +-
.../sharedcache/SharedCacheUploadService.java | 4 +-
.../logaggregation/LogAggregationService.java | 4 +-
.../loghandler/NonAggregatingLogHandler.java | 3 +-
.../recovery/NMLeveldbStateStoreService.java | 44 +
.../recovery/NMStateStoreService.java | 11 +
.../nodemanager/webapp/dao/ContainerInfo.java | 1 -
.../impl/container-executor.c | 231 ++--
.../test/test-container-executor.c | 10 +-
.../yarn/server/nodemanager/TestEventFlow.java | 1 +
.../nodemanager/TestNodeStatusUpdater.java | 31 +-
.../amrmproxy/BaseAMRMProxyTest.java | 5 +
.../amrmproxy/MockResourceManagerFacade.java | 14 +-
.../BaseContainerManagerTest.java | 1 +
.../containermanager/TestAuxServices.java | 7 +-
.../container/TestContainer.java | 16 +-
.../nodemanager/webapp/TestNMWebServer.java | 5 +-
.../webapp/TestNMWebServicesContainers.java | 39 +-
.../server/resourcemanager/AdminService.java | 6 +-
.../server/resourcemanager/ClientRMService.java | 158 ++-
.../resourcemanager/NodesListManager.java | 122 +-
.../server/resourcemanager/RMAuditLogger.java | 2 +
.../server/resourcemanager/ResourceManager.java | 5 +
.../blacklist/SimpleBlacklistManager.java | 9 +-
.../recovery/FileSystemRMStateStore.java | 2 +-
.../recovery/LeveldbRMStateStore.java | 2 +-
.../recovery/MemoryRMStateStore.java | 2 +-
.../recovery/NullRMStateStore.java | 2 +-
.../resourcemanager/recovery/RMStateStore.java | 2 +-
.../RMStateStoreStoreReservationEvent.java | 2 +-
.../recovery/ZKRMStateStore.java | 2 +-
.../reservation/AbstractReservationSystem.java | 16 +-
.../reservation/CapacityOverTimePolicy.java | 18 +-
.../reservation/InMemoryPlan.java | 77 +-
.../resourcemanager/reservation/PlanView.java | 31 +
.../reservation/ReservationInputValidator.java | 122 +-
.../ReservationSchedulerConfiguration.java | 15 +
.../reservation/ReservationSystem.java | 9 +
.../reservation/ReservationSystemUtil.java | 45 +-
.../planning/AlignedPlannerWithGreedy.java | 4 +-
.../planning/GreedyReservationAgent.java | 40 +-
.../reservation/planning/IterativePlanner.java | 239 +++-
.../planning/StageAllocatorGreedyRLE.java | 245 ++++
.../rmapp/attempt/RMAppAttempt.java | 6 +
.../rmapp/attempt/RMAppAttemptImpl.java | 17 +
.../rmcontainer/AllocationExpirationInfo.java | 75 +
.../rmcontainer/ContainerAllocationExpirer.java | 11 +-
.../rmcontainer/RMContainer.java | 2 +
.../rmcontainer/RMContainerImpl.java | 132 +-
.../RMContainerNMDoneChangeResourceEvent.java | 37 +
.../resourcemanager/rmnode/RMNodeImpl.java | 61 +-
.../scheduler/AbstractYarnScheduler.java | 56 +-
.../scheduler/capacity/CapacityScheduler.java | 59 +-
.../CapacitySchedulerConfiguration.java | 38 +
.../event/ContainerExpiredSchedulerEvent.java | 12 +-
.../scheduler/fair/AllocationConfiguration.java | 20 +-
.../fair/AllocationFileLoaderService.java | 28 +-
.../scheduler/fair/FSAppAttempt.java | 7 +-
.../scheduler/fair/FSParentQueue.java | 6 +-
.../scheduler/fair/FairScheduler.java | 15 +
.../scheduler/fifo/FifoScheduler.java | 16 +
.../security/ReservationsACLsManager.java | 92 ++
.../webapp/RMAppAttemptBlock.java | 46 +-
.../resourcemanager/webapp/RMAppBlock.java | 32 +-
.../resourcemanager/webapp/RMAppsBlock.java | 5 +-
.../resourcemanager/webapp/RMWebServices.java | 97 +-
.../webapp/dao/AMBlackListingRequestInfo.java | 61 +
.../webapp/dao/AppAttemptInfo.java | 5 +
.../dao/ApplicationSubmissionContextInfo.java | 51 +
.../webapp/dao/LogAggregationContextInfo.java | 108 ++
.../webapp/dao/ReservationDefinitionInfo.java | 10 +
.../webapp/dao/ReservationIdInfo.java | 64 +
.../webapp/dao/ReservationInfo.java | 105 ++
.../webapp/dao/ReservationListInfo.java | 53 +
.../webapp/dao/ReservationRequestInfo.java | 9 +
.../webapp/dao/ReservationRequestsInfo.java | 11 +
.../webapp/dao/ResourceAllocationInfo.java | 72 +
.../yarn_server_resourcemanager_recovery.proto | 16 -
.../server/resourcemanager/ACLsTestBase.java | 123 ++
.../server/resourcemanager/Application.java | 10 +-
.../yarn/server/resourcemanager/MockNM.java | 20 +
.../yarn/server/resourcemanager/MockRM.java | 16 +
.../resourcemanager/QueueACLsTestBase.java | 87 +-
.../ReservationACLsTestBase.java | 600 ++++++++
.../resourcemanager/TestClientRMService.java | 179 +++
.../resourcemanager/TestRMAdminService.java | 35 +-
.../resourcemanager/TestRMNodeTransitions.java | 52 +-
.../server/resourcemanager/TestRMRestart.java | 11 +-
.../TestReservationSystemWithRMHA.java | 2 +-
.../TestResourceTrackerService.java | 189 ++-
.../recovery/RMStateStoreTestBase.java | 6 +-
.../reservation/ReservationSystemTestUtil.java | 19 +-
.../reservation/TestInMemoryPlan.java | 314 +++--
.../TestReservationInputValidator.java | 103 +-
.../reservation/TestReservationSystemUtil.java | 134 ++
.../planning/TestGreedyReservationAgent.java | 120 +-
.../rmcontainer/TestRMContainerImpl.java | 112 +-
.../capacity/TestCapacityScheduler.java | 89 +-
.../capacity/TestContainerResizing.java | 44 +-
.../capacity/TestIncreaseAllocationExpirer.java | 443 ++++++
.../scheduler/capacity/TestUtils.java | 15 +-
.../scheduler/fair/TestFSLeafQueue.java | 4 +-
.../scheduler/fair/TestFairScheduler.java | 1295 ++----------------
.../fair/TestFairSchedulerPreemption.java | 1295 +++++++++++++++++-
.../scheduler/fifo/TestFifoScheduler.java | 107 +-
.../webapp/TestRMWebServicesApps.java | 2 +-
.../TestRMWebServicesAppsModification.java | 74 +-
.../webapp/TestRMWebServicesReservation.java | 542 +++++++-
.../src/test/resources/submit-reservation.json | 6 +-
.../sharedcachemanager/CleanerService.java | 4 +-
.../store/InMemorySCMStore.java | 4 +-
.../store/TestInMemorySCMStore.java | 7 +-
.../src/site/markdown/FairScheduler.md | 2 +-
.../src/site/markdown/NodeLabel.md | 86 +-
.../src/site/markdown/ResourceManagerRest.md | 54 +-
.../src/site/markdown/SecureContainer.md | 2 +-
.../src/site/markdown/TimelineServer.md | 13 +-
.../site/markdown/YarnApplicationSecurity.md | 560 ++++++++
.../src/site/markdown/YarnCommands.md | 3 +
518 files changed, 27880 insertions(+), 7250 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 753fee7,f7cde90..4d3bff6
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@@ -35,50 -30,14 +35,19 @@@ import java.util.Iterator
* block's metadata currently includes blockCollection it belongs to and
* the datanodes that store the block.
*/
-class BlocksMap {
+public class BlocksMap {
- private static class StorageIterator implements Iterator<DatanodeStorageInfo> {
- private final BlockInfo blockInfo;
- private int nextIdx = 0;
-
- StorageIterator(BlockInfo blkInfo) {
- this.blockInfo = blkInfo;
- }
-
- @Override
- public boolean hasNext() {
- if (blockInfo == null) {
- return false;
- }
- while (nextIdx < blockInfo.getCapacity() &&
- blockInfo.getDatanode(nextIdx) == null) {
- // note that for striped blocks there may be null in the triplets
- nextIdx++;
- }
- return nextIdx < blockInfo.getCapacity();
- }
-
- @Override
- public DatanodeStorageInfo next() {
- return blockInfo.getStorageInfo(nextIdx++);
- }
-
- @Override
- public void remove() {
- throw new UnsupportedOperationException("Sorry. can't remove.");
- }
- }
/** Constant {@link LightWeightGSet} capacity. */
private final int capacity;
private GSet<Block, BlockInfo> blocks;
- BlocksMap(int capacity) {
+ public BlocksMap(int capacity, GSet<Block, BlockInfo> b) {
+ this.capacity = capacity;
+ this.blocks = b;
+ }
+
- BlocksMap(int capacity) {
++ public BlocksMap(int capacity) {
// Use 2% of total memory to size the GSet capacity
this.capacity = capacity;
this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index a8f3674,c4729ea..1096cb1
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@@ -29,7 -27,9 +29,8 @@@ import org.apache.hadoop.hdfs.protocol.
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+ import org.apache.hadoop.hdfs.util.FoldedTreeSet;
-import com.google.common.annotations.VisibleForTesting;
/**
* A Datanode has one or more storages. A storage in the Datanode is represented
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 3e4dc65,cc536d3..e2d46c5
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@@ -30,9 -30,8 +30,9 @@@ import org.apache.hadoop.hdfs.protocol.
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.*;
- import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
+ import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a85e29d,4b987b0..b7c5260
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@@ -98,23 -92,8 +95,9 @@@ class BPServiceActor implements Runnabl
}
private volatile RunningState runningState = RunningState.CONNECTING;
-
- /**
- * Between block reports (which happen on the order of once an hour) the
- * DN reports smaller incremental changes to its block list. This map,
- * keyed by block ID, contains the pending changes which have yet to be
- * reported to the NN. Access should be synchronized on this object.
- */
- private final Map<DatanodeStorage, PerStoragePendingIncrementalBR>
- pendingIncrementalBRperStorage = Maps.newHashMap();
-
- // IBR = Incremental Block Report. If this flag is set then an IBR will be
- // sent immediately by the actor thread without waiting for the IBR timer
- // to elapse.
- private volatile boolean sendImmediateIBR = false;
private volatile boolean shouldServiceRun = true;
private final DataNode dn;
+ private DatasetSpi<? extends VolumeSpi> dataset = null;
private final DNConf dnConf;
private long prevBlockReportId;
@@@ -235,129 -221,8 +225,7 @@@
register(nsInfo);
}
-
/**
- * Report received blocks and delete hints to the Namenode for each
- * storage.
- *
- * @throws IOException
- */
- private void reportReceivedDeletedBlocks() throws IOException {
-
- // Generate a list of the pending reports for each storage under the lock
- ArrayList<StorageReceivedDeletedBlocks> reports =
- new ArrayList<StorageReceivedDeletedBlocks>(pendingIncrementalBRperStorage.size());
- synchronized (pendingIncrementalBRperStorage) {
- for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
- pendingIncrementalBRperStorage.entrySet()) {
- final DatanodeStorage storage = entry.getKey();
- final PerStoragePendingIncrementalBR perStorageMap = entry.getValue();
-
- if (perStorageMap.getBlockInfoCount() > 0) {
- // Send newly-received and deleted blockids to namenode
- ReceivedDeletedBlockInfo[] rdbi = perStorageMap.dequeueBlockInfos();
- reports.add(new StorageReceivedDeletedBlocks(storage, rdbi));
- }
- }
- sendImmediateIBR = false;
- }
-
- if (reports.size() == 0) {
- // Nothing new to report.
- return;
- }
-
- // Send incremental block reports to the Namenode outside the lock
- boolean success = false;
- final long startTime = monotonicNow();
- try {
- bpNamenode.blockReceivedAndDeleted(bpRegistration,
- bpos.getBlockPoolId(),
- reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
- success = true;
- } finally {
- dn.getMetrics().addIncrementalBlockReport(monotonicNow() - startTime);
- if (!success) {
- synchronized (pendingIncrementalBRperStorage) {
- for (StorageReceivedDeletedBlocks report : reports) {
- // If we didn't succeed in sending the report, put all of the
- // blocks back onto our queue, but only in the case where we
- // didn't put something newer in the meantime.
- PerStoragePendingIncrementalBR perStorageMap =
- pendingIncrementalBRperStorage.get(report.getStorage());
- perStorageMap.putMissingBlockInfos(report.getBlocks());
- sendImmediateIBR = true;
- }
- }
- }
- }
- }
-
- /**
- * @return pending incremental block report for given {@code storage}
- */
- private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
- DatanodeStorage storage) {
- PerStoragePendingIncrementalBR mapForStorage =
- pendingIncrementalBRperStorage.get(storage);
-
- if (mapForStorage == null) {
- // This is the first time we are adding incremental BR state for
- // this storage so create a new map. This is required once per
- // storage, per service actor.
- mapForStorage = new PerStoragePendingIncrementalBR();
- pendingIncrementalBRperStorage.put(storage, mapForStorage);
- }
-
- return mapForStorage;
- }
-
- /**
- * Add a blockInfo for notification to NameNode. If another entry
- * exists for the same block it is removed.
- *
- * Caller must synchronize access using pendingIncrementalBRperStorage.
- */
- void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
- DatanodeStorage storage) {
- // Make sure another entry for the same block is first removed.
- // There may only be one such entry.
- for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
- pendingIncrementalBRperStorage.entrySet()) {
- if (entry.getValue().removeBlockInfo(bInfo)) {
- break;
- }
- }
- getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
- }
-
- /*
- * Informing the name node could take a long long time! Should we wait
- * till namenode is informed before responding with success to the
- * client? For now we don't.
- */
- void notifyNamenodeBlock(ReceivedDeletedBlockInfo bInfo,
- String storageUuid, boolean now) {
- synchronized (pendingIncrementalBRperStorage) {
- addPendingReplicationBlockInfo(
- bInfo, dataset.getStorage(storageUuid));
- sendImmediateIBR = true;
- // If now is true, the report is sent right away.
- // Otherwise, it will be sent out in the next heartbeat.
- if (now) {
- pendingIncrementalBRperStorage.notifyAll();
- }
- }
- }
-
- void notifyNamenodeDeletedBlock(
- ReceivedDeletedBlockInfo bInfo, String storageUuid) {
- synchronized (pendingIncrementalBRperStorage) {
- addPendingReplicationBlockInfo(
- bInfo, dataset.getStorage(storageUuid));
- }
- }
-
- /**
* Run an immediate block report on this thread. Used by tests.
*/
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 044def6,aed3eaa..f4f5342
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -48,9 -48,9 +48,11 @@@ import static org.apache.hadoop.hdfs.DF
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
+
+ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService;
import java.io.BufferedOutputStream;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index bf4831c,6697054..1fba200
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@@ -325,10 -327,10 +327,10 @@@ public class DataStorage extends Storag
}
StorageDirectory sd = loadStorageDirectory(
- datanode, nsInfos.iterator().next(), volume, StartupOption.HOTSWAP);
- datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP, null);
++ datanode, nsInfos.iterator().next(), volume, StartupOption.HOTSWAP, null);
VolumeBuilder builder =
new VolumeBuilder(this, sd);
- for (NamespaceInfo nsInfo : nsInfos) {
+ for (final NamespaceInfo nsInfo : nsInfos) {
List<File> bpDataDirs = Lists.newArrayList();
bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index fa127c9,d1cb836..67ca784
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@@ -453,17 -471,9 +469,9 @@@ class FsDatasetImpl implements FsDatase
final FsVolumeReference ref = fsVolume.obtainReference();
setupAsyncLazyPersistThread(fsVolume);
- synchronized (this) {
- volumeMap.addAll(tempVolumeMap);
- storageMap.put(sd.getStorageUuid(),
- new DatanodeStorage(sd.getStorageUuid(),
- DatanodeStorage.State.NORMAL,
- storageType));
- asyncDiskService.addVolume(sd.getCurrentDir());
- volumes.addVolume(ref);
- }
- builder.build();
+ activateVolume(tempVolumeMap, sd, storageType, ref);
- LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+ LOG.info("Added volume - " + location.getFile() +
+ ", StorageType: " + storageType);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index a3b7dec,f9bdbf6..fd6f564
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@@ -46,8 -54,8 +55,9 @@@ import org.apache.hadoop.hdfs.server.da
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
import org.apache.hadoop.security.authorize.AccessControlList;
+ import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
import org.apache.hadoop.security.ssl.SSLFactory;
import java.io.Closeable;
@@@ -81,17 -90,11 +92,18 @@@ public class DatanodeHttpServer impleme
public DatanodeHttpServer(final Configuration conf,
final DataNode datanode,
- final ServerSocketChannel externalHttpChannel)
- throws IOException {
+ final ServerSocketChannel externalHttpChannel,
+ final ObjectStoreHandler objectStoreHandler) throws IOException {
+ this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
this.conf = conf;
+ final ObjectStoreJerseyContainer finalContainer;
+ if (objectStoreHandler != null) {
+ finalContainer = objectStoreHandler.getObjectStoreJerseyContainer();
+ } else {
+ finalContainer = null;
+ }
+
Configuration confForInfoServer = new Configuration(conf);
confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
HttpServer2.Builder builder = new HttpServer2.Builder()
@@@ -126,8 -129,7 +138,8 @@@
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new PortUnificationServerHandler(jettyAddr,
- conf, confForCreate, restCsrfPreventionFilter));
+ conf, confForCreate,
- finalContainer));
++ finalContainer, restCsrfPreventionFilter));
}
});
@@@ -175,12 -177,16 +187,16 @@@
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(
- new SslHandler(sslFactory.createSSLEngine()),
- new HttpRequestDecoder(),
- new HttpResponseEncoder(),
- new ChunkedWriteHandler(),
- new URLDispatcher(jettyAddr, conf, confForCreate,
- finalContainer));
+ new SslHandler(sslFactory.createSSLEngine()),
+ new HttpRequestDecoder(),
+ new HttpResponseEncoder());
+ if (restCsrfPreventionFilter != null) {
+ p.addLast(new RestCsrfPreventionFilterHandler(
+ restCsrfPreventionFilter));
+ }
+ p.addLast(
+ new ChunkedWriteHandler(),
- new URLDispatcher(jettyAddr, conf, confForCreate));
++ new URLDispatcher(jettyAddr, conf, confForCreate, finalContainer));
}
});
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
index bb5cd57,ff10c6d..a6f3cbf
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
@@@ -53,21 -52,25 +54,30 @@@ public class PortUnificationServerHandl
private final Configuration confForCreate;
+ private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
+
+ private final RestCsrfPreventionFilter restCsrfPreventionFilter;
+
public PortUnificationServerHandler(InetSocketAddress proxyHost,
Configuration conf, Configuration confForCreate,
- ObjectStoreJerseyContainer container) {
++ ObjectStoreJerseyContainer container,
+ RestCsrfPreventionFilter restCsrfPreventionFilter) {
this.proxyHost = proxyHost;
this.conf = conf;
this.confForCreate = confForCreate;
+ this.objectStoreJerseyContainer = container;
+ this.restCsrfPreventionFilter = restCsrfPreventionFilter;
}
- private void configureHttp1(ChannelHandlerContext ctx) {
+ private void configureHttp1(ChannelHandlerContext ctx) throws IOException {
- ctx.pipeline().addLast(new HttpServerCodec(), new ChunkedWriteHandler(),
+ ctx.pipeline().addLast(new HttpServerCodec());
+ if (this.restCsrfPreventionFilter != null) {
+ ctx.pipeline().addLast(new RestCsrfPreventionFilterHandler(
+ this.restCsrfPreventionFilter));
+ }
+ ctx.pipeline().addLast(new ChunkedWriteHandler(),
- new URLDispatcher(proxyHost, conf, confForCreate));
+ new URLDispatcher(proxyHost, conf, confForCreate,
+ objectStoreJerseyContainer));
}
private void configureHttp2(ChannelHandlerContext ctx) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
index c85a554,0000000..beed82e
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
@@@ -1,323 -1,0 +1,324 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.WritableRpcEngine;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.storagecontainer.protocol.ContainerLocationProtocol;
+import org.apache.hadoop.util.LightWeightGSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
+/**
+ * Service that allocates storage containers and tracks their
+ * location.
+ */
+public class StorageContainerManager
+ implements DatanodeProtocol, ContainerLocationProtocol {
+
+ public static final Logger LOG =
+ LoggerFactory.getLogger(StorageContainerManager.class);
+
+ private final Namesystem ns = new StorageContainerNameService();
+ private final BlockManager blockManager;
+
+ private long txnId = 234;
+
+ /** The RPC server that listens to requests from DataNodes. */
+ private final RPC.Server serviceRpcServer;
+ private final InetSocketAddress serviceRPCAddress;
+
+ /** The RPC server that listens to requests from clients. */
+ private final RPC.Server clientRpcServer;
+ private final InetSocketAddress clientRpcAddress;
+
+ public StorageContainerManager(OzoneConfiguration conf)
+ throws IOException {
+ BlocksMap containerMap = new BlocksMap(
+ LightWeightGSet.computeCapacity(2.0, "BlocksMap"),
+ new StorageContainerMap());
+ this.blockManager = new BlockManager(ns, false, conf, containerMap);
+
+ int handlerCount =
+ conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
+ DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
+
+ RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
+ ProtobufRpcEngine.class);
+
+ DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
+ new DatanodeProtocolServerSideTranslatorPB(this);
+ BlockingService dnProtoPbService =
+ DatanodeProtocolProtos.DatanodeProtocolService
+ .newReflectiveBlockingService(dnProtoPbTranslator);
+
+ WritableRpcEngine.ensureInitialized();
+
+ InetSocketAddress serviceRpcAddr = NameNode.getServiceAddress(conf, false);
+ if (serviceRpcAddr != null) {
+ String bindHost =
+ conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
+ if (bindHost == null || bindHost.isEmpty()) {
+ bindHost = serviceRpcAddr.getHostName();
+ }
+ LOG.info("Service RPC server is binding to " + bindHost + ":" +
+ serviceRpcAddr.getPort());
+
+ int serviceHandlerCount =
+ conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+ DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+ serviceRpcServer = new RPC.Builder(conf)
+ .setProtocol(
+ org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB.class)
+ .setInstance(dnProtoPbService)
+ .setBindAddress(bindHost)
+ .setPort(serviceRpcAddr.getPort())
+ .setNumHandlers(serviceHandlerCount)
+ .setVerbose(false)
+ .setSecretManager(null)
+ .build();
+
+ DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+ serviceRpcServer);
+
+ InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+ serviceRPCAddress = new InetSocketAddress(
+ serviceRpcAddr.getHostName(), listenAddr.getPort());
+ conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ NetUtils.getHostPortString(serviceRPCAddress));
+ } else {
+ serviceRpcServer = null;
+ serviceRPCAddress = null;
+ }
+
+ InetSocketAddress rpcAddr = DFSUtilClient.getNNAddress(conf);
+ String bindHost = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY);
+ if (bindHost == null || bindHost.isEmpty()) {
+ bindHost = rpcAddr.getHostName();
+ }
+ LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
+
+ clientRpcServer = new RPC.Builder(conf)
+ .setProtocol(
+ org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB.class)
+ .setInstance(dnProtoPbService)
+ .setBindAddress(bindHost)
+ .setPort(rpcAddr.getPort())
+ .setNumHandlers(handlerCount)
+ .setVerbose(false)
+ .setSecretManager(null)
+ .build();
+
+ DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+ clientRpcServer);
+
+ // The rpc-server port can be ephemeral... ensure we have the correct info
+ InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
+ clientRpcAddress = new InetSocketAddress(
+ rpcAddr.getHostName(), listenAddr.getPort());
+ conf.set(FS_DEFAULT_NAME_KEY,
+ DFSUtilClient.getNNUri(clientRpcAddress).toString());
+ }
+
+ @Override
+ public DatanodeRegistration registerDatanode(
+ DatanodeRegistration registration) throws IOException {
+ ns.writeLock();
+ try {
+ blockManager.getDatanodeManager().registerDatanode(registration);
+ } finally {
+ ns.writeUnlock();
+ }
+ return registration;
+ }
+
+ @Override
+ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
+ StorageReport[] reports, long dnCacheCapacity, long dnCacheUsed,
+ int xmitsInProgress, int xceiverCount, int failedVolumes,
+ VolumeFailureSummary volumeFailureSummary,
+ boolean requestFullBlockReportLease) throws IOException {
+ ns.readLock();
+ try {
+ final int maxTransfer = blockManager.getMaxReplicationStreams()
+ - xmitsInProgress;
+ DatanodeCommand[] cmds = blockManager.getDatanodeManager()
- .handleHeartbeat(registration, reports, ns.getBlockPoolId(), 0, 0,
- xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
++ .handleHeartbeat(registration, reports, blockManager.getBlockPoolId(),
++ 0, 0, xceiverCount, maxTransfer, failedVolumes,
++ volumeFailureSummary);
+
+ return new HeartbeatResponse(cmds,
+ new NNHAStatusHeartbeat(HAServiceProtocol.HAServiceState.ACTIVE,
+ txnId), null, 0);
+ } finally {
+ ns.readUnlock();
+ }
+ }
+
+ @Override
+ public DatanodeCommand blockReport(DatanodeRegistration registration,
+ String poolId, StorageBlockReport[] reports,
+ BlockReportContext context) throws IOException {
+ for (int r = 0; r < reports.length; r++) {
+ final BlockListAsLongs storageContainerList = reports[r].getBlocks();
+ blockManager.processReport(registration, reports[r].getStorage(),
+ storageContainerList, context, (r == reports.length - 1));
+ }
+ return null;
+ }
+
+ @Override
+ public DatanodeCommand cacheReport(DatanodeRegistration registration,
+ String poolId, List<Long> blockIds) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void blockReceivedAndDeleted(DatanodeRegistration registration,
+ String poolId, StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
+ throws IOException {
+ for(StorageReceivedDeletedBlocks r : rcvdAndDeletedBlocks) {
+ ns.writeLock();
+ try {
+ blockManager.processIncrementalBlockReport(registration, r);
+ } finally {
+ ns.writeUnlock();
+ }
+ }
+ }
+
+ @Override
+ public void errorReport(DatanodeRegistration registration,
+ int errorCode, String msg) throws IOException {
+ String dnName =
+ (registration == null) ? "Unknown DataNode" : registration.toString();
+
+ if (errorCode == DatanodeProtocol.NOTIFY) {
+ LOG.info("Error report from " + dnName + ": " + msg);
+ return;
+ }
+
+ if (errorCode == DatanodeProtocol.DISK_ERROR) {
+ LOG.warn("Disk error on " + dnName + ": " + msg);
+ } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
+ LOG.warn("Fatal disk error on " + dnName + ": " + msg);
+ blockManager.getDatanodeManager().removeDatanode(registration);
+ } else {
+ LOG.info("Error report from " + dnName + ": " + msg);
+ }
+ }
+
+ @Override
+ public NamespaceInfo versionRequest() throws IOException {
+ ns.readLock();
+ try {
+ return unprotectedGetNamespaceInfo();
+ } finally {
+ ns.readUnlock();
+ }
+ }
+
+ private NamespaceInfo unprotectedGetNamespaceInfo() {
+ return new NamespaceInfo(1, "random", "random", 2,
+ NodeType.STORAGE_CONTAINER_SERVICE);
+ }
+
+ @Override
+ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+ // It doesn't make sense to have LocatedBlock in this API.
+ ns.writeLock();
+ try {
+ for (int i = 0; i < blocks.length; i++) {
+ ExtendedBlock blk = blocks[i].getBlock();
+ DatanodeInfo[] nodes = blocks[i].getLocations();
+ String[] storageIDs = blocks[i].getStorageIDs();
+ for (int j = 0; j < nodes.length; j++) {
+ blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+ storageIDs == null ? null: storageIDs[j],
+ "client machine reported it");
+ }
+ }
+ } finally {
+ ns.writeUnlock();
+ }
+ }
+
+ /**
+ * Start client and service RPC servers.
+ */
+ void start() {
+ clientRpcServer.start();
+ if (serviceRpcServer != null) {
+ serviceRpcServer.start();
+ }
+ }
+
+ /**
+ * Wait until the RPC servers have shutdown.
+ */
+ void join() throws InterruptedException {
+ clientRpcServer.join();
+ if (serviceRpcServer != null) {
+ serviceRpcServer.join();
+ }
+ }
+
+ @Override
+ public void commitBlockSynchronization(ExtendedBlock block,
+ long newgenerationstamp, long newlength, boolean closeFile,
+ boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages)
+ throws IOException {
+ // Not needed for the purpose of object store
+ throw new UnsupportedOperationException();
+ }
+
+ public static void main(String[] argv) throws IOException {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ StorageContainerManager scm = new StorageContainerManager(conf);
+ scm.start();
+ try {
+ scm.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
index 76e0bb8,0000000..751b635
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
@@@ -1,154 -1,0 +1,133 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.AccessControlException;
+
+import java.io.IOException;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Namesystem implementation to be used by StorageContainerManager.
+ * This is a minimal implementation: every state query (safe mode,
+ * snapshots, HA transition) reports the inactive/unsupported answer,
+ * and the Namesystem locking contract is backed by a single
+ * coarse-grained ReentrantReadWriteLock.
+ */
+public class StorageContainerNameService implements Namesystem {
+
+ // One coarse lock serves all Namesystem read/write lock operations.
+ private ReentrantReadWriteLock coarseLock = new ReentrantReadWriteLock();
- private String blockPoolId;
+ // volatile: written by shutdown() and read by other threads via isRunning().
+ private volatile boolean serviceRunning = true;
+
+ /** Marks the service as stopped; subsequent isRunning() calls return false. */
+ public void shutdown() {
+ serviceRunning = false;
+ }
+
+ @Override
+ public boolean isRunning() {
+ return serviceRunning;
+ }
+
+ @Override
- public void checkSuperuserPrivilege() throws AccessControlException {
- // TBD
- }
-
- @Override
- public String getBlockPoolId() {
- return blockPoolId;
- }
-
- public void setBlockPoolId(String id) {
- this.blockPoolId = id;
- }
-
- @Override
+ // No block-collection mapping is maintained here; always returns null.
+ public BlockCollection getBlockCollection(long id) {
+ return null;
+ }
+
+ @Override
+ // Delegation-token secret manager support is not implemented.
+ public void startSecretManagerIfNecessary() {
+ throw new NotImplementedException();
+ }
+
+ @Override
- public ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
- throws IOException {
- return null;
- }
-
- @Override
+ // Snapshots are not supported; nothing is ever reported as in a snapshot.
+ public boolean isInSnapshot(long blockCollectionID) {
+ return false;
+ }
+
+ @Override
+ public CacheManager getCacheManager() {
+ // Cache Management is not supported
+ return null;
+ }
+
+ @Override
+ // HA is not supported; there is no HA context to expose.
+ public HAContext getHAContext() {
+ return null;
+ }
+
+ /**
+ * @return whether the namenode is transitioning to active state and is in
+ * the middle of starting the active services; always false here, since
+ * this Namesystem has no HA state machine.
+ */
+ @Override
+ public boolean inTransitionToActive() {
+ return false;
+ }
+
+ @Override
+ public void readLock() {
+ coarseLock.readLock().lock();
+ }
+
+ @Override
+ public void readUnlock() {
+ coarseLock.readLock().unlock();
+ }
+
+ @Override
+ // Holding the write lock also counts as holding the read lock.
+ public boolean hasReadLock() {
+ return coarseLock.getReadHoldCount() > 0 || hasWriteLock();
+ }
+
+ @Override
+ public void writeLock() {
+ coarseLock.writeLock().lock();
+ }
+
+ @Override
+ public void writeLockInterruptibly() throws InterruptedException {
+ coarseLock.writeLock().lockInterruptibly();
+ }
+
+ @Override
+ public void writeUnlock() {
+ coarseLock.writeLock().unlock();
+ }
+
+ @Override
+ public boolean hasWriteLock() {
+ return coarseLock.isWriteLockedByCurrentThread();
+ }
+
+ @Override
+ // Safe mode is not supported; never reported as active.
+ public boolean isInSafeMode() {
+ return false;
+ }
+
+ @Override
+ public boolean isInStartupSafeMode() {
+ return false;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7df69d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------