Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/07/14 22:26:06 UTC

[50/50] hadoop git commit: Merge remote-tracking branch 'apache-commit/trunk' into HDFS-7240

Merge remote-tracking branch 'apache-commit/trunk' into HDFS-7240

# Conflicts:
#	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
#	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6da5a33b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6da5a33b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6da5a33b

Branch: refs/heads/HDFS-7240
Commit: 6da5a33bbe8b626dc9a3ad133abb2b1581560a97
Parents: 9e63be7 979c9ca
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Jul 14 13:24:46 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Jul 14 13:24:46 2015 -0700

----------------------------------------------------------------------
 dev-support/releasedocmaker.py                  |  76 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |  32 +-
 hadoop-common-project/hadoop-common/pom.xml     |   5 +
 .../java/org/apache/hadoop/fs/FileContext.java  |   3 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |  20 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |  17 +
 .../hadoop/fs/sftp/SFTPConnectionPool.java      | 303 +++++++
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   | 671 ++++++++++++++
 .../apache/hadoop/fs/sftp/SFTPInputStream.java  | 130 +++
 .../org/apache/hadoop/fs/sftp/package-info.java |  19 +
 .../java/org/apache/hadoop/fs/shell/Touch.java  |  84 ++
 .../java/org/apache/hadoop/fs/shell/Touchz.java |  84 --
 .../hadoop/io/compress/bzip2/Bzip2Factory.java  |   2 +-
 .../org/apache/hadoop/jmx/package-info.java     |   5 +-
 .../org/apache/hadoop/util/CpuTimeTracker.java  | 115 +++
 .../java/org/apache/hadoop/util/SysInfo.java    | 137 +++
 .../org/apache/hadoop/util/SysInfoLinux.java    | 690 +++++++++++++++
 .../org/apache/hadoop/util/SysInfoWindows.java  | 208 +++++
 .../src/site/markdown/DeprecatedProperties.md   |   1 +
 ...yptoStreamsWithOpensslAesCtrCryptoCodec.java |   3 +
 .../hadoop/fs/sftp/TestSFTPFileSystem.java      | 308 +++++++
 .../apache/hadoop/util/TestSysInfoLinux.java    | 432 ++++++++++
 .../apache/hadoop/util/TestSysInfoWindows.java  | 100 +++
 .../dev-support/findbugsExcludeFile.xml         |   4 +
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |  42 +
 .../hdfs/client/HdfsClientConfigKeys.java       |   3 +-
 .../org/apache/hadoop/hdfs/inotify/Event.java   |  37 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    | 606 ++++++-------
 .../src/main/proto/ClientDatanodeProtocol.proto | 247 ++++++
 .../src/main/proto/ClientNamenodeProtocol.proto | 863 +++++++++++++++++++
 .../hadoop-hdfs-client/src/main/proto/acl.proto | 108 +++
 .../src/main/proto/datatransfer.proto           | 304 +++++++
 .../src/main/proto/encryption.proto             |  67 ++
 .../src/main/proto/hdfs.proto                   | 619 +++++++++++++
 .../src/main/proto/inotify.proto                | 133 +++
 .../src/main/proto/xattr.proto                  |  75 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  41 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |  10 +-
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   1 +
 .../ClientNamenodeProtocolTranslatorPB.java     |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  17 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  18 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   7 +-
 .../blockmanagement/BlockInfoContiguous.java    |   9 +-
 .../BlockInfoUnderConstruction.java             |  22 +-
 .../BlockInfoUnderConstructionContiguous.java   |  13 +-
 .../server/blockmanagement/BlockManager.java    | 141 +--
 .../hdfs/server/blockmanagement/BlocksMap.java  |   4 +-
 .../ContiguousBlockStorageOp.java               |   7 +-
 .../blockmanagement/CorruptReplicasMap.java     |  62 +-
 .../hdfs/server/datanode/BlockReceiver.java     |  34 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  32 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  27 +-
 .../hdfs/server/namenode/FSDirConcatOp.java     |   2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   6 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java       |   4 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  33 +-
 .../namenode/InotifyFSEditLogOpTranslator.java  |   4 +
 .../hdfs/server/namenode/NamenodeFsck.java      |  12 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  10 +-
 .../src/main/proto/ClientDatanodeProtocol.proto | 247 ------
 .../src/main/proto/ClientNamenodeProtocol.proto | 863 -------------------
 .../hadoop-hdfs/src/main/proto/acl.proto        | 113 ---
 .../src/main/proto/datatransfer.proto           | 304 -------
 .../hadoop-hdfs/src/main/proto/editlog.proto    |  35 +
 .../hadoop-hdfs/src/main/proto/encryption.proto |  67 --
 .../hadoop-hdfs/src/main/proto/hdfs.proto       | 619 -------------
 .../hadoop-hdfs/src/main/proto/inotify.proto    | 126 ---
 .../hadoop-hdfs/src/main/proto/xattr.proto      |  80 --
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   6 +-
 .../hdfs/TestDFSInotifyEventInputStream.java    |  17 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  43 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  21 +
 .../blockmanagement/BlockManagerTestUtil.java   |   7 +-
 .../server/blockmanagement/TestBlockInfo.java   |  10 +-
 .../blockmanagement/TestBlockManager.java       |  10 +-
 .../blockmanagement/TestCorruptReplicaInfo.java |  15 +-
 .../hadoop/hdfs/server/mover/TestMover.java     |   2 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  21 +-
 .../hdfs/server/namenode/TestINodeFile.java     |   9 +
 .../hdfs/tools/TestDelegationTokenFetcher.java  |  39 +
 hadoop-mapreduce-project/CHANGES.txt            |  10 +
 .../dev-support/findbugs-exclude.xml            |   1 +
 .../mapreduce/v2/app/job/impl/JobImpl.java      |   4 +-
 .../v2/app/job/impl/TaskAttemptImpl.java        |   4 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java     |   4 +-
 .../src/site/markdown/MapReduceTutorial.md      |   2 +-
 .../hadoop/mapred/TestShuffleHandler.java       |   6 +-
 hadoop-maven-plugins/pom.xml                    |   8 +
 .../hadoop/maven/plugin/protoc/ProtocMojo.java  | 188 +++-
 hadoop-project/pom.xml                          |   5 +
 .../gridmix/DummyResourceCalculatorPlugin.java  |  36 +
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   8 +-
 .../yarn/sls/scheduler/RMNodeWrapper.java       |   5 +
 hadoop-yarn-project/CHANGES.txt                 |  38 +
 .../yarn/server/api/ContainerContext.java       |  19 +
 .../api/ContainerInitializationContext.java     |   7 +
 .../server/api/ContainerTerminationContext.java |   7 +
 .../hadoop/yarn/server/api/ContainerType.java   |  34 +
 .../apache/hadoop/yarn/util/package-info.java   |   2 -
 .../src/main/proto/yarn_protos.proto            |   5 +
 .../api/async/impl/NMClientAsyncImpl.java       |   4 +-
 .../yarn/client/TestResourceTrackerOnHA.java    |   2 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  12 +
 .../yarn/client/api/impl/package-info.java      |   4 -
 .../hadoop/yarn/client/api/package-info.java    |   4 -
 .../hadoop/yarn/factories/package-info.java     |   2 -
 .../yarn/factory/providers/package-info.java    |   2 -
 .../yarn/security/ContainerTokenIdentifier.java |  43 +-
 .../state/InvalidStateTransitionException.java  |  51 ++
 .../state/InvalidStateTransitonException.java   |  21 +-
 .../apache/hadoop/yarn/state/StateMachine.java  |   2 +-
 .../hadoop/yarn/state/StateMachineFactory.java  |  10 +-
 .../apache/hadoop/yarn/util/CpuTimeTracker.java | 100 ---
 .../util/LinuxResourceCalculatorPlugin.java     | 392 +--------
 .../yarn/util/ProcfsBasedProcessTree.java       |  34 +-
 .../yarn/util/ResourceCalculatorPlugin.java     | 107 ++-
 .../yarn/util/WindowsBasedProcessTree.java      |   2 +-
 .../util/WindowsResourceCalculatorPlugin.java   | 158 +---
 .../main/proto/server/yarn_security_token.proto |   1 +
 .../src/main/resources/yarn-default.xml         | 466 +++++++++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  44 +-
 .../yarn/security/TestYARNTokenIdentifier.java  |  53 ++
 .../util/TestLinuxResourceCalculatorPlugin.java | 324 -------
 .../util/TestResourceCalculatorProcessTree.java |   2 +-
 .../TestWindowsResourceCalculatorPlugin.java    |  86 --
 .../yarn/server/api/records/NodeStatus.java     |  43 +-
 .../server/api/records/ResourceUtilization.java | 133 +++
 .../api/records/impl/pb/NodeStatusPBImpl.java   |  34 +-
 .../impl/pb/ResourceUtilizationPBImpl.java      | 104 +++
 .../yarn/server/api/records/package-info.java   |  19 +
 .../main/proto/yarn_server_common_protos.proto  |   7 +
 .../nodemanager/NodeStatusUpdaterImpl.java      |  19 +-
 .../containermanager/AuxServices.java           |   6 +-
 .../application/ApplicationImpl.java            |   4 +-
 .../container/ContainerImpl.java                |   4 +-
 .../localizer/LocalizedResource.java            |   4 +-
 .../monitor/ContainersMonitor.java              |   3 +-
 .../monitor/ContainersMonitorImpl.java          |  28 +
 .../TestPrivilegedOperationExecutor.java        |   6 +-
 .../ProportionalCapacityPreemptionPolicy.java   |   4 +-
 .../resourcemanager/recovery/RMStateStore.java  |   4 +-
 .../recovery/ZKRMStateStore.java                |   3 +-
 .../reservation/GreedyReservationAgent.java     |  27 +-
 .../reservation/InMemoryPlan.java               |   9 +-
 .../InMemoryReservationAllocation.java          |  24 +-
 .../RLESparseResourceAllocation.java            |  43 +-
 .../reservation/ReservationAllocation.java      |   3 +-
 .../reservation/ReservationSystemUtil.java      |  51 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java |   4 +-
 .../rmapp/attempt/RMAppAttemptImpl.java         |   4 +-
 .../rmcontainer/RMContainerImpl.java            |   4 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   2 +
 .../resourcemanager/rmnode/RMNodeImpl.java      |  47 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  17 +-
 .../scheduler/capacity/LeafQueue.java           |   3 +-
 .../scheduler/capacity/ParentQueue.java         |   2 +-
 .../scheduler/fair/FSLeafQueue.java             |   9 +-
 .../scheduler/fair/FairScheduler.java           |  40 +-
 .../scheduler/fair/SchedulingPolicy.java        |  11 +
 .../DominantResourceFairnessPolicy.java         |  18 +-
 .../fair/policies/FairSharePolicy.java          |  11 +-
 .../scheduler/fair/policies/FifoPolicy.java     |  15 +-
 .../security/RMContainerTokenSecretManager.java |  13 +-
 .../resourcemanager/webapp/RMAppsBlock.java     |  10 +-
 .../server/resourcemanager/Application.java     |  12 +
 .../yarn/server/resourcemanager/MockNodes.java  |   5 +
 .../resourcemanager/TestRMNodeTransitions.java  |  36 +-
 ...estProportionalCapacityPreemptionPolicy.java | 253 ++++--
 ...pacityPreemptionPolicyForNodePartitions.java | 114 ++-
 .../reservation/ReservationSystemTestUtil.java  |  11 +-
 .../reservation/TestCapacityOverTimePolicy.java |  16 +-
 .../reservation/TestGreedyReservationAgent.java |   2 +-
 .../reservation/TestInMemoryPlan.java           |  37 +-
 .../TestInMemoryReservationAllocation.java      |  29 +-
 .../TestRLESparseResourceAllocation.java        |  33 +-
 .../TestSimpleCapacityReplanner.java            |  11 +-
 .../capacity/TestContainerAllocation.java       |   5 +-
 .../scheduler/capacity/TestQueueParsing.java    |  38 +
 .../scheduler/fair/FairSchedulerTestBase.java   |  31 +-
 .../scheduler/fair/TestFSLeafQueue.java         |  64 ++
 .../scheduler/fair/TestFairScheduler.java       | 207 ++++-
 .../scheduler/fifo/TestFifoScheduler.java       |   4 +
 185 files changed, 9070 insertions(+), 4678 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 0000000,86fb462..2df5955
mode 000000,100644..100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@@ -1,0 -1,611 +1,619 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ /**
+  * These .proto interfaces are private and stable.
+  * Please see http://wiki.apache.org/hadoop/Compatibility
+  * for what changes are allowed for a *stable* .proto interface.
+  */
+ 
+ // This file contains protocol buffers that are used throughout HDFS -- i.e.
+ // by the client, server, and data transfer protocols.
+ 
+ 
+ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+ option java_outer_classname = "HdfsProtos";
+ option java_generate_equals_and_hash = true;
+ package hadoop.hdfs;
+ 
+ import "Security.proto";
+ 
+ /**
+  * Extended block identifies a block
+  */
+ message ExtendedBlockProto {
+   required string poolId = 1;   // Block pool id - globally unique across clusters
+   required uint64 blockId = 2;  // the local id within a pool
+   required uint64 generationStamp = 3;
+   optional uint64 numBytes = 4 [default = 0];  // len does not belong in ebid;
+                                                // kept here for historical reasons
+ }
+ 
+ /**
+  * Identifies a Datanode
+  */
+ message DatanodeIDProto {
+   required string ipAddr = 1;    // IP address
+   required string hostName = 2;  // hostname
+   required string datanodeUuid = 3;     // UUID assigned to the Datanode. For
+                                         // upgraded clusters this is the same
+                                         // as the original StorageID of the
+                                         // Datanode.
+   required uint32 xferPort = 4;  // data streaming port
+   required uint32 infoPort = 5;  // datanode http port
+   required uint32 ipcPort = 6;   // ipc server port
+   optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
+ }
+ 
+ /**
+  * Datanode local information
+  */
+ message DatanodeLocalInfoProto {
+   required string softwareVersion = 1;
+   required string configVersion = 2;
+   required uint64 uptime = 3;
+ }
+ 
+ /**
+  * DatanodeInfo array
+  */
+ message DatanodeInfosProto {
+   repeated DatanodeInfoProto datanodes = 1;
+ }
+ 
+ /**
+  * The status of a Datanode
+  */
+ message DatanodeInfoProto {
+   required DatanodeIDProto id = 1;
+   optional uint64 capacity = 2 [default = 0];
+   optional uint64 dfsUsed = 3 [default = 0];
+   optional uint64 remaining = 4 [default = 0];
+   optional uint64 blockPoolUsed = 5 [default = 0];
+   optional uint64 lastUpdate = 6 [default = 0];
+   optional uint32 xceiverCount = 7 [default = 0];
+   optional string location = 8;
+   enum AdminState {
+     NORMAL = 0;
+     DECOMMISSION_INPROGRESS = 1;
+     DECOMMISSIONED = 2;
+   }
+ 
+   optional AdminState adminState = 10 [default = NORMAL];
+   optional uint64 cacheCapacity = 11 [default = 0];
+   optional uint64 cacheUsed = 12 [default = 0];
+   optional uint64 lastUpdateMonotonic = 13 [default = 0];
+ }
+ 
+ /**
+  * Represents a storage available on the datanode
+  */
+ message DatanodeStorageProto {
+   enum StorageState {
+     NORMAL = 0;
+     READ_ONLY_SHARED = 1;
+   }
+ 
+   required string storageUuid = 1;
+   optional StorageState state = 2 [default = NORMAL];
+   optional StorageTypeProto storageType = 3 [default = DISK];
+ }
+ 
+ message StorageReportProto {
+   required string storageUuid = 1 [ deprecated = true ];
+   optional bool failed = 2 [ default = false ];
+   optional uint64 capacity = 3 [ default = 0 ];
+   optional uint64 dfsUsed = 4 [ default = 0 ];
+   optional uint64 remaining = 5 [ default = 0 ];
+   optional uint64 blockPoolUsed = 6 [ default = 0 ];
+   optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+ }
+ 
+ /**
+  * Summary of a file or directory
+  */
+ message ContentSummaryProto {
+   required uint64 length = 1;
+   required uint64 fileCount = 2;
+   required uint64 directoryCount = 3;
+   required uint64 quota = 4;
+   required uint64 spaceConsumed = 5;
+   required uint64 spaceQuota = 6;
+   optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
+ }
+ 
+ /**
+  * Storage type quota and usage information of a file or directory
+  */
+ message StorageTypeQuotaInfosProto {
+   repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
+ }
+ 
+ message StorageTypeQuotaInfoProto {
+   required StorageTypeProto type = 1;
+   required uint64 quota = 2;
+   required uint64 consumed = 3;
+ }
+ 
+ /**
+  * Contains a list of paths corresponding to corrupt files and a cookie
+  * used for iterative calls to NameNode.listCorruptFileBlocks.
+  *
+  */
+ message CorruptFileBlocksProto {
+   repeated string files = 1;
+   required string cookie = 2;
+ }
+ 
+ /**
+  * File or directory permission - same spec as POSIX
+  */
+ message FsPermissionProto {
+   required uint32 perm = 1;       // Actually a short - only 16bits used
+ }
+ 
+ /**
+  * Types of recognized storage media.
+  */
+ enum StorageTypeProto {
+   DISK = 1;
+   SSD = 2;
+   ARCHIVE = 3;
+   RAM_DISK = 4;
+ }
+ 
+ /**
+  * A list of storage types. 
+  */
+ message StorageTypesProto {
+   repeated StorageTypeProto storageTypes = 1;
+ }
+ 
+ /**
+  * Block replica storage policy.
+  */
+ message BlockStoragePolicyProto {
+   required uint32 policyId = 1;
+   required string name = 2;
+   // a list of storage types for storing the block replicas when creating a
+   // block.
+   required StorageTypesProto creationPolicy = 3;
+   // A list of storage types for creation fallback storage.
+   optional StorageTypesProto creationFallbackPolicy = 4;
+   optional StorageTypesProto replicationFallbackPolicy = 5;
+ }
+ 
+ /**
+  * A list of storage IDs. 
+  */
+ message StorageUuidsProto {
+   repeated string storageUuids = 1;
+ }
+ 
+ /**
+  * A LocatedBlock gives information about a block and its location.
+  */ 
+ message LocatedBlockProto {
+   required ExtendedBlockProto b  = 1;
+   required uint64 offset = 2;           // offset of first byte of block in the file
+   repeated DatanodeInfoProto locs = 3;  // Locations ordered by proximity to client ip
+   required bool corrupt = 4;            // true if all replicas of a block are corrupt, else false.
+                                         // If only some replicas are corrupt, those are filtered out
+                                         // and their locations are not part of this object.
+ 
+   required hadoop.common.TokenProto blockToken = 5;
+   repeated bool isCached = 6 [packed=true]; // if a location in locs is cached
+   repeated StorageTypeProto storageTypes = 7;
+   repeated string storageIDs = 8;
+ }
+ 
+ message DataEncryptionKeyProto {
+   required uint32 keyId = 1;
+   required string blockPoolId = 2;
+   required bytes nonce = 3;
+   required bytes encryptionKey = 4;
+   required uint64 expiryDate = 5;
+   optional string encryptionAlgorithm = 6;
+ }
+ 
+ /**
+  * Cipher suite.
+  */
+ enum CipherSuiteProto {
+     UNKNOWN = 1;
+     AES_CTR_NOPADDING = 2;
+ }
+ 
+ /**
+  * Crypto protocol version used to access encrypted files.
+  */
+ enum CryptoProtocolVersionProto {
+     UNKNOWN_PROTOCOL_VERSION = 1;
+     ENCRYPTION_ZONES = 2;
+ }
+ 
+ /**
+  * Encryption information for a file.
+  */
+ message FileEncryptionInfoProto {
+   required CipherSuiteProto suite = 1;
+   required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+   required bytes key = 3;
+   required bytes iv = 4;
+   required string keyName = 5;
+   required string ezKeyVersionName = 6;
+ }
+ 
+ /**
+  * Encryption information for an individual
+  * file within an encryption zone
+  */
+ message PerFileEncryptionInfoProto {
+   required bytes key = 1;
+   required bytes iv = 2;
+   required string ezKeyVersionName = 3;
+ }
+ 
+ /**
+  * Encryption information for an encryption
+  * zone
+  */
+ message ZoneEncryptionInfoProto {
+   required CipherSuiteProto suite = 1;
+   required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+   required string keyName = 3;
+ }
+ 
+ /**
+  * Cipher option
+  */
+ message CipherOptionProto {
+   required CipherSuiteProto suite = 1;
+   optional bytes inKey = 2;
+   optional bytes inIv = 3;
+   optional bytes outKey = 4;
+   optional bytes outIv = 5;
+ }
+ 
+ /**
+  * A set of file blocks and their locations.
+  */
+ message LocatedBlocksProto {
+   required uint64 fileLength = 1;
+   repeated LocatedBlockProto blocks = 2;
+   required bool underConstruction = 3;
+   optional LocatedBlockProto lastBlock = 4;
+   required bool isLastBlockComplete = 5;
+   optional FileEncryptionInfoProto fileEncryptionInfo = 6;
+ }
+ 
+ /**
+  * Status of a file, directory or symlink
+  * Optionally includes a file's block locations if requested by client on the rpc call.
+  */
+ message HdfsFileStatusProto {
+   enum FileType {
+     IS_DIR = 1;
+     IS_FILE = 2;
+     IS_SYMLINK = 3;
+   }
+   required FileType fileType = 1;
+   required bytes path = 2;          // local name of inode encoded java UTF8
+   required uint64 length = 3;
+   required FsPermissionProto permission = 4;
+   required string owner = 5;
+   required string group = 6;
+   required uint64 modification_time = 7;
+   required uint64 access_time = 8;
+ 
+   // Optional fields for symlink
+   optional bytes symlink = 9;             // if symlink, target encoded java UTF8 
+ 
+   // Optional fields for file
+   optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+   optional uint64 blocksize = 11 [default = 0];
+   optional LocatedBlocksProto locations = 12;  // supplied only if asked by client
+ 
+   // Optional field for fileId
+   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
+   optional int32 childrenNum = 14 [default = -1];
+   // Optional field for file encryption
+   optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+ 
+   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
+ } 
+ 
+ /**
+  * Checksum algorithms/types used in HDFS
+  * Make sure this enum's integer values match the id properties of the
+  * corresponding enum values defined in org.apache.hadoop.util.DataChecksum.Type.
+  */
+ enum ChecksumTypeProto {
+   CHECKSUM_NULL = 0;
+   CHECKSUM_CRC32 = 1;
+   CHECKSUM_CRC32C = 2;
+ }
+ 
+ /**
+  * HDFS Server Defaults
+  */
+ message FsServerDefaultsProto {
+   required uint64 blockSize = 1;
+   required uint32 bytesPerChecksum = 2;
+   required uint32 writePacketSize = 3;
+   required uint32 replication = 4; // Actually a short - only 16 bits used
+   required uint32 fileBufferSize = 5;
+   optional bool encryptDataTransfer = 6 [default = false];
+   optional uint64 trashInterval = 7 [default = 0];
+   optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
+ }
+ 
+ 
+ /**
+  * Directory listing
+  */
+ message DirectoryListingProto {
+   repeated HdfsFileStatusProto partialListing = 1;
+   required uint32 remainingEntries  = 2;
+ }
+ 
+ /**
+  * Status of a snapshottable directory: besides the normal information for
+  * a directory status, it also includes the snapshot quota, the number of
+  * snapshots, and the full path of the parent directory.
+  */
+ message SnapshottableDirectoryStatusProto {
+   required HdfsFileStatusProto dirStatus = 1;
+ 
+   // Fields specific for snapshottable directory
+   required uint32 snapshot_quota = 2;
+   required uint32 snapshot_number = 3;
+   required bytes parent_fullpath = 4;
+ }
+ 
+ /**
+  * Snapshottable directory listing
+  */
+ message SnapshottableDirectoryListingProto {
+   repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
+ }
+ 
+ /**
+  * Snapshot diff report entry
+  */
+ message SnapshotDiffReportEntryProto {
+   required bytes fullpath = 1;
+   required string modificationLabel = 2;
+   optional bytes targetPath = 3;
+ }
+ 
+ /**
+  * Snapshot diff report
+  */
+ message SnapshotDiffReportProto {
+   // full path of the directory where snapshots were taken
+   required string snapshotRoot = 1;
+   required string fromSnapshot = 2;
+   required string toSnapshot = 3;
+   repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
+ }
+ 
+ /**
+  * Common node information shared by all the nodes in the cluster
+  */
+ message StorageInfoProto {
+   required uint32 layoutVersion = 1; // Layout version of the file system
+   required uint32 namespceID = 2;    // File system namespace ID
+   required string clusterID = 3;     // ID of the cluster
+   required uint64 cTime = 4;         // File system creation time
++
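++  // Type of the node to which this storage is attached.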
++  enum NodeTypeProto {
++    NAME_NODE = 1;
++    DATA_NODE = 2;
++    JOURNAL_NODE = 3;
++    STORAGE_CONTAINER_SERVICE = 4;
++  }
++  optional NodeTypeProto nodeType = 5;
+ }
+ 
+ /**
+  * Information sent by a namenode to identify itself to the primary namenode.
+  */
+ message NamenodeRegistrationProto {
+   required string rpcAddress = 1;    // host:port of the namenode RPC address
+   required string httpAddress = 2;   // host:port of the namenode http server
+   enum NamenodeRoleProto {
+     NAMENODE = 1;
+     BACKUP = 2;
+     CHECKPOINT = 3;
+   }
+   required StorageInfoProto storageInfo = 3;  // Node information
+   optional NamenodeRoleProto role = 4 [default = NAMENODE];        // Namenode role
+ }
+ 
+ /**
+  * Unique signature to identify checkpoint transactions.
+  */
+ message CheckpointSignatureProto {
+   required string blockPoolId = 1;
+   required uint64 mostRecentCheckpointTxId = 2;
+   required uint64 curSegmentTxId = 3;
+   required StorageInfoProto storageInfo = 4;
+ }
+ 
+ /**
+  * Command sent from one namenode to another namenode.
+  */
+ message NamenodeCommandProto {
+   enum Type {
+     NamenodeCommand = 0;      // Base command
+     CheckPointCommand = 1;    // Check point command
+   }
+   required uint32 action = 1;
+   required Type type = 2;
+   optional CheckpointCommandProto checkpointCmd = 3;
+ }
+ 
+ /**
+  * Command returned from primary to checkpointing namenode.
+  * This command has checkpoint signature that identifies
+  * checkpoint transaction and is needed for further
+  * communication related to checkpointing.
+  */
+ message CheckpointCommandProto {
+   // Unique signature to identify checkpoint transaction
+   required CheckpointSignatureProto signature = 1; 
+ 
+   // If true, transfer the image back to the primary upon completion of the checkpoint
+   required bool needToReturnImage = 2;
+ }
+ 
+ /**
+  * Block information
+  *
+  * Please be wary of adding additional fields here, since INodeFiles
+  * need to fit in PB's default max message size of 64MB.
+  * We restrict the max # of blocks per file
+  * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
+  * to avoid changing this.
+  */
+ message BlockProto {
+   required uint64 blockId = 1;
+   required uint64 genStamp = 2;
+   optional uint64 numBytes = 3 [default = 0];
+ }
+ 
+ /**
+  * Block and the datanodes where it is located
+  */
+ message BlockWithLocationsProto {
+   required BlockProto block = 1;   // Block
+   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
+   repeated string storageUuids = 3;  // Storages with replicas of the block
+   repeated StorageTypeProto storageTypes = 4;
+ }
+ 
+ /**
+  * List of blocks with locations
+  */
+ message BlocksWithLocationsProto {
+   repeated BlockWithLocationsProto blocks = 1;
+ }
+ 
+ /**
+  * Editlog information with available transactions
+  */
+ message RemoteEditLogProto {
+   required uint64 startTxId = 1;  // Starting available edit log transaction
+   required uint64 endTxId = 2;    // Ending available edit log transaction
+   optional bool isInProgress = 3 [default = false];
+ }
+ 
+ /**
+  * Enumeration of editlogs available on a remote namenode
+  */
+ message RemoteEditLogManifestProto {
+   repeated RemoteEditLogProto logs = 1;
+ }
+ 
+ /**
+  * Namespace information that describes namespace on a namenode
+  */
+ message NamespaceInfoProto {
+   required string buildVersion = 1;         // Software revision version (e.g. an svn or git revision)
+   required uint32 unused = 2;               // Retained for backward compatibility
+   required string blockPoolID = 3;          // block pool used by the namespace
+   required StorageInfoProto storageInfo = 4;// Node information
+   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)
+   optional uint64 capabilities = 6 [default = 0]; // feature flags
+ }
+ 
+ /**
+  * Block access token information
+  */
+ message BlockKeyProto {
+   required uint32 keyId = 1;      // Key identifier
+   required uint64 expiryDate = 2; // Expiry time in milliseconds
+   optional bytes keyBytes = 3;    // Key secret
+ }
+ 
+ /**
+  * Current key and set of block keys at the namenode.
+  */
+ message ExportedBlockKeysProto {
+   required bool isBlockTokenEnabled = 1;
+   required uint64 keyUpdateInterval = 2;
+   required uint64 tokenLifeTime = 3;
+   required BlockKeyProto currentKey = 4;
+   repeated BlockKeyProto allKeys = 5;
+ }
+ 
+ /**
+  * State of a block replica at a datanode
+  */
+ enum ReplicaStateProto {
+   FINALIZED = 0;  // State of a replica when it is not modified
+   RBW = 1;        // State of replica that is being written to
+   RWR = 2;        // State of replica that is waiting to be recovered
+   RUR = 3;        // State of replica that is under recovery
+   TEMPORARY = 4;  // State of replica that is created for replication
+ }
+ 
+ /**
+  * Block that needs to be recovered at a given location
+  */
+ message RecoveringBlockProto {
+   required uint64 newGenStamp = 1;        // New genstamp post recovery
+   required LocatedBlockProto block = 2;   // Block to be recovered
+   optional BlockProto truncateBlock = 3;  // New block for recovery (truncate)
+ }
+ 
+ /**
+  * void request
+  */
+ message VersionRequestProto {
+ }
+ 
+ /**
+  * Version response from namenode.
+  */
+ message VersionResponseProto {
+   required NamespaceInfoProto info = 1;
+ }
+ 
+ /**
+  * Information related to a snapshot
+  * TODO: add more information
+  */
+ message SnapshotInfoProto {
+   required string snapshotName = 1;
+   required string snapshotRoot = 2;
+   required FsPermissionProto permission = 3;
+   required string owner = 4;
+   required string group = 5;
+   required string createTime = 6;
+   // TODO: do we need access time?
+ }
+ 
+ /**
+  * Rolling upgrade status
+  */
+ message RollingUpgradeStatusProto {
+   required string blockPoolId = 1;
+   optional bool finalized = 2 [default = false];
+ }
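
Apart from the move of hdfs.proto from hadoop-hdfs into hadoop-hdfs-client,
the substantive change in this file is the new NodeTypeProto enum and the
optional nodeType field on StorageInfoProto (the '++' lines above). Because
nodeType is optional, the addition is wire-compatible: peers built from the
older .proto simply skip the unknown field. Below is a minimal sketch of how
the generated protobuf API for that field might be used, assuming classes
generated with the java_package and java_outer_classname options declared at
the top of the file; all field values are made up for illustration:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.NodeTypeProto;

    public class StorageInfoSketch {
      public static void main(String[] args) {
        // Build a StorageInfoProto tagged with the new node type.
        StorageInfoProto info = StorageInfoProto.newBuilder()
            .setLayoutVersion(60)                 // illustrative values only
            .setNamespceID(1234567)               // field keeps its historical spelling
            .setClusterID("CID-example-cluster")
            .setCTime(0L)
            .setNodeType(NodeTypeProto.STORAGE_CONTAINER_SERVICE)
            .build();

        // nodeType is optional, so consumers should check presence first;
        // an unset field would report the first enum value as its default.
        if (info.hasNodeType()) {
          System.out.println("node type: " + info.getNodeType());
        }
      }
    }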

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da5a33b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 64ad701,ee01924..d58ce79
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@@ -42,9 -38,29 +42,9 @@@ public interface FsVolumeSpi extends Vo
     */
    FsVolumeReference obtainReference() throws ClosedChannelException;
  
 -  /** @return the StorageUuid of the volume */
 -  String getStorageID();
 -
 -  /** @return a list of block pools. */
 -  String[] getBlockPoolList();
 -
 -  /** @return the available storage space in bytes. */
 -  long getAvailable() throws IOException;
 -
 -  /** @return the base path to the volume */
 -  String getBasePath();
 -
 -  /** @return the path to the volume */
 -  String getPath(String bpid) throws IOException;
 -
    /** @return the directory for the finalized blocks in the block pool. */
-   public File getFinalizedDir(String bpid) throws IOException;
+   File getFinalizedDir(String bpid) throws IOException;
    
 -  StorageType getStorageType();
 -
 -  /** Returns true if the volume is NOT backed by persistent storage. */
 -  boolean isTransientStorage();
 -
    /**
     * Reserve disk space for an RBW block so a writer does not run out of
     * space before the block is full.
@@@ -164,9 -180,10 +164,9 @@@
     * @throws IOException     If there was an IO error loading the saved
     *                           block iterator.
     */
-   public BlockIterator loadBlockIterator(String bpid, String name)
 -  BlockIterator loadBlockIterator(String bpid, String name) throws IOException;
++  BlockIterator loadBlockIterator(String bpid, String name)
 +      throws IOException;
  
 -  /**
 -   * Get the FSDatasetSpi which this volume is a part of.
 -   */
 +  @Override
-   public FsDatasetSpi getDataset();
+   FsDatasetSpi getDataset();
  }
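
The FsVolumeSpi hunks above largely reconcile the HDFS-7240 copy of the
interface with trunk's modifier cleanup: members of a Java interface are
implicitly public (and its methods implicitly abstract), so the explicit
keywords are redundant and can be dropped without changing the API. A
hypothetical illustration, not part of the patch:

    import java.io.File;
    import java.io.IOException;

    // Hypothetical interface for illustration only. Both declarations carry
    // the same effective modifiers; the second form is the idiomatic one
    // that the cleanup moves toward.
    interface VolumeSketch {
      public abstract File getFinalizedDir(String bpid) throws IOException;
      File getRbwDir(String bpid) throws IOException;
    }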