Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2018/10/31 05:45:58 UTC
[2/2] hadoop git commit: HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.
HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fac9f91b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fac9f91b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fac9f91b
Branch: refs/heads/trunk
Commit: fac9f91b2944cee641049fffcafa6b65e0cf68f2
Parents: e4f22b0
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Oct 31 14:43:58 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Oct 31 14:43:58 2018 +0900
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 12 ++--
.../hadoop/hdfs/protocol/BlockListAsLongs.java | 2 +-
.../QJournalProtocolServerSideTranslatorPB.java | 2 +-
.../token/block/BlockTokenSecretManager.java | 2 +-
.../hadoop/hdfs/server/balancer/Balancer.java | 15 ++---
.../server/blockmanagement/BlockManager.java | 26 +++++----
.../blockmanagement/BlockPlacementPolicy.java | 1 -
.../CombinedHostFileManager.java | 6 +-
.../blockmanagement/CorruptReplicasMap.java | 2 +-
.../blockmanagement/DatanodeAdminManager.java | 8 +--
.../server/blockmanagement/HostFileManager.java | 7 +--
.../hdfs/server/blockmanagement/HostSet.java | 8 +--
.../server/blockmanagement/SlowPeerTracker.java | 5 +-
.../server/datanode/BlockPoolSliceStorage.java | 60 ++++++++++++--------
.../server/datanode/BlockRecoveryWorker.java | 15 +++--
.../hdfs/server/datanode/BlockScanner.java | 6 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 10 ++--
.../hdfs/server/datanode/DataStorage.java | 4 +-
.../hdfs/server/datanode/DirectoryScanner.java | 1 -
.../hdfs/server/datanode/FileIoProvider.java | 3 -
.../hdfs/server/datanode/VolumeScanner.java | 4 +-
.../server/datanode/checker/AbstractFuture.java | 13 ++---
.../server/datanode/fsdataset/FsDatasetSpi.java | 12 ++--
.../server/datanode/fsdataset/FsVolumeSpi.java | 13 +++--
.../datanode/metrics/OutlierDetector.java | 3 +-
.../diskbalancer/DiskBalancerException.java | 1 -
.../datamodel/DiskBalancerCluster.java | 11 ++--
.../datamodel/DiskBalancerDataNode.java | 10 ++--
.../diskbalancer/planner/GreedyPlanner.java | 2 +-
.../hadoop/hdfs/server/namenode/AclStorage.java | 18 +++---
.../server/namenode/EncryptionZoneManager.java | 42 +++++++++-----
.../hdfs/server/namenode/FSDirectory.java | 8 +--
.../hdfs/server/namenode/FSNamesystem.java | 24 ++++++--
.../hadoop/hdfs/server/namenode/INode.java | 4 +-
.../hdfs/server/namenode/INodeReference.java | 6 +-
.../hdfs/server/namenode/INodesInPath.java | 4 +-
.../hdfs/server/namenode/JournalManager.java | 2 +-
.../hdfs/server/namenode/LeaseManager.java | 2 +-
.../server/namenode/MetaRecoveryContext.java | 2 +-
.../hadoop/hdfs/server/namenode/NameNode.java | 6 +-
.../hdfs/server/namenode/NamenodeFsck.java | 9 ++-
.../hadoop/hdfs/server/namenode/Quota.java | 5 +-
.../server/namenode/ReencryptionHandler.java | 2 +-
.../server/namenode/XAttrPermissionFilter.java | 4 +-
.../hdfs/server/namenode/XAttrStorage.java | 8 +--
.../snapshot/AbstractINodeDiffList.java | 8 +--
.../namenode/snapshot/DiffListBySkipList.java | 9 +--
.../sps/BlockStorageMovementNeeded.java | 5 +-
.../namenode/sps/DatanodeCacheManager.java | 2 +-
.../sps/StoragePolicySatisfyManager.java | 14 +++--
.../startupprogress/StartupProgressView.java | 4 +-
.../server/namenode/top/metrics/TopMetrics.java | 17 ++++--
.../namenode/top/window/RollingWindow.java | 18 +++---
.../top/window/RollingWindowManager.java | 2 +-
.../protocol/BlockStorageMovementCommand.java | 11 ++--
.../hdfs/server/protocol/DatanodeProtocol.java | 2 +-
.../hdfs/server/protocol/NamenodeProtocol.java | 5 +-
.../sps/ExternalSPSBlockMoveTaskHandler.java | 2 +
.../org/apache/hadoop/hdfs/tools/DFSck.java | 13 +++--
.../offlineEditsViewer/OfflineEditsViewer.java | 4 +-
.../offlineEditsViewer/OfflineEditsVisitor.java | 2 +-
.../StatisticsEditsVisitor.java | 4 +-
.../NameDistributionVisitor.java | 4 +-
.../java/org/apache/hadoop/hdfs/util/Diff.java | 16 +++---
.../org/apache/hadoop/hdfs/util/XMLUtils.java | 4 +-
65 files changed, 310 insertions(+), 246 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 6dd366f..8627268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -160,7 +160,8 @@ public class DFSUtil {
/**
* Comparator for sorting DataNodeInfo[] based on
* stale, decommissioned and entering_maintenance states.
- * Order: live -> stale -> entering_maintenance -> decommissioned
+ * Order: live {@literal ->} stale {@literal ->} entering_maintenance
+ * {@literal ->} decommissioned
*/
@InterfaceAudience.Private
public static class ServiceAndStaleComparator extends ServiceComparator {
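For reference, the documented order maps naturally onto an enum whose declaration order encodes the sort key; a minimal sketch (illustrative only, not the patch's code):

    import java.util.Arrays;
    import java.util.Comparator;

    public class StaleOrderSketch {
      // Declaration order mirrors the documented sort:
      // live -> stale -> entering_maintenance -> decommissioned.
      enum DnState { LIVE, STALE, ENTERING_MAINTENANCE, DECOMMISSIONED }

      public static void main(String[] args) {
        DnState[] nodes = { DnState.DECOMMISSIONED, DnState.LIVE, DnState.STALE };
        Arrays.sort(nodes, Comparator.comparingInt(DnState::ordinal));
        System.out.println(Arrays.toString(nodes)); // [LIVE, STALE, DECOMMISSIONED]
      }
    }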
@@ -390,7 +391,8 @@ public class DFSUtil {
* @param conf Configuration
* @param nsId the nameservice whose NNs addresses we want.
* @param defaultValue default address to return in case key is not found.
- * @return A map from nnId -> RPC address of each NN in the nameservice.
+ * @return A map from nnId {@literal ->} RPC address of each NN in the
+ * nameservice.
*/
public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
Configuration conf, String nsId, String defaultValue) {
@@ -1289,7 +1291,8 @@ public class DFSUtil {
* @param conf configuration
* @param protocol Protocol interface
* @param service service that implements the protocol
- * @param server RPC server to which the protocol & implementation is added to
+ * @param server RPC server to which the protocol &amp; implementation is
+ * added
* @throws IOException
*/
public static void addPBProtocol(Configuration conf, Class<?> protocol,
@@ -1357,7 +1360,8 @@ public class DFSUtil {
* @param conf Configuration
* @param nsId the nameservice whose NNs addresses we want.
* @param defaultValue default address to return in case key is not found.
- * @return A map from nnId -> Web address of each NN in the nameservice.
+ * @return A map from nnId {@literal ->} Web address of each NN in the
+ * nameservice.
*/
public static Map<String, InetSocketAddress> getWebAddressesForNameserviceId(
Configuration conf, String nsId, String defaultValue) {
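To illustrate the nnId-to-address mapping these methods return, a hedged sketch that parses HA-style per-NN keys (the key pattern and host values are examples, not taken from this patch):

    import java.net.InetSocketAddress;
    import java.util.HashMap;
    import java.util.Map;

    public class NnAddressMapSketch {
      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put("dfs.namenode.rpc-address.ns1.nn1", "host1:8020");
        conf.put("dfs.namenode.rpc-address.ns1.nn2", "host2:8020");

        // Build the nnId -> InetSocketAddress map the javadoc describes.
        Map<String, InetSocketAddress> addrs = new HashMap<>();
        String prefix = "dfs.namenode.rpc-address.ns1.";
        for (Map.Entry<String, String> e : conf.entrySet()) {
          if (e.getKey().startsWith(prefix)) {
            String nnId = e.getKey().substring(prefix.length());
            String[] hp = e.getValue().split(":");
            addrs.put(nnId,
                new InetSocketAddress(hp[0], Integer.parseInt(hp[1])));
          }
        }
        System.out.println(addrs); // e.g. {nn1=host1:8020, nn2=host2:8020}
      }
    }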
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 8f482e3..77e40b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -201,7 +201,7 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
/**
* Very efficient encoding of the block report into a ByteString to avoid
* the overhead of protobuf repeating fields. Primitive repeating fields
- * require re-allocs of an ArrayList<Long> and the associated (un)boxing
+ * require re-allocs of an ArrayList&lt;Long&gt; and the associated (un)boxing
* overhead which puts pressure on GC.
*
* The structure of the buffer is as follows:
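The boxing point above can be seen in a plain sketch: writing primitives into a byte buffer allocates no Long objects, unlike an ArrayList<Long> (illustrative only, not the class's actual wire format):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class UnboxedEncodingSketch {
      public static void main(String[] args) throws IOException {
        long[] blockFields = { 42L, 1024L, 1001L }; // e.g. id, length, genstamp
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        for (long v : blockFields) {
          out.writeLong(v); // primitive write: no boxing, no per-element re-allocs
        }
        System.out.println(buf.size() + " bytes encoded"); // 24 bytes encoded
      }
    }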
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
index 2ad19da..61e8fa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
@@ -168,7 +168,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
return VOID_JOURNAL_RESPONSE;
}
- /** @see JournalProtocol#heartbeat */
+ /** @see QJournalProtocol#heartbeat */
@Override
public HeartbeatResponseProto heartbeat(RpcController controller,
HeartbeatRequestProto req) throws ServiceException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 3b2e8d2..52bc52d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -220,7 +220,7 @@ public class BlockTokenSecretManager extends
}
/**
- * Update block keys if update time > update interval.
+ * Update block keys if update time {@literal >} update interval.
* @return true if the keys are updated.
*/
public synchronized boolean updateKeys(final long updateTime) throws IOException {
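A minimal sketch of the guard this javadoc describes, with hypothetical field names (a real manager also rolls the keys):

    public class KeyUpdateSketch {
      private long lastKeyUpdate;
      private final long keyUpdateInterval = 600_000L; // example: 10 minutes

      // Regenerate keys only when the elapsed time exceeds the interval.
      synchronized boolean updateKeys(long updateTime) {
        if (updateTime - lastKeyUpdate <= keyUpdateInterval) {
          return false; // too soon; keep the current keys
        }
        lastKeyUpdate = updateTime; // a real implementation rolls keys here
        return true;
      }

      public static void main(String[] args) {
        System.out.println(new KeyUpdateSketch()
            .updateKeys(System.currentTimeMillis())); // true
      }
    }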
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index a58e391..d21d13c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -78,7 +78,7 @@ import com.google.common.base.Preconditions;
* <p>SYNOPSIS
* <pre>
* To start:
- * bin/start-balancer.sh [-threshold <threshold>]
+ * bin/start-balancer.sh [-threshold {@literal <threshold>}]
* Example: bin/start-balancer.sh
* start the balancer with a default threshold of 10%
* bin/start-balancer.sh -threshold 5
@@ -113,13 +113,14 @@ import com.google.common.base.Preconditions;
* <p>A system property that limits the balancer's use of bandwidth is
* defined in the default configuration file:
* <pre>
- * <property>
- * <name>dfs.datanode.balance.bandwidthPerSec</name>
- * <value>1048576</value>
- * <description> Specifies the maximum bandwidth that each datanode
+ * &lt;property&gt;
+ * &lt;name&gt;dfs.datanode.balance.bandwidthPerSec&lt;/name&gt;
+ * &lt;value&gt;1048576&lt;/value&gt;
+ * &lt;description&gt; Specifies the maximum bandwidth that each datanode
* can utilize for the balancing purpose in terms of the number of bytes
- * per second. </description>
- * </property>
+ * per second.
+ * &lt;/description&gt;
+ * &lt;/property&gt;
* </pre>
*
* <p>This property determines the maximum speed at which a block will be
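The property above is read like any other Hadoop configuration value; a hedged usage sketch (the fallback mirrors the default from the snippet):

    import org.apache.hadoop.conf.Configuration;

    public class BandwidthConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to 1048576 (1 MiB/s) when the property is unset.
        long bytesPerSec =
            conf.getLong("dfs.datanode.balance.bandwidthPerSec", 1048576L);
        System.out.println("balancer bandwidth: " + bytesPerSec + " B/s");
      }
    }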
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5e14247..d74b523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -143,12 +143,13 @@ import org.slf4j.LoggerFactory;
* If any of the replica is in maintenance mode, the safety property
* is extended as follows. These property still apply for the case of zero
* maintenance replicas, thus we can use these safe property for all scenarios.
- * a. # of live replicas >= # of min replication for maintenance.
- * b. # of live replicas <= # of expected redundancy.
- * c. # of live replicas and maintenance replicas >= # of expected redundancy.
+ * a. # of live replicas &gt;= # of min replication for maintenance.
+ * b. # of live replicas &lt;= # of expected redundancy.
+ * c. # of live replicas and maintenance replicas &gt;= # of expected
+ * redundancy.
*
* For regular replication, # of min live replicas for maintenance is determined
- * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <=
+ * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to be &lt;=
* DFS_NAMENODE_REPLICATION_MIN_KEY.
* For erasure encoding, # of min live replicas for maintenance is
* BlockInfoStriped#getRealDataBlockNum.
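Conditions a-c above amount to a small predicate; a sketch with hypothetical names:

    public class MaintenanceSafetySketch {
      // (a) enough live replicas for maintenance, (b) not over-replicated,
      // (c) live plus maintenance replicas cover the expected redundancy.
      static boolean isSafe(int live, int maintenance,
          int minReplForMaintenance, int expectedRedundancy) {
        return live >= minReplForMaintenance
            && live <= expectedRedundancy
            && live + maintenance >= expectedRedundancy;
      }

      public static void main(String[] args) {
        System.out.println(isSafe(1, 2, 1, 3)); // true
        System.out.println(isSafe(0, 3, 1, 3)); // false: no live replica
      }
    }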
@@ -305,7 +306,7 @@ public class BlockManager implements BlockStatsMXBean {
private final double storageInfoDefragmentRatio;
/**
- * Mapping: Block -> { BlockCollection, datanodes, self ref }
+ * Mapping: Block {@literal ->} { BlockCollection, datanodes, self ref }
* Updated only in response to client-sent information.
*/
final BlocksMap blocksMap;
@@ -321,7 +322,9 @@ public class BlockManager implements BlockStatsMXBean {
private final BlockReportProcessingThread blockReportThread =
new BlockReportProcessingThread();
- /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
+ /**
+ * Store blocks {@literal ->} datanodedescriptor(s) map of corrupt replicas.
+ */
final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
/**
@@ -2105,7 +2108,7 @@ public class BlockManager implements BlockStatsMXBean {
* Choose target datanodes for creating a new block.
*
* @throws IOException
- * if the number of targets < minimum replication.
+ * if the number of targets {@literal <} minimum replication.
* @see BlockPlacementPolicy#chooseTarget(String, int, Node,
* Set, long, List, BlockStoragePolicy, EnumSet)
*/
@@ -2487,7 +2490,8 @@ public class BlockManager implements BlockStatsMXBean {
/**
* The given storage is reporting all its blocks.
- * Update the (storage-->block list) and (block-->storage list) maps.
+ * Update the (storage{@literal -->}block list) and
+ * (block{@literal -->}storage list) maps.
*
* @return true if all known storages of the given DN have finished reporting.
* @throws IOException
@@ -3777,8 +3781,8 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- * Modify (block-->datanode) map. Possibly generate replication tasks, if the
- * removed block is still valid.
+ * Modify (block{@literal -->}datanode) map. Possibly generate replication
+ * tasks, if the removed block is still valid.
*/
public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
@@ -4341,7 +4345,7 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- * Get blocks to invalidate for <i>nodeId</i>
+ * Get blocks to invalidate for {@code nodeId}
* in {@link #invalidateBlocks}.
*
* @return number of blocks scheduled for removal during this iteration.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 23e3e40..897bf69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -152,7 +152,6 @@ public abstract class BlockPlacementPolicy {
/**
* Check if the move is allowed. Used by balancer and other tools.
- * @
*
* @param candidates all replicas including source and target
* @param source source replica of the move
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
index d6a0972..d607789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
@@ -47,13 +47,11 @@ import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
/**
* This class manages datanode configuration using a json file.
* Please refer to {@link CombinedHostsFileReader} for the json format.
- * <p/>
- * <p/>
+ * <p>
* Entries may or may not specify a port. If they don't, we consider
* them to apply to every DataNode on that host. The code canonicalizes the
* entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
* The code ignores all entries that the DNS fails to resolve their IP
* addresses. This is okay because by default the NN rejects the registrations
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fe1224c..fc31584 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -38,7 +38,7 @@ import com.google.common.annotations.VisibleForTesting;
* corrupt. While reporting replicas of a Block, we hide any corrupt
* copies. These copies are removed once Block is found to have
* expected number of good replicas.
- * Mapping: Block -> TreeSet<DatanodeDescriptor>
+ * Mapping: Block {@literal -> TreeSet<DatanodeDescriptor>}
*/
@InterfaceAudience.Private
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index a1dff08..abc0f7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -57,7 +57,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* Manages decommissioning and maintenance state for DataNodes. A background
* monitor thread periodically checks the status of DataNodes that are
* decommissioning or entering maintenance state.
- * <p/>
+ * <p>
* A DataNode can be decommissioned in a few situations:
* <ul>
* <li>If a DN is dead, it is decommissioned immediately.</li>
@@ -72,11 +72,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* determine if they can be DECOMMISSIONED. The monitor also prunes this list
* as blocks become replicated, so monitor scans will become more efficient
* over time.
- * <p/>
+ * <p>
* DECOMMISSION_INPROGRESS nodes that become dead do not progress to
* DECOMMISSIONED until they become live again. This prevents potential
* durability loss for singly-replicated blocks (see HDFS-6791).
- * <p/>
+ * <p>
* DataNodes can also be put under maintenance state for any short duration
* maintenance operations. Unlike decommissioning, blocks are not always
* re-replicated for the DataNodes to enter maintenance state. When the
@@ -88,7 +88,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* of maintenance expiry time. When DataNodes don't transition or join the
* cluster back by expiry time, blocks are re-replicated just as in
* decommissioning case as to avoid read or write performance degradation.
- * <p/>
+ * <p>
* This class depends on the FSNamesystem lock for synchronization.
*/
@InterfaceAudience.Private
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index b7bf674..4ead0ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -33,17 +33,16 @@ import java.util.HashSet;
/**
* This class manages the include and exclude files for HDFS.
- * <p/>
+ * <p>
* These files control which DataNodes the NameNode expects to see in the
* cluster. Loosely speaking, the include file, if it exists and is not
* empty, is a list of everything we expect to see. The exclude file is
* a list of everything we want to ignore if we do see it.
- * <p/>
+ * <p>
* Entries may or may not specify a port. If they don't, we consider
* them to apply to every DataNode on that host. The code canonicalizes the
* entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
* The code ignores all entries that the DNS fails to resolve their IP
* addresses. This is okay because by default the NN rejects the registrations
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
index 958557b..cf7cfac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
@@ -35,9 +35,9 @@ import java.util.Map;
/**
* The HostSet allows efficient queries on matching wildcard addresses.
- * <p/>
+ * <p>
* For InetSocketAddress A and B with the same host address,
- * we define a partial order between A and B, A <= B iff A.getPort() == B
+ * we define a partial order between A and B, A &lt;= B iff A.getPort() == B
* .getPort() || B.getPort() == 0.
*/
public class HostSet implements Iterable<InetSocketAddress> {
@@ -46,7 +46,7 @@ public class HostSet implements Iterable<InetSocketAddress> {
/**
* The function that checks whether there exists an entry foo in the set
- * so that foo <= addr.
+ * so that foo &lt;= addr.
*/
boolean matchedBy(InetSocketAddress addr) {
Collection<Integer> ports = addrs.get(addr.getAddress());
@@ -56,7 +56,7 @@ public class HostSet implements Iterable<InetSocketAddress> {
/**
* The function that checks whether there exists an entry foo in the set
- * so that addr <= foo.
+ * so that addr &lt;= foo.
*/
boolean match(InetSocketAddress addr) {
int port = addr.getPort();
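The partial order is a two-line predicate; a sketch (port 0 acts as a wildcard covering every port on the host):

    import java.net.InetSocketAddress;

    public class HostSetOrderSketch {
      // A <= B iff same host address and (A.port == B.port || B.port == 0).
      static boolean lessOrEqual(InetSocketAddress a, InetSocketAddress b) {
        return a.getAddress().equals(b.getAddress())
            && (a.getPort() == b.getPort() || b.getPort() == 0);
      }

      public static void main(String[] args) {
        InetSocketAddress dn = new InetSocketAddress("127.0.0.1", 50010);
        InetSocketAddress any = new InetSocketAddress("127.0.0.1", 0);
        System.out.println(lessOrEqual(dn, any)); // true: wildcard covers dn
        System.out.println(lessOrEqual(any, dn)); // false
      }
    }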
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
index 22983ea..03a6918 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
import org.apache.hadoop.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -82,7 +83,7 @@ public class SlowPeerTracker {
/**
* Information about peers that have reported a node as being slow.
- * Each outer map entry is a map of (DatanodeId) -> (timestamp),
+ * Each outer map entry is a map of (DatanodeId) {@literal ->} (timestamp),
* mapping reporting nodes to the timestamp of the last report from
* that node.
*
@@ -146,7 +147,7 @@ public class SlowPeerTracker {
/**
* Retrieve all reports for all nodes. Stale reports are excluded.
*
- * @return map from SlowNodeId -> (set of nodes reporting peers).
+ * @return map from SlowNodeId {@literal ->} (set of nodes reporting peers).
*/
public Map<String, SortedSet<String>> getReportsForAllDataNodes() {
if (allReports.isEmpty()) {
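The nested map shape described above, including pruning of stale reports, in a hedged sketch (names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class SlowPeerReportsSketch {
      // slow node -> (reporting node -> timestamp of its last report)
      private final Map<String, Map<String, Long>> allReports = new HashMap<>();

      void addReport(String slowNode, String reporter, long now) {
        allReports.computeIfAbsent(slowNode, k -> new HashMap<>())
            .put(reporter, now);
      }

      // Drop reports older than the validity window before serving queries.
      void pruneStale(long now, long validityMs) {
        for (Map<String, Long> byReporter : allReports.values()) {
          byReporter.values().removeIf(ts -> now - ts > validityMs);
        }
      }
    }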
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 4fc47d8..539baf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -56,9 +56,9 @@ import com.google.common.collect.Lists;
* block pool id, on this DataNode.
*
* This class supports the following functionality:
- * <ol>
+ * <ul>
* <li> Formatting a new block pool storage</li>
- * <li> Recovering a storage state to a consistent state (if possible></li>
+ * <li> Recovering a storage state to a consistent state (if possible)</li>
* <li> Taking a snapshot of the block pool during upgrade</li>
* <li> Rolling back a block pool to a previous snapshot</li>
* <li> Finalizing block storage by deletion of a snapshot</li>
@@ -139,11 +139,12 @@ public class BlockPoolSliceStorage extends Storage {
/**
* Load one storage directory. Recover from previous transitions if required.
- *
- * @param nsInfo namespace information
- * @param dataDir the root path of the storage directory
- * @param startOpt startup option
- * @return the StorageDirectory successfully loaded.
+ * @param nsInfo namespace information
+ * @param location the root path of the storage directory
+ * @param startOpt startup option
+ * @param callables list of callable storage directories
+ * @param conf configuration
+ * @return the StorageDirectory successfully loaded.
* @throws IOException
*/
private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
@@ -205,8 +206,10 @@ public class BlockPoolSliceStorage extends Storage {
* data volume.
*
* @param nsInfo namespace information
- * @param dataDirs storage directories of block pool
+ * @param location storage directory of the block pool
* @param startOpt startup option
+ * @param callables list of callable storage directories
+ * @param conf configuration
* @return an array of loaded block pool directories.
* @throws IOException on error
*/
@@ -240,8 +243,10 @@ public class BlockPoolSliceStorage extends Storage {
* data volume.
*
* @param nsInfo namespace information
- * @param dataDirs storage directories of block pool
+ * @param location storage directory of the block pool
* @param startOpt startup option
+ * @param callables list of callable storage directories
+ * @param conf configuration
* @throws IOException on error
*/
List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
@@ -348,13 +353,18 @@ public class BlockPoolSliceStorage extends Storage {
* Analyze whether a transition of the BP state is required and
* perform it if necessary.
* <br>
- * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
- * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular
- * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
+ * Rollback if:
+ * previousLV &gt;= LAYOUT_VERSION &amp;&amp; prevCTime &lt;= namenode.cTime.
+ * Upgrade if:
+ * this.LV &gt; LAYOUT_VERSION || this.cTime &lt; namenode.cTime
+ * Regular startup if:
+ * this.LV = LAYOUT_VERSION &amp;&amp; this.cTime = namenode.cTime
*
- * @param sd storage directory <SD>/current/<bpid>
+ * @param sd storage directory {@literal <SD>/current/<bpid>}
* @param nsInfo namespace info
* @param startOpt startup option
+ * @param callables list of callable storage directories
+ * @param conf configuration
* @return true if the new properties have been written.
*/
private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
@@ -416,20 +426,20 @@ public class BlockPoolSliceStorage extends Storage {
}
/**
- * Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 => 0.23
+ * Upgrade to any release after 0.22 (0.22 included) release
+ * e.g. 0.22 =&gt; 0.23
* Upgrade procedure is as follows:
* <ol>
- * <li>If <SD>/current/<bpid>/previous exists then delete it</li>
- * <li>Rename <SD>/current/<bpid>/current to
- * <SD>/current/bpid/current/previous.tmp</li>
- * <li>Create new <SD>current/<bpid>/current directory</li>
- * <ol>
+ * <li>If {@literal <SD>/current/<bpid>/previous} exists then delete it</li>
+ * <li>Rename {@literal <SD>/current/<bpid>/current} to
+ * {@literal <SD>/current/bpid/current/previous.tmp}</li>
+ * <li>Create new {@literal <SD>current/<bpid>/current} directory</li>
* <li>Hard links for block files are created from previous.tmp to current</li>
* <li>Save new version file in current directory</li>
+ * <li>Rename previous.tmp to previous</li>
* </ol>
- * <li>Rename previous.tmp to previous</li> </ol>
*
- * @param bpSd storage directory <SD>/current/<bpid>
+ * @param bpSd storage directory {@literal <SD>/current/<bpid>}
* @param nsInfo Namespace Info from the namenode
* @throws IOException on error
*/
@@ -777,12 +787,12 @@ public class BlockPoolSliceStorage extends Storage {
}
/**
- * Get a target subdirectory under current/ for a given block file that is being
- * restored from trash.
+ * Get a target subdirectory under current/ for a given block file that is
+ * being restored from trash.
*
* The subdirectory structure under trash/ mirrors that under current/ to keep
* implicit memory of where the files are to be restored.
- *
+ * @param blockFile block file that is being restored from trash.
* @return the target directory to restore a previously deleted block file.
*/
@VisibleForTesting
@@ -847,6 +857,7 @@ public class BlockPoolSliceStorage extends Storage {
/**
* Create a rolling upgrade marker file for each BP storage root, if it
* does not exist already.
+ * @param dnStorageDirs datanode storage directories
*/
public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
throws IOException {
@@ -872,6 +883,7 @@ public class BlockPoolSliceStorage extends Storage {
* Check whether the rolling upgrade marker file exists for each BP storage
* root. If it does exist, then the marker file is cleared and more
* importantly the layout upgrade is finalized.
+ * @param dnStorageDirs datanode storage directories
*/
public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
throws IOException {
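The rollback/upgrade/regular-startup conditions in doTransition's javadoc transcribe into a small decision function; a hedged sketch (parameter names hypothetical; it only restates the documented comparisons):

    public class BpTransitionSketch {
      enum Action { ROLLBACK, UPGRADE, REGULAR_STARTUP }

      static Action decide(int previousLV, long prevCTime, int lv, long cTime,
          int softwareLV, long nnCTime, boolean rollbackRequested) {
        if (rollbackRequested && previousLV >= softwareLV
            && prevCTime <= nnCTime) {
          return Action.ROLLBACK;
        }
        if (lv > softwareLV || cTime < nnCTime) {
          return Action.UPGRADE;
        }
        return Action.REGULAR_STARTUP; // lv == softwareLV && cTime == nnCTime
      }
    }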
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 34f6c33..fe0c7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -338,19 +338,24 @@ public class BlockRecoveryWorker {
/**
* blk_0 blk_1 blk_2 blk_3 blk_4 blk_5 blk_6 blk_7 blk_8
- * 64k 64k 64k 64k 64k 64k 64k 64k 64k <-- stripe_0
+ * 64k 64k 64k 64k 64k 64k 64k 64k 64k &lt;--
+ * stripe_0
* 64k 64k 64k 64k 64k 64k 64k 64k 64k
- * 64k 64k 64k 64k 64k 64k 64k 61k <-- startStripeIdx
+ * 64k 64k 64k 64k 64k 64k 64k 61k &lt;--
+ * startStripeIdx
* 64k 64k 64k 64k 64k 64k 64k
* 64k 64k 64k 64k 64k 64k 59k
* 64k 64k 64k 64k 64k 64k
- * 64k 64k 64k 64k 64k 64k <-- last full stripe
- * 64k 64k 13k 64k 55k 3k <-- target last stripe
+ * 64k 64k 64k 64k 64k 64k &lt;--
+ * last full stripe
+ * 64k 64k 13k 64k 55k 3k &lt;--
+ * target last stripe
* 64k 64k 64k 1k
* 64k 64k 58k
* 64k 64k
* 64k 19k
- * 64k <-- total visible stripe
+ * 64k &lt;--
+ * total visible stripe
*
* Due to different speed of streamers, the internal blocks in a block group
* could have different lengths when the block group isn't ended normally.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..6b1b96f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -189,7 +189,7 @@ public class BlockScanner {
}
/**
- * Returns true if the block scanner is enabled.<p/>
+ * Returns true if the block scanner is enabled.
*
* If the block scanner is disabled, no volume scanners will be created, and
* no threads will start.
@@ -234,7 +234,7 @@ public class BlockScanner {
}
/**
- * Stops and removes a volume scanner.<p/>
+ * Stops and removes a volume scanner.
*
* This function will block until the volume scanner has stopped.
*
@@ -260,7 +260,7 @@ public class BlockScanner {
}
/**
- * Stops and removes all volume scanners.<p/>
+ * Stops and removes all volume scanners.
*
* This function will block until all the volume scanners have stopped.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 40f80a9..99c0a87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -242,7 +242,7 @@ import org.slf4j.LoggerFactory;
* DataNodes.
*
* The DataNode maintains just one critical table:
- * block-> stream of bytes (of BLOCK_SIZE or less)
+ * block{@literal ->} stream of bytes (of BLOCK_SIZE or less)
*
* This info is stored on a local disk. The DataNode
* reports the table's contents to the NameNode upon startup
@@ -527,7 +527,7 @@ public class DataNode extends ReconfigurableBase
}
/**
- * {@inheritdoc}.
+ * {@inheritDoc}.
*/
@Override
public String reconfigurePropertyImpl(String property, String newVal)
@@ -2713,7 +2713,8 @@ public class DataNode extends ReconfigurableBase
return locations;
}
- /** Instantiate & Start a single datanode daemon and wait for it to finish.
+ /** Instantiate &amp; Start a single datanode daemon and wait for it to
+ * finish.
* If this thread is specifically interrupted, it will stop waiting.
*/
@VisibleForTesting
@@ -2722,7 +2723,8 @@ public class DataNode extends ReconfigurableBase
return createDataNode(args, conf, null);
}
- /** Instantiate & Start a single datanode daemon and wait for it to finish.
+ /** Instantiate &amp; Start a single datanode daemon and wait for it to
+ * finish.
* If this thread is specifically interrupted, it will stop waiting.
*/
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index a85ae32..a803c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -215,7 +215,9 @@ public class DataStorage extends Storage {
/**
* VolumeBuilder holds the metadata (e.g., the storage directories) of the
- * prepared volume returned from {@link prepareVolume()}. Calling {@link build()}
+ * prepared volume returned from
+ * {@link #prepareVolume(DataNode, StorageLocation, List)}.
+ * Calling {@link VolumeBuilder#build()}
* to add the metadata to {@link DataStorage} so that this prepared volume can
* be active.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 7ae9e45..445e021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -280,7 +280,6 @@ public class DirectoryScanner implements Runnable {
/**
* Create a new directory scanner, but don't cycle it running yet.
*
- * @param datanode the parent datanode
* @param dataset the dataset to scan
* @param conf the Configuration object
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index b8e08d0..6349062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -178,7 +178,6 @@ public class FileIoProvider {
* Call sync_file_range on the given file descriptor.
*
* @param volume target volume. null if unavailable.
- * @throws IOException
*/
public void syncFileRange(
@Nullable FsVolumeSpi volume, FileDescriptor outFd,
@@ -198,7 +197,6 @@ public class FileIoProvider {
* Call posix_fadvise on the given file descriptor.
*
* @param volume target volume. null if unavailable.
- * @throws IOException
*/
public void posixFadvise(
@Nullable FsVolumeSpi volume, String identifier, FileDescriptor outFd,
@@ -394,7 +392,6 @@ public class FileIoProvider {
* @param volume target volume. null if unavailable.
* @param fd File descriptor object.
* @return FileOutputStream to the given file object.
- * @throws FileNotFoundException
*/
public FileOutputStream getFileOutputStream(
@Nullable FsVolumeSpi volume, FileDescriptor fd) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 181ef80..e0afb9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -45,8 +45,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.<p/>
- * They are all managed by the DataNode's BlockScanner.
+ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.
+ * <p>They are all managed by the DataNode's BlockScanner.
*/
public class VolumeScanner extends Thread {
public static final Logger LOG =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
index 2719f71..ec2b656 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
@@ -24,7 +24,10 @@ import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.base.Preconditions;
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
.newUpdater;
@@ -52,9 +55,7 @@ import java.util.logging.Logger;
* {@link ListeningExecutorService}, and deriving a {@code Future} from an
* existing one, typically using methods like {@link Futures#transform
* (ListenableFuture, com.google.common.base.Function) Futures.transform}
- * and {@link Futures#catching(ListenableFuture, Class,
- * com.google.common.base.Function, java.util.concurrent.Executor)
- * Futures.catching}.
+ * and its overloaded versions.
* <p>
* <p>This class implements all methods in {@code ListenableFuture}.
* Subclasses should provide a way to set the result of the computation
@@ -1265,12 +1266,6 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
* r.run();
* }
* }}</pre>
- * <p>
- * <p>This should be preferred to {@link #newDirectExecutorService()}
- * because implementing the {@link ExecutorService} subinterface
- * necessitates significant performance overhead.
- *
- * @since 18.0
*/
public static Executor directExecutor() {
return DirectExecutor.INSTANCE;
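A hedged usage example for a direct executor with a listenable future (Guava's MoreExecutors shown; the class above provides an equivalent):

    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.concurrent.Executors;

    public class DirectExecutorSketch {
      public static void main(String[] args) throws Exception {
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(
            Executors.newSingleThreadExecutor());
        ListenableFuture<Integer> f = pool.submit(() -> 6 * 7);
        // The listener is cheap, so run it on the completing thread rather
        // than bouncing through another executor.
        f.addListener(() -> System.out.println("done"),
            MoreExecutors.directExecutor());
        System.out.println(f.get()); // 42
        pool.shutdown();
      }
    }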
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index f4bf839..bd9ed7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -192,7 +192,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
FsVolumeReferences getFsVolumeReferences();
/**
- * Add a new volume to the FsDataset.<p/>
+ * Add a new volume to the FsDataset.
*
* If the FSDataset supports block scanning, this function registers
* the new volume with the block scanner.
@@ -226,7 +226,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/** @return the volume that contains a replica of the block. */
V getVolume(ExtendedBlock b);
- /** @return a volume information map (name => info). */
+ /** @return a volume information map (name {@literal =>} info). */
Map<String, Object> getVolumeInfoMap();
/**
@@ -273,7 +273,8 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/**
* Get reference to the replica meta info in the replicasMap.
- * To be called from methods that are synchronized on {@link FSDataset}
+ * To be called from methods that are synchronized on
+ * implementations of {@link FsDatasetSpi}
* @return replica from the replicas map
*/
@Deprecated
@@ -394,7 +395,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* Finalizes the block previously opened for writing using writeToBlock.
* The block size is what is in the parameter b and it must match the amount
* of data written
- * @param block Block to be finalized
+ * @param b Block to be finalized
* @param fsyncDir whether to sync the directory changes to durable device.
* @throws IOException
* @throws ReplicaNotFoundException if the replica can not be found when the
@@ -488,14 +489,13 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/**
* Determine if the specified block is cached.
* @param bpid Block pool id
- * @param blockIds - block id
+ * @param blockId - block id
* @return true if the block is cached
*/
boolean isCached(String bpid, long blockId);
/**
* Check if all the data directories are healthy
- * @return A set of unhealthy data directories.
* @param failedVolumes
*/
void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 7329ba3..be978d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -98,17 +98,17 @@ public interface FsVolumeSpi
/**
* BlockIterator will return ExtendedBlock entries from a block pool in
- * this volume. The entries will be returned in sorted order.<p/>
+ * this volume. The entries will be returned in sorted order.<p>
*
* BlockIterator objects themselves do not always have internal
* synchronization, so they can only safely be used by a single thread at a
- * time.<p/>
+ * time.<p>
*
* Closing the iterator does not save it. You must call save to save it.
*/
interface BlockIterator extends Closeable {
/**
- * Get the next block.<p/>
+ * Get the next block.<p>
*
* Note that this block may be removed in between the time we list it,
* and the time the caller tries to use it, or it may represent a stale
@@ -146,7 +146,7 @@ public interface FsVolumeSpi
void save() throws IOException;
/**
- * Set the maximum staleness of entries that we will return.<p/>
+ * Set the maximum staleness of entries that we will return.<p>
*
* A maximum staleness of 0 means we will never return stale entries; a
* larger value will allow us to reduce resource consumption in exchange
@@ -211,12 +211,12 @@ public interface FsVolumeSpi
* Because millions of these structures may be created, we try to save
* memory here. So instead of storing full paths, we store path suffixes.
* The block file, if it exists, will have a path like this:
- * <volume_base_path>/<block_path>
+ * {@literal <volume_base_path>/<block_path>}
* So we don't need to store the volume path, since we already know what the
* volume is.
*
* The metadata file, if it exists, will have a path like this:
- * <volume_base_path>/<block_path>_<genstamp>.meta
+ * {@literal <volume_base_path>/<block_path>_<genstamp>.meta}
* So if we have a block file, there isn't any need to store the block path
* again.
*
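A sketch of the suffix scheme described above: only the block-path suffix and genstamp are stored, and full paths are rebuilt from the volume's base path on demand (paths hypothetical):

    import java.io.File;

    public class SuffixPathSketch {
      public static void main(String[] args) {
        File volumeBase = new File("/data/1/dfs/dn"); // known per volume
        String blockSuffix =
            "current/BP-1/current/finalized/subdir0/blk_1001";
        long genstamp = 1002L;

        File blockFile = new File(volumeBase, blockSuffix);
        File metaFile =
            new File(volumeBase, blockSuffix + "_" + genstamp + ".meta");
        System.out.println(blockFile);
        System.out.println(metaFile);
      }
    }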
@@ -439,6 +439,7 @@ public interface FsVolumeSpi
* @param bpid block pool id to scan
* @param report the list onto which blocks reports are placed
* @param reportCompiler
+ * @throws InterruptedException
* @throws IOException
*/
void compileReport(String bpid,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
index 771a17b..401fc8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
@@ -94,7 +94,8 @@ public class OutlierDetector {
/**
* Return a set of nodes/ disks whose latency is much higher than
- * their counterparts. The input is a map of (resource -> aggregate latency)
+ * their counterparts. The input is a map of (resource {@literal ->} aggregate
+ * latency)
* entries.
*
* The aggregate may be an arithmetic mean or a percentile e.g.
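One common way to flag such outliers from a (resource -> aggregate latency) map is a median-based cutoff; a hedged sketch (the factor k is illustrative, not the class's actual rule):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class OutlierSketch {
      // Flag resources whose latency exceeds k times the median latency.
      static List<String> outliers(Map<String, Double> latencies, double k) {
        List<Double> sorted = new ArrayList<>(latencies.values());
        Collections.sort(sorted);
        double median = sorted.get(sorted.size() / 2);
        List<String> result = new ArrayList<>();
        for (Map.Entry<String, Double> e : latencies.entrySet()) {
          if (e.getValue() > k * median) {
            result.add(e.getKey());
          }
        }
        return result;
      }

      public static void main(String[] args) {
        Map<String, Double> m = new HashMap<>();
        m.put("dn1", 5.0); m.put("dn2", 6.0); m.put("dn3", 60.0);
        System.out.println(outliers(m, 3.0)); // [dn3]
      }
    }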
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 642cf21..7824f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -60,7 +60,6 @@ public class DiskBalancerException extends IOException {
/**
* Constructs an {@code IOException} with the specified detail message and
* cause.
- * <p/>
* <p> Note that the detail message associated with {@code cause} is
* <i>not</i>
* automatically incorporated into this exception's detail message.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index 8de19aa..1307983 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -50,21 +50,20 @@ import java.util.concurrent.Future;
/**
* DiskBalancerCluster represents the nodes that we are working against.
- * <p/>
+ * <p>
* Please Note :
- * <p/>
* Semantics of inclusionList and exclusionLists.
- * <p/>
+ * <p>
* If a non-empty inclusionList is specified then the diskBalancer assumes that
* the user is only interested in processing that list of nodes. This node list
* is checked against the exclusionList and only the nodes in inclusionList but
* not in exclusionList are processed.
- * <p/>
+ * <p>
* If inclusionList is empty, then we assume that all live nodes in the cluster are
* to be processed by diskBalancer. In that case diskBalancer will avoid any
* nodes specified in the exclusionList but will process all nodes in the
* cluster.
- * <p/>
+ * <p>
* In other words, an empty inclusionList means all the nodes are processed;
* otherwise only the given list is processed, and the exclusionList is always honored.
*/
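The semantics spelled out above reduce to set operations; a hedged sketch:

    import java.util.HashSet;
    import java.util.Set;

    public class NodeSelectionSketch {
      // Empty inclusionList -> start from all live nodes; otherwise start
      // from the inclusionList. The exclusionList is always honored.
      static Set<String> nodesToProcess(Set<String> liveNodes,
          Set<String> inclusionList, Set<String> exclusionList) {
        Set<String> selected =
            new HashSet<>(inclusionList.isEmpty() ? liveNodes : inclusionList);
        selected.removeAll(exclusionList);
        return selected;
      }
    }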
@@ -291,7 +290,7 @@ public class DiskBalancerCluster {
/**
* Compute plan takes a node and constructs a planner that creates a plan that
* we would like to follow.
- * <p/>
+ * <p>
* This function creates a thread pool and executes a planner on each node
* that we are supposed to plan for. Each of these planners return a NodePlan
* that we can persist or schedule for execution with a diskBalancer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
index a200f4d..6cf244b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
@@ -186,7 +186,7 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
}
/**
- * returns NodeDataDensity Metric.
+ * Returns NodeDataDensity Metric.
*
* @return float
*/
@@ -195,8 +195,8 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
}
/**
- * computes nodes data density.
- * <p/>
+ * Computes the node's data density.
+ *
* This metric allows us to compare different nodes and how well the data is
* spread across a set of volumes inside the node.
*/
@@ -231,8 +231,8 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
/**
* Adds a volume to the DataNode.
- * <p/>
- * it is assumed that we have one thread per node hence this call is not
+ *
+ * It is assumed that we have one thread per node, hence this call is not
 * synchronised, nor is the map protected.
*
* @param volume - volume
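
The javadoc above leaves the NodeDataDensity formula unstated; purely as an illustration (this is not the actual Hadoop computation), one plausible shape is the summed deviation of each volume's used ratio from the node's ideal ratio, where a lower score means better spread:

public class DataDensitySketch {
  /** Used and capacity bytes for one volume; illustrative type. */
  static class VolumeStat {
    final long used;
    final long capacity;
    VolumeStat(long used, long capacity) {
      this.used = used;
      this.capacity = capacity;
    }
  }

  /** Lower score = data more evenly spread across the node's volumes. */
  static float nodeDataDensity(VolumeStat[] volumes) {
    long totalUsed = 0;
    long totalCapacity = 0;
    for (VolumeStat v : volumes) {
      totalUsed += v.used;
      totalCapacity += v.capacity;
    }
    float ideal = totalCapacity == 0 ? 0f : (float) totalUsed / totalCapacity;
    float density = 0f;
    for (VolumeStat v : volumes) {
      float actual = v.capacity == 0 ? 0f : (float) v.used / v.capacity;
      density += Math.abs(ideal - actual); // deviation from the ideal spread
    }
    return density;
  }
}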
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
index fb83eeb..568c1e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
@@ -34,7 +34,7 @@ import java.util.TreeSet;
/**
* Greedy Planner is a simple planner that computes the largest possible move at
* any point of time given a volumeSet.
- * <p/>
+ * <p>
* This is done by choosing the disks with largest amount of data above and
* below the idealStorage and then a move is scheduled between them.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
index 10ecc23..fa268c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
@@ -156,7 +156,7 @@ public final class AclStorage {
*
* @param inode INode to read
* @param snapshotId int ID of snapshot to read
- * @return List<AclEntry> containing extended inode ACL entries
+ * @return {@literal List<AclEntry>} containing extended inode ACL entries
*/
public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
AclFeature f = inode.getAclFeature(snapshotId);
@@ -167,7 +167,7 @@ public final class AclStorage {
* Reads the existing extended ACL entries of an INodeAttribute object.
*
* @param inodeAttr INode to read
- * @return List<AclEntry> containing extended inode ACL entries
+ * @return {@code List<AclEntry>} containing extended inode ACL entries
*/
public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
AclFeature f = inodeAttr.getAclFeature();
@@ -175,7 +175,7 @@ public final class AclStorage {
}
/**
- * Build list of AclEntries from the AclFeature
+ * Build list of AclEntries from the {@link AclFeature}
* @param aclFeature AclFeature
* @return List of entries
*/
@@ -204,7 +204,7 @@ public final class AclStorage {
* ACL modification APIs, which always apply a delta on top of current state.
*
* @param inode INode to read
- * @return List<AclEntry> containing all logical inode ACL entries
+ * @return {@code List<AclEntry>} containing all logical inode ACL entries
*/
public static List<AclEntry> readINodeLogicalAcl(INode inode) {
FsPermission perm = inode.getFsPermission();
@@ -262,7 +262,7 @@ public final class AclStorage {
* {@link AclFeature}.
*
* @param inode INode to update
- * @param newAcl List<AclEntry> containing new ACL entries
+ * @param newAcl {@code List<AclEntry>} containing new ACL entries
* @param snapshotId int latest snapshot ID of inode
* @throws AclException if the ACL is invalid for the given inode
* @throws QuotaExceededException if quota limit is exceeded
@@ -312,8 +312,8 @@ public final class AclStorage {
/**
* Creates an AclFeature from the given ACL entries.
*
- * @param accessEntries List<AclEntry> access ACL entries
- * @param defaultEntries List<AclEntry> default ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
+ * @param defaultEntries {@code List<AclEntry>} default ACL entries
* @return AclFeature containing the required ACL entries
*/
private static AclFeature createAclFeature(List<AclEntry> accessEntries,
@@ -347,7 +347,7 @@ public final class AclStorage {
* POSIX ACLs model, which presents the mask as the permissions of the group
* class.
*
- * @param accessEntries List<AclEntry> access ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
@@ -365,7 +365,7 @@ public final class AclStorage {
* group and other permissions are in order. Also preserve sticky bit and
* toggle ACL bit off.
*
- * @param accessEntries List<AclEntry> access ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
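
A note on the two escape tags used in this file: {@code} and {@literal} both escape their argument, but {@code} additionally renders it in code font. A hypothetical method showing the usual choice:

public class EscapeTagExample {
  /**
   * @return a {@code List<AclEntry>} (code font, since it names a Java
   *         type); by contrast prose such as {@literal size < limit}
   *         stays in the surrounding text font.
   */
  public Object aclEntries() {
    return java.util.Collections.emptyList();
  }
}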
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 5604a21..8fa9578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -67,7 +67,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
/**
* Manages the list of encryption zones in the filesystem.
- * <p/>
+ * <p>
* The EncryptionZoneManager has its own lock, but relies on the FSDirectory
* lock being held for many operations. The FSDirectory lock should not be
* taken if the manager lock is already held.
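
The rule stated above (take the FSDirectory lock first, never after the manager's own lock) is the standard way to keep two locks deadlock-free. A generic sketch of that discipline, with hypothetical lock fields standing in for the real ones:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderSketch {
  // Stand-ins for the FSDirectory lock and the manager's own lock.
  private final ReentrantReadWriteLock dirLock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock ezLock = new ReentrantReadWriteLock();

  /** Correct order: outer FSDirectory lock first, manager lock second. */
  void operationNeedingBoth() {
    dirLock.writeLock().lock();
    try {
      ezLock.writeLock().lock();
      try {
        // ... mutate encryption-zone state under both locks ...
      } finally {
        ezLock.writeLock().unlock();
      }
    } finally {
      dirLock.writeLock().unlock();
    }
  }
  // Taking dirLock while holding ezLock would invert the order and can
  // deadlock against a thread running the sequence above.
}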
@@ -294,7 +294,7 @@ public class EncryptionZoneManager {
/**
* Add a new encryption zone.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*
* @param inodeId of the encryption zone
@@ -308,7 +308,7 @@ public class EncryptionZoneManager {
/**
* Add a new encryption zone.
- * <p/>
+ * <p>
* Does not assume that the FSDirectory lock is held.
*
* @param inodeId of the encryption zone
@@ -326,7 +326,7 @@ public class EncryptionZoneManager {
/**
* Remove an encryption zone.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
void removeEncryptionZone(Long inodeId) {
@@ -344,7 +344,7 @@ public class EncryptionZoneManager {
/**
* Returns true if an IIP is within an encryption zone.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
@@ -355,7 +355,7 @@ public class EncryptionZoneManager {
/**
* Returns the full path from an INode id.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
String getFullPathName(Long nodeId) {
@@ -370,7 +370,7 @@ public class EncryptionZoneManager {
/**
* Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
* not within an encryption zone.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
String getKeyName(final INodesInPath iip) throws IOException {
@@ -385,7 +385,7 @@ public class EncryptionZoneManager {
/**
* Looks up the EncryptionZoneInt for a path within an encryption zone.
* Returns null if path is not within an EZ.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip)
@@ -434,7 +434,7 @@ public class EncryptionZoneManager {
* Looks up the nearest ancestor EncryptionZoneInt that contains the given
* path (excluding itself).
* Returns null if path is not within an EZ, or the path is the root dir '/'
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
private EncryptionZoneInt getParentEncryptionZoneForPath(INodesInPath iip)
@@ -467,7 +467,7 @@ public class EncryptionZoneManager {
/**
* Throws an exception if the provided path cannot be renamed into the
* destination because of differing parent encryption zones.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*
* @param srcIIP source IIP
@@ -529,7 +529,7 @@ public class EncryptionZoneManager {
/**
* Create a new encryption zone.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite,
@@ -573,7 +573,7 @@ public class EncryptionZoneManager {
/**
* Cursor-based listing of encryption zones.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
*/
BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
@@ -621,6 +621,8 @@ public class EncryptionZoneManager {
* @param zoneId
* @param zonePath
* @return true if path resolve to the id, false if not.
+ * @throws AccessControlException
+ * @throws ParentNotDirectoryException
* @throws UnresolvedLinkException
*/
private boolean pathResolvesToId(final long zoneId, final String zonePath)
@@ -645,6 +647,9 @@ public class EncryptionZoneManager {
/**
* Re-encrypts the given encryption zone path. If the given path is not the
* root of an encryption zone, an exception is thrown.
+ * @param zoneIIP
+ * @param keyVersionName
+ * @throws IOException
*/
List<XAttr> reencryptEncryptionZone(final INodesInPath zoneIIP,
final String keyVersionName) throws IOException {
@@ -673,7 +678,9 @@ public class EncryptionZoneManager {
/**
* Cancels the currently-running re-encryption of the given encryption zone.
* If the given path is not the root of an encryption zone,
- * * an exception is thrown.
+ * an exception is thrown.
+ * @param zoneIIP
+ * @throws IOException
*/
List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP)
throws IOException {
@@ -693,8 +700,10 @@ public class EncryptionZoneManager {
/**
* Cursor-based listing of zone re-encryption status.
- * <p/>
+ * <p>
* Called while holding the FSDirectory lock.
+ * @param prevId
+ * @throws IOException
*/
BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
final long prevId) throws IOException {
@@ -735,6 +744,10 @@ public class EncryptionZoneManager {
/**
* Return whether an INode is an encryption zone root.
+ * @param inode
+ * @param name
+ * @return true when INode is an encryption zone root else false
+ * @throws FileNotFoundException
*/
boolean isEncryptionZoneRoot(final INode inode, final String name)
throws FileNotFoundException {
@@ -756,6 +769,7 @@ public class EncryptionZoneManager {
* Return whether an INode is an encryption zone root.
*
* @param inode the zone inode
+ * @param name
* @throws IOException if the inode is not a directory,
* or is a directory but not the root of an EZ.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0140912..712a327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -634,12 +634,10 @@ public class FSDirectory implements Closeable {
* no permission checks.
* @param src The path to resolve.
* @param dirOp The {@link DirOp} that controls additional checks.
- * @param resolveLink If false, only ancestor symlinks will be checked. If
- * true, the last inode will also be checked.
* @return if the path indicates an inode, return path after replacing up to
- * <inodeid> with the corresponding path of the inode, else the path
- * in {@code src} as is. If the path refers to a path in the "raw"
- * directory, return the non-raw pathname.
+ * {@code <inodeid>} with the corresponding path of the inode, else
+ * the path in {@code src} as is. If the path refers to a path in
+ * the "raw" directory, return the non-raw pathname.
* @throws FileNotFoundException
* @throws AccessControlException
* @throws ParentNotDirectoryException
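
As a worked illustration of the mapping described above (all concrete paths and the inode id below are made-up examples):

// Illustrative inputs and outputs only; inode id 16386 is hypothetical.
//
//   resolvePath("/.reserved/.inodes/16386", dirOp)
//       -> "/user/alice/data"         // current full path of inode 16386
//   resolvePath("/foo/bar", dirOp)
//       -> "/foo/bar"                 // non-inode path returned as-is
//   resolvePath("/.reserved/raw/user/alice", dirOp)
//       -> "/user/alice"              // raw prefix stripped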
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index eda1164..d1904fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -341,10 +341,11 @@ import org.slf4j.LoggerFactory;
*
* This class and its contents keep:
*
- * 1) Valid fsname --> blocklist (kept on disk, logged)
+ * 1) Valid fsname {@literal -->} blocklist (kept on disk, logged)
* 2) Set of all valid blocks (inverted #1)
- * 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
- * 4) machine --> blocklist (inverted #2)
+ * 3) block {@literal -->} machinelist (kept in memory, rebuilt dynamically
+ * from reports)
+ * 4) machine {@literal -->} blocklist (inverted #2)
* 5) LRU cache of updated-heartbeat machines
*/
@InterfaceAudience.Private
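
Purely as a reading aid, the five tables can be pictured as the following in-memory shapes; the fields and element types are illustrative, not FSNamesystem's actual declarations:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class NamesystemTablesSketch {
  Map<String, List<Long>> fsnameToBlocks;        // 1) kept on disk, logged
  Set<Long> validBlocks;                         // 2) all valid blocks
  Map<Long, List<String>> blockToMachines;       // 3) rebuilt from reports
  Map<String, List<Long>> machineToBlocks;       // 4) per-machine blocklist
  LinkedHashMap<String, Long> heartbeatLruCache; // 5) updated-heartbeat LRU
}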
@@ -1732,11 +1733,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
/**
- * return a list of blocks & their locations on <code>datanode</code> whose
- * total size is <code>size</code>
+ * return a list of blocks & their locations on {@code datanode} whose
+ * total size is {@code size}
*
* @param datanode on which blocks are located
* @param size total size of blocks
+ * @param minimumBlockSize
*/
public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
minimumBlockSize) throws IOException {
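
A hedged sketch of the contract documented above: skip blocks below minimumBlockSize and accumulate until the requested total size is reached. This illustrates the documented behavior only, not the real implementation:

import java.util.ArrayList;
import java.util.List;

public class GetBlocksSketch {
  static class Block {
    final long id;
    final long numBytes;
    Block(long id, long numBytes) {
      this.id = id;
      this.numBytes = numBytes;
    }
  }

  /** Blocks on one datanode totalling at least {@code size} bytes. */
  static List<Block> getBlocks(List<Block> blocksOnNode, long size,
                               long minimumBlockSize) {
    List<Block> result = new ArrayList<>();
    long total = 0;
    for (Block b : blocksOnNode) {
      if (b.numBytes < minimumBlockSize) {
        continue; // too small to be worth moving
      }
      result.add(b);
      total += b.numBytes;
      if (total >= size) {
        break; // reached the requested total size
      }
    }
    return result;
  }
}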
@@ -1753,6 +1755,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Dump all metadata into specified file
+ * @param filename
*/
void metaSave(String filename) throws IOException {
String operationName = "metaSave";
@@ -1884,6 +1887,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/////////////////////////////////////////////////////////
/**
* Set permissions for an existing file.
+ * @param src
+ * @param permission
* @throws IOException
*/
void setPermission(String src, FsPermission permission) throws IOException {
@@ -1908,6 +1913,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Set owner for an existing file.
+ * @param src
+ * @param group
+ * @param username
* @throws IOException
*/
void setOwner(String src, String username, String group)
@@ -2188,6 +2196,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @param replication new replication
* @return true if successful;
* false if file does not exist or is a directory
+ * @throws IOException
*/
boolean setReplication(final String src, final short replication)
throws IOException {
@@ -2219,6 +2228,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
*
* @param src file/directory path
* @param policyName storage policy name
+ * @throws IOException
*/
void setStoragePolicy(String src, String policyName) throws IOException {
final String operationName = "setStoragePolicy";
@@ -2245,6 +2255,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* Satisfy the storage policy for a file or a directory.
*
* @param src file/directory path
+ * @throws IOException
*/
void satisfyStoragePolicy(String src, boolean logRetryCache)
throws IOException {
@@ -2295,6 +2306,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* unset storage policy set for a given file or a directory.
*
* @param src file/directory path
+ * @throws IOException
*/
void unsetStoragePolicy(String src) throws IOException {
final String operationName = "unsetStoragePolicy";
@@ -2321,6 +2333,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @param src
* file/directory path
* @return storage policy object
+ * @throws IOException
*/
BlockStoragePolicy getStoragePolicy(String src) throws IOException {
checkOperation(OperationCategory.READ);
@@ -2336,6 +2349,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* @return All the existing block storage policies
+ * @throws IOException
*/
BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkOperation(OperationCategory.READ);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 2123f4e..03b1ca3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -523,8 +523,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* 2. For a {@link WithName} node, since the node must be in a snapshot, we
* only count the quota usage for those nodes that still existed at the
* creation time of the snapshot associated with the {@link WithName} node.
- * We do not count in the size of the diff list.
- * <pre>
+ * We do not count in the size of the diff list.
+ * </pre>
*
* @param bsps Block storage policy suite to calculate intended storage type usage
* @param blockStoragePolicyId block storage policy id of the current INode
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index e4e14f7..8655bb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -669,10 +669,10 @@ public abstract class INodeReference extends INode {
/**
* {@inheritDoc}
- * <br/>
+ * <br>
* To destroy a DstReference node, we first remove its link with the
- * referred node. If the reference number of the referred node is <= 0, we
- * destroy the subtree of the referred node. Otherwise, we clean the
+ * referred node. If the reference number of the referred node is <= 0,
+ * we destroy the subtree of the referred node. Otherwise, we clean the
* referred node's subtree and delete everything created after the last
* rename operation, i.e., everything outside of the scope of the prior
* WithName nodes.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 04fb50e..f072220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -329,8 +329,8 @@ public class INodesInPath {
}
/**
- * @return the i-th inode if i >= 0;
- * otherwise, i < 0, return the (length + i)-th inode.
+ * @return the i-th inode if i {@literal >=} 0;
+ * otherwise, i {@literal <} 0, return the (length + i)-th inode.
*/
public INode getINode(int i) {
return inodes[(i < 0) ? inodes.length + i : i];
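
A tiny worked example of the indexing rule above, reusing the same arithmetic as the method body; the array contents are made up:

public class NegativeIndexExample {
  public static void main(String[] args) {
    // Stand-ins for the inodes of path "/a/b/c": root, a, b, c.
    String[] inodes = {"/", "a", "b", "c"};
    System.out.println(get(inodes, 0));  // "/"  (the root)
    System.out.println(get(inodes, -1)); // "c"  (the last inode)
    System.out.println(get(inodes, -2)); // "b"  (length + (-2) = 2)
  }

  static String get(String[] inodes, int i) {
    return inodes[(i < 0) ? inodes.length + i : i];
  }
}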
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index d6d2094..7331676 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -112,7 +112,7 @@ public interface JournalManager extends Closeable, FormatConfirmable,
void doRollback() throws IOException;
/**
- * Discard the segments whose first txid is >= the given txid.
+ * Discard the segments whose first txid is {@literal >=} the given txid.
* @param startTxId The given txid should be right at the segment boundary,
 * i.e., it should be the first txid of some segment, if a segment
 * corresponding to the txid exists.
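
A small sketch of the documented discard semantics, keyed by each segment's first txid; the data structure is illustrative, not the JournalManager's internals:

import java.util.TreeMap;

public class DiscardSegmentsSketch {
  // first txid of a segment -> last txid of that segment
  private final TreeMap<Long, Long> segments = new TreeMap<>();

  /** Drop every segment whose first txid is >= startTxId. */
  void discardSegments(long startTxId) {
    // e.g. with segments [1,100], [101,200], [201,300],
    // discardSegments(101) removes [101,200] and [201,300].
    segments.tailMap(startTxId, true).clear();
  }
}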
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 31fb2bb..75db8de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -208,7 +208,7 @@ public class LeaseManager {
* read or write lock.
*
* @param ancestorDir the ancestor {@link INodeDirectory}
- * @return Set<INodesInPath>
+ * @return {@code Set<INodesInPath>}
*/
public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
ancestorDir) throws IOException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
index dad5779..ead56a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
@@ -52,7 +52,7 @@ public final class MetaRecoveryContext {
* Display a prompt to the user and get his or her choice.
*
* @param prompt The prompt to display
- * @param default First choice (will be taken if autoChooseDefault is
+ * @param firstChoice First choice (will be taken if autoChooseDefault is
* true)
 * @param choices Other choices
*
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 9f82cbd..b91e7ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -176,8 +176,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DE
* is a second backup/failover NameNode, or when using federated NameNodes.)
*
* The NameNode controls two critical tables:
- * 1) filename->blocksequence (namespace)
- * 2) block->machinelist ("inodes")
+ * 1) filename{@literal ->}blocksequence (namespace)
+ * 2) block{@literal ->}machinelist ("inodes")
*
* The first table is stored on disk and is very precious.
* The second table is rebuilt every time the NameNode comes up.
@@ -1111,7 +1111,7 @@ public class NameNode extends ReconfigurableBase implements
}
/**
- * @return NameNodeHttpServer, used by unit tests to ensure a full shutdown,
+ * NameNodeHttpServer, used by unit tests to ensure a full shutdown,
* so that no bind exception is thrown during restart.
*/
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 5d664cb..56607f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -94,14 +94,13 @@ import com.google.common.annotations.VisibleForTesting;
* <p>The tool scans all files and directories, starting from an indicated
* root path. The following abnormal conditions are detected and handled:</p>
* <ul>
- * <li>files with blocks that are completely missing from all datanodes.<br/>
+ * <li>files with blocks that are completely missing from all datanodes.<br>
* In this case the tool can perform one of the following actions:
* <ul>
- * <li>none ({@link #FIXING_NONE})</li>
* <li>move corrupted files to /lost+found directory on DFS
- * ({@link #FIXING_MOVE}). Remaining data blocks are saved as a
+ * ({@link #doMove}). Remaining data blocks are saved as
 * block chains, representing the longest consecutive series of valid blocks.</li>
- * <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ * <li>delete corrupted files ({@link #doDelete})</li>
* </ul>
* </li>
* <li>detect files with under-replicated or over-replicated blocks</li>
@@ -201,7 +200,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
*/
NamenodeFsck(Configuration conf, NameNode namenode,
NetworkTopology networktopology,
- Map<String,String[]> pmap, PrintWriter out,
+ Map<String, String[]> pmap, PrintWriter out,
int totalDatanodes, InetAddress remoteAddress) {
this.conf = conf;
this.namenode = namenode;
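
For orientation, the repair choices listed in the class javadoc above reduce to a dispatch on the doMove/doDelete flags that the corrected {@link} tags point at; everything else in this sketch is illustrative:

public class FsckActionSketch {
  /** Which repair action fsck applies to a corrupt file, per its flags. */
  static String actionFor(boolean doMove, boolean doDelete) {
    if (doMove) {
      return "copy salvageable block chains to /lost+found";
    }
    if (doDelete) {
      return "delete the corrupt file";
    }
    return "report only, leave the file in place";
  }
}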