Posted to common-commits@hadoop.apache.org by za...@apache.org on 2022/10/20 02:58:31 UTC

[hadoop] branch trunk updated: HDFS-16803.Improve some annotations in hdfs module. (#5031)

This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new c5c00f3d2ce HDFS-16803.Improve some annotations in hdfs module. (#5031)
c5c00f3d2ce is described below

commit c5c00f3d2ce0f28f49bc2ccdf1ea67c967f50e8f
Author: jianghuazhu <74...@qq.com>
AuthorDate: Thu Oct 20 10:58:23 2022 +0800

    HDFS-16803.Improve some annotations in hdfs module. (#5031)
---
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java   |  6 +++---
 .../apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java  |  3 +++
 .../hadoop/hdfs/server/federation/router/RemoteMethod.java   |  2 +-
 .../apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java   |  2 +-
 .../org/apache/hadoop/hdfs/server/datanode/DataStorage.java  |  2 +-
 .../hdfs/server/datanode/checker/ThrottledAsyncChecker.java  |  4 ++--
 .../hadoop/hdfs/server/datanode/checker/TimeoutFuture.java   |  1 +
 .../datanode/fsdataset/impl/RamDiskReplicaTracker.java       |  2 +-
 .../apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java    | 12 ++++++------
 .../apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java |  2 +-
 .../hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java |  8 ++++----
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java     |  2 +-
 .../main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java  |  2 +-
 13 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 2d8953e98b2..ff2c5f37c69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -568,7 +568,7 @@ public class DistributedFileSystem extends FileSystem
 
   /**
    * Same as
-   * {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
+   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
    * Progressable, ChecksumOpt)} with a few additions. First, addition of
    * favoredNodes that is a hint to where the namenode should place the file
    * blocks. The favored nodes hint is not persisted in HDFS. Hence it may be
@@ -637,12 +637,12 @@ public class DistributedFileSystem extends FileSystem
 
   /**
    * Similar to {@link #create(Path, FsPermission, EnumSet, int, short, long,
-   * Progressable, ChecksumOpt, InetSocketAddress[], String)}, it provides a
+   * Progressable, ChecksumOpt, InetSocketAddress[], String, String)}, it provides a
    * HDFS-specific version of {@link #createNonRecursive(Path, FsPermission,
    * EnumSet, int, short, long, Progressable)} with a few additions.
    *
    * @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
-   * ChecksumOpt, InetSocketAddress[], String) for the descriptions of
+   * ChecksumOpt, InetSocketAddress[], String, String) for the descriptions of
    * additional parameters, i.e., favoredNodes, ecPolicyName and
    * storagePolicyName.
    */
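
As a quick illustration of the favoredNodes hint documented in the hunk above, here is a minimal sketch (not part of this patch). It assumes the public DistributedFileSystem#create overload that takes an InetSocketAddress[] of favored nodes; the host names and port are placeholders, and as the Javadoc notes the hint is best-effort and not persisted by the NameNode.

    import java.io.OutputStream;
    import java.net.InetSocketAddress;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FavoredNodesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          // Assumes the default filesystem is HDFS.
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Hint: ask the NameNode to place replicas on these DataNodes if possible.
          InetSocketAddress[] favored = {
              new InetSocketAddress("dn1.example.com", 9866),   // placeholder hosts
              new InetSocketAddress("dn2.example.com", 9866)
          };
          try (OutputStream out = dfs.create(new Path("/tmp/favored-demo"),
              FsPermission.getFileDefault(), true /* overwrite */,
              4096, (short) 3, 128L * 1024 * 1024, null /* progress */, favored)) {
            out.write("hello".getBytes(StandardCharsets.UTF_8));
          }
        }
      }
    }
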
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
index ad4cea6468d..fe87158c1cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs.client;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+
+import java.util.EnumSet;
 
 /**
  * CreateEncryptionZoneFlag is used in
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
index e5df4893a91..ecaa97b9330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
@@ -131,7 +131,7 @@ public class RemoteMethod {
   /**
    * Get the represented java method.
    *
-   * @return Method
+   * @return {@link Method}
    * @throws IOException If the method cannot be found.
    */
   public Method getMethod() throws IOException {
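
For context on the corrected @return/@throws text, the sketch below (hypothetical names, not the actual RemoteMethod code) resolves a java.lang.reflect.Method by name and parameter types and reports a failed lookup as an IOException, which is the contract the Javadoc describes.

    import java.io.IOException;
    import java.lang.reflect.Method;

    class MethodLookupSketch {
      static Method lookup(Class<?> protocol, String name, Class<?>... paramTypes)
          throws IOException {
        try {
          // Resolve the named method on the target protocol class.
          return protocol.getDeclaredMethod(name, paramTypes);
        } catch (NoSuchMethodException | SecurityException e) {
          // Surface "method cannot be found" as an IOException, per the Javadoc.
          throw new IOException("Cannot find method " + name, e);
        }
      }
    }
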
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
index 624e574024c..a65120e3610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
@@ -87,7 +87,7 @@ class AsyncLoggerSet {
   
   /**
    * @return the epoch number for this writer. This may only be called after
-   * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
+   * a successful call to {@link QuorumJournalManager#createNewUniqueEpoch()}.
    */
   long getEpoch() {
     Preconditions.checkState(myEpoch != INVALID_EPOCH,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 782f2f36cc1..5cf13f698e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1403,7 +1403,7 @@ public class DataStorage extends Storage {
   }
 
   /**
-   * Get the BlockPoolSliceStorage from {@link bpStorageMap}.
+   * Get the BlockPoolSliceStorage from {@link #bpStorageMap}.
    * If the object is not found, create a new object and put it to the map.
    */
   synchronized BlockPoolSliceStorage getBlockPoolSliceStorage(
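
The corrected link points at the bpStorageMap field, and the behaviour described is a plain get-or-create lookup keyed by block pool ID. A generic sketch of that pattern (placeholder types; the real DataStorage construction is more involved):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    class GetOrCreateSketch<V> {
      private final Map<String, V> storageByBlockPool = new HashMap<>();

      synchronized V getOrCreate(String bpid, Function<String, V> factory) {
        // Return the cached entry for this block pool, creating it on first access.
        return storageByBlockPool.computeIfAbsent(bpid, factory);
      }
    }
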
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index f969c7ade28..af62835c4a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -44,14 +44,14 @@ import java.util.concurrent.TimeUnit;
 
 /**
  * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least {@link minMsBetweenChecks}
+ * checked objects. It will enforce at least {@link #minMsBetweenChecks}
  * milliseconds between two successive checks of any one object.
  *
  * It is assumed that the total number of Checkable objects in the system
  * is small, (not more than a few dozen) since the checker uses O(Checkables)
  * storage and also potentially O(Checkables) threads.
  *
- * {@link minMsBetweenChecks} should be configured reasonably
+ * {@link #minMsBetweenChecks} should be configured reasonably
  * by the caller to avoid spinning up too many threads frequently.
  */
 @InterfaceAudience.Private
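
The class comment above states the throttling contract: at most one check per object every minMsBetweenChecks milliseconds. A self-contained sketch of that idea (illustrative only, not the actual ThrottledAsyncChecker implementation):

    import java.util.Map;
    import java.util.WeakHashMap;

    class CheckThrottleSketch<K> {
      private final long minMsBetweenChecks;
      private final Map<K, Long> lastCheckStartMs = new WeakHashMap<>();

      CheckThrottleSketch(long minMsBetweenChecks) {
        this.minMsBetweenChecks = minMsBetweenChecks;
      }

      synchronized boolean shouldSchedule(K target) {
        long now = System.currentTimeMillis();
        Long last = lastCheckStartMs.get(target);
        if (last != null && now - last < minMsBetweenChecks) {
          return false;   // checked too recently; skip this request
        }
        lastCheckStartMs.put(target, now);
        return true;      // allow a new check and remember when it started
      }
    }
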
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
index 6bb2c7a8416..6f80780e140 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
@@ -31,6 +31,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Implementation of {@code Futures#withTimeout}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
index 1103468d3c8..15bd9dec604 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
@@ -184,7 +184,7 @@ public abstract class RamDiskReplicaTracker {
    * {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
    *
    * @param conf the configuration to be used
-   * @param dataset the FsDataset object.
+   * @param fsDataset the FsDataset object.
    * @return an instance of RamDiskReplicaTracker
    */
   static RamDiskReplicaTracker getInstance(final Configuration conf,
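
The @param fix above belongs to a factory method that instantiates the tracker class named by DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY. A hedged sketch of that config-driven pattern using Hadoop's Configuration.getClass and ReflectionUtils.newInstance (stand-in interface and default class; the real getInstance also wires in the FsDataset):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.util.ReflectionUtils;

    class TrackerFactorySketch {
      interface Tracker { }                              // stand-in for RamDiskReplicaTracker
      static class DefaultTracker implements Tracker { } // stand-in default implementation

      static Tracker getInstance(Configuration conf) {
        // Resolve the implementation class from the configuration key, with a default.
        Class<? extends Tracker> clazz = conf.getClass(
            DFSConfigKeys.DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY,
            DefaultTracker.class, Tracker.class);
        // Instantiate it, letting ReflectionUtils pass the Configuration along.
        return ReflectionUtils.newInstance(clazz, conf);
      }
    }
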
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c129d1928ab..8f7db1a06e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -152,8 +152,8 @@ class FSDirRenameOp {
    * @param srcIIP source path
    * @param dstIIP destination path
    * @return true INodesInPath if rename succeeds; null otherwise
-   * @deprecated See {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated See {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   static INodesInPath unprotectedRenameTo(FSDirectory fsd,
@@ -258,8 +258,8 @@ class FSDirRenameOp {
   }
 
   /**
-   * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
-   * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
+   * @see {@link #unprotectedRenameTo(FSDirectory, INodesInPath, INodesInPath,
+   * long, BlocksMapUpdateInfo, Options.Rename...)}
    */
   static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       String src, String dst, BlocksMapUpdateInfo collectedBlocks,
@@ -482,8 +482,8 @@ class FSDirRenameOp {
   }
 
   /**
-   * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated Use {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index f72ec7c9177..6921e204ae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -123,7 +123,7 @@ public class EditLogTailer {
 
   /**
    * The timeout in milliseconds of calling rollEdits RPC to Active NN.
-   * @see HDFS-4176.
+   * See HDFS-4176.
    */
   private final long rollEditsTimeoutMs;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
index 21642da9c24..527d767b09a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
@@ -70,7 +70,7 @@ public class SnapshotFSImageFormat {
 
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
-   * @param sNode The directory that the SnapshotDiff list belongs to.
+   * @param diffs The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
@@ -186,7 +186,7 @@ public class SnapshotFSImageFormat {
    * @param createdList The created list associated with the deleted list in 
    *                    the same Diff.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance.
+   * @param loader The {@link FSImageFormat.Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectory parent,
@@ -260,7 +260,7 @@ public class SnapshotFSImageFormat {
    * Load the snapshotINode field of {@link AbstractINodeDiff}.
    * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
@@ -281,7 +281,7 @@ public class SnapshotFSImageFormat {
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is 
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index 5773d7fecf0..bd6c860ccf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -85,7 +85,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERA
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
- * {@link getEntry()} to generate formatted string for each inode.
+ * {@link #getEntry(String, INode)} to generate formatted string for each inode.
  *
  * Since protobuf-based fsimage does not guarantee the order of inodes and
  * directories, PBImageTextWriter runs two-phase scans:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
index 2bc63ec77eb..77ec7890588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
@@ -68,7 +68,7 @@ public abstract class MD5FileUtils {
   /**
    * Read the md5 file stored alongside the given data file
    * and match the md5 file content.
-   * @param dataFile the file containing data
+   * @param md5File the file containing md5 data
    * @return a matcher with two matched groups
    *   where group(1) is the md5 string and group(2) is the data file path.
    */
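
The corrected @param above documents the md5 sidecar file, whose content is typically a single md5sum-style line, so the matcher splits into the digest and the data file path. A small illustrative sketch (the exact regex used by MD5FileUtils may differ):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class Md5LineSketch {
      // "<32 hex digits> *<path>", the format written by md5sum-style tools.
      private static final Pattern LINE = Pattern.compile("([0-9a-f]{32}) [ *](.+)");

      public static void main(String[] args) {
        Matcher m = LINE.matcher(
            "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000042");
        if (m.matches()) {
          System.out.println("md5  = " + m.group(1));   // group(1): the md5 string
          System.out.println("file = " + m.group(2));   // group(2): the data file path
        }
      }
    }
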

