Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2020/10/22 18:16:08 UTC

[hadoop] branch trunk updated: HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)

This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 6a9ceed  HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)
6a9ceed is described below

commit 6a9ceedfb3ee7c2f66a44083fb8e68cca508e207
Author: Akira Ajisaka <aa...@apache.org>
AuthorDate: Fri Oct 23 03:15:45 2020 +0900

    HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)
---
 hadoop-common-project/hadoop-common/pom.xml        |  1 -
 .../main/java/org/apache/hadoop/fs/FileUtil.java   |  2 +-
 .../java/org/apache/hadoop/fs/PartialListing.java  |  2 +-
 .../org/apache/hadoop/fs/impl/FutureIOSupport.java |  8 +-
 .../org/apache/hadoop/fs/viewfs/Constants.java     |  4 +-
 .../fs/viewfs/HCFSMountTableConfigLoader.java      |  2 +-
 .../fs/viewfs/ViewFileSystemOverloadScheme.java    | 95 ++++++++++++----------
 .../java/org/apache/hadoop/ipc/ProxyCombiner.java  |  4 +-
 .../hadoop/ipc/WeightedTimeCostProvider.java       |  4 +-
 .../hadoop/net/DomainNameResolverFactory.java      |  4 +-
 .../java/org/apache/hadoop/security/Groups.java    | 12 +--
 .../security/ssl/DelegatingSSLSocketFactory.java   | 23 +++---
 12 files changed, 87 insertions(+), 74 deletions(-)
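
This change re-enables javadoc generation for hadoop-common under JDK 11 (the
pom.xml hunk below drops the javadoc.skip.jdk11 property) and fixes the
doclint errors that had forced it off. To reproduce locally, a command along
these lines (assuming a standard Maven checkout) should build the module's
javadoc: mvn javadoc:javadoc -pl hadoop-common-project/hadoop-common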

diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index fc0927e..cc786e8 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -35,7 +35,6 @@
     <is.hadoop.common.component>true</is.hadoop.common.component>
     <wsce.config.dir>../etc/hadoop</wsce.config.dir>
     <wsce.config.file>wsce-site.xml</wsce.config.file>
-    <javadoc.skip.jdk11>true</javadoc.skip.jdk11>
   </properties>
 
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 73ca6e6..e078a2c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -1812,7 +1812,7 @@ public class FileUtil {
    * specified charset. This utility method opens the file for writing, creating
    * the file if it does not exist or overwriting an existing file.
    *
-   * @param FileContext the file context with which to create the file
+   * @param fs the file context with which to create the file
    * @param path the path to the file
    * @param charseq the char sequence to write to the file
    * @param cs the charset to use for encoding
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
index 80d173e..cec5d68 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
@@ -30,7 +30,7 @@ import java.util.List;
  * A partial listing of the children of a parent directory. Since it is a
  * partial listing, multiple PartialListing may need to be combined to obtain
  * the full listing of a parent directory.
- * <p/>
+ * <p>
  * ListingBatch behaves similar to a Future, in that getting the result via
  * {@link #get()} will throw an Exception if there was a failure.
  */
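
JDK 11's doclint rejects the self-closing form <p/> because p is not a void
HTML element, which is why this and later hunks replace it with a plain <p>
separator. A minimal sketch of the accepted shape (illustrative javadoc, not
taken from the patch):

    /**
     * First paragraph of the description.
     * <p>
     * Second paragraph; a bare <p> between paragraphs passes doclint,
     * while the self-closing <p/> does not.
     */
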
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
index f13d701..84ca94e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
@@ -166,11 +166,11 @@ public final class FutureIOSupport {
    * Propagate options to any builder, converting everything with the
    * prefix to an option where, if there were 2+ dot-separated elements,
    * it is converted to a schema.
-   * <pre>
+   * <pre>{@code
    *   fs.example.s3a.option => s3a:option
    *   fs.example.fs.io.policy => s3a.io.policy
    *   fs.example.something => something
-   * </pre>
+   * }</pre>
    * @param builder builder to modify
    * @param conf configuration to read
    * @param optionalPrefix prefix for optional settings
@@ -196,11 +196,11 @@ public final class FutureIOSupport {
    * Propagate options to any builder, converting everything with the
    * prefix to an option where, if there were 2+ dot-separated elements,
    * it is converted to a schema.
-   * <pre>
+   * <pre>{@code
    *   fs.example.s3a.option => s3a:option
    *   fs.example.fs.io.policy => s3a.io.policy
    *   fs.example.something => something
-   * </pre>
+   * }</pre>
    * @param builder builder to modify
    * @param conf configuration to read
    * @param prefix prefix to scan/strip
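
Wrapping the <pre> body in {@code ...} has javadoc escape characters such as
< and > itself, so the => arrows no longer trip doclint on JDK 11. The
general pattern, sketched with an illustrative key and value:

    /**
     * <pre>{@code
     *   some.config.prefix.key => mapped:value
     * }</pre>
     */
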
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index bf9f7db..5c27692 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -89,8 +89,8 @@ public interface Constants {
   /**
    * Config variable for specifying a regex link which uses regular expressions
    * as the source, and the target can use groups captured in the src.
-   * E.g. (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
-   *   (/path1/file1 => /prefix-path1/file1)
+   * E.g. {@literal (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
+   *   (/path1/file1 => /prefix-path1/file1)}
    */
   String CONFIG_VIEWFS_LINK_REGEX = "linkRegex";
 
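For inline text, {@literal ...} serves the same purpose: its argument is
rendered verbatim with HTML metacharacters escaped, so doclint no longer
parses <firstDir> as a malformed tag. A sketch of the pattern, with an
illustrative group name:

    /**
     * Accepts patterns of the form {@literal (^/(?<dir>\\w+), /prefix-${dir})}.
     */
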
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
index 3968e36..8dbb0f3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
@@ -42,7 +42,7 @@ public class HCFSMountTableConfigLoader implements MountTableConfigLoader {
    * Loads the mount-table configuration from hadoop compatible file system and
    * adds the configuration items to the given configuration. Mount-table
    * configuration format should be suffixed with version number.
-   * Format: mount-table.<versionNumber>.xml
+   * Format: {@literal mount-table.<versionNumber>.xml}
    * Example: mount-table.1.xml
    * When user wants to update mount-table, the expectation is to upload new
    * mount-table configuration file with monotonically increasing integer as
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
index 60d14d3..12877cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
@@ -33,73 +33,85 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 
-/******************************************************************************
- * This class is extended from the ViewFileSystem for the overloaded scheme
- * file system. Mount link configurations and in-memory mount table
- * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem
- * scheme (viewfs://), the users would be able to use any scheme.
+/**
+ * <p> This class is extended from the ViewFileSystem for the overloaded
+ * scheme file system. Mount link configurations and in-memory mount table
+ * building behaviors are inherited from ViewFileSystem. Unlike
+ * ViewFileSystem scheme (viewfs://), the users would be able to use
+ * any scheme. </p>
  *
- * To use this class, the following configurations need to be added in
- * core-site.xml file.
- * 1) fs.<scheme>.impl
- *    = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme
- * 2) fs.viewfs.overload.scheme.target.<scheme>.impl
- *    = <hadoop compatible file system implementation class name for the
- *    <scheme>"
+ * <p> To use this class, the following configurations need to be added in
+ * core-site.xml file. <br>
+ * 1) fs.{@literal <scheme>}.impl
+ *    = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme <br>
+ * 2) fs.viewfs.overload.scheme.target.{@literal <scheme>}.impl
+ *    = {@literal <hadoop compatible file system implementation class name
+ *    for the <scheme>>} </p>
  *
- * Here <scheme> can be any scheme, but with that scheme there should be a
- * hadoop compatible file system available. Second configuration value should
- * be the respective scheme's file system implementation class.
+ * <p> Here {@literal <scheme>} can be any scheme, but with that scheme there
+ * should be a hadoop compatible file system available. Second configuration
+ * value should be the respective scheme's file system implementation class.
  * Example: if scheme is configured with "hdfs", then the 2nd configuration
  * class name will be org.apache.hadoop.hdfs.DistributedFileSystem.
  * If scheme is configured with "s3a", then the 2nd configuration class name
- * will be org.apache.hadoop.fs.s3a.S3AFileSystem.
+ * will be org.apache.hadoop.fs.s3a.S3AFileSystem. </p>
  *
- * Use Case 1:
- * ===========
+ * <p> Use Case 1: <br>
+ * =========== <br>
  * If users want some of their existing cluster (hdfs://Cluster)
  * data to mount with other hdfs and object store clusters(hdfs://NN1,
- * o3fs://bucket1.volume1/, s3a://bucket1/)
+ * o3fs://bucket1.volume1/, s3a://bucket1/) </p>
  *
- * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user <br>
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data <br>
  * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
+ * </p>
  *
+ * <p>
  * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file hdfs://Cluster/data/datafile will go to
- *      o3fs://bucket1.volume1/data/datafile
+ *      o3fs://bucket1.volume1/data/datafile<br>
  * Op3: Create file hdfs://Cluster/backup/data.zip will go to
  *      s3a://bucket1/backup/data.zip
+ * </p>
  *
- * Use Case 2:
- * ===========
+ * <p> Use Case 2:<br>
+ * ===========<br>
  * If users want some of their existing cluster (s3a://bucketA/)
  * data to mount with other hdfs and object store clusters
- * (hdfs://NN1, o3fs://bucket1.volume1/)
+ * (hdfs://NN1, o3fs://bucket1.volume1/) </p>
  *
- * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user<br>
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data<br>
  * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
+ * </p>
  *
+ * <p>
  * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file s3a://bucketA/data/datafile will go to
- *      o3fs://bucket1.volume1/data/datafile
+ *      o3fs://bucket1.volume1/data/datafile<br>
  * Op3: Create file s3a://bucketA/salesDB/dbfile will go to
  *      s3a://bucketA/salesDB/dbfile
+ * </p>
  *
- * Note:
+ * <p> Note:<br>
  * (1) In ViewFileSystemOverloadScheme, by default the mount links will be
  * represented as non-symlinks. If you want to change this behavior, please see
- * {@link ViewFileSystem#listStatus(Path)}
+ * {@link ViewFileSystem#listStatus(Path)}<br>
  * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will
  * be considered as the mount table name. When the passed uri has hostname:port,
  * it will simply ignore the port number and only hostname will be considered as
- * the mount table name.
+ * the mount table name.<br>
  * (3) If there are no mount links configured with the initializing uri's
  * hostname as the mount table name, then it will automatically consider the
- * current uri as fallback( ex: fs.viewfs.mounttable.<mycluster>.linkFallback)
- * target fs uri.
- *****************************************************************************/
+ * current uri as fallback (ex:
+ * {@literal fs.viewfs.mounttable.<mycluster>.linkFallback}) target fs uri.
+ * </p>
+ */
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
 @InterfaceStability.Evolving
 public class ViewFileSystemOverloadScheme extends ViewFileSystem {
@@ -164,12 +176,13 @@ public class ViewFileSystemOverloadScheme extends ViewFileSystem {
   /**
    * This method is overridden because in ViewFileSystemOverloadScheme if
    * overloaded scheme matches with mounted target fs scheme, file system
-   * should be created without going into fs.<scheme>.impl based resolution.
-   * Otherwise it will end up in an infinite loop as the target will be
-   * resolved again to ViewFileSystemOverloadScheme as fs.<scheme>.impl points
-   * to ViewFileSystemOverloadScheme. So, below method will initialize the
-   * fs.viewfs.overload.scheme.target.<scheme>.impl. Other schemes can
-   * follow fs.newInstance
+   * should be created without going into {@literal fs.<scheme>.impl} based
+   * resolution. Otherwise it will end up in an infinite loop as the target
+   * will be resolved again to ViewFileSystemOverloadScheme as
+   * {@literal fs.<scheme>.impl} points to ViewFileSystemOverloadScheme.
+   * So, the method below will initialize the
+   * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl}.
+   * Other schemes can follow fs.newInstance.
    */
   @Override
   protected FsGetter fsGetter() {
@@ -179,7 +192,7 @@ public class ViewFileSystemOverloadScheme extends ViewFileSystem {
   /**
    * This class checks whether the rootScheme is the same as the URI scheme. If
    * both are the same, then it will initialize file systems by using the configured
-   * fs.viewfs.overload.scheme.target.<scheme>.impl class.
+   * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl} class.
    */
   static class ChildFsGetter extends FsGetter {
 
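The rewritten class javadoc above spells out the required configuration keys;
as a minimal sketch, the same Use Case 1 settings expressed programmatically
(key names and values are taken from the javadoc, the class name here is
illustrative) would look like:

    import org.apache.hadoop.conf.Configuration;

    public class OverloadSchemeConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Route the "hdfs" scheme through the overload-scheme file system.
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        // Target implementation used when a path resolves back to hdfs.
        conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
            "org.apache.hadoop.hdfs.DistributedFileSystem");
        // Mount links from Use Case 1 in the javadoc above.
        conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
        conf.set("fs.viewfs.mounttable.Cluster.link./data",
            "o3fs://bucket1.volume1/data");
        conf.set("fs.viewfs.mounttable.Cluster.link./backup",
            "s3a://bucket1/backup/");
      }
    }
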
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
index 835d806..b7188b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
@@ -50,8 +50,8 @@ public final class ProxyCombiner {
    * all of the methods of the combined proxy interface, delegating calls
    * to which proxy implements that method. If multiple proxies implement the
    * same method, the first in the list will be used for delegation.
-   *
-   * <p/>This will check that every method on the combined interface is
+   * <p>
+   * This will check that every method on the combined interface is
    * implemented by at least one of the supplied proxy objects.
    *
    * @param combinedProxyInterface The interface of the combined proxy.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
index 4304b24..1ecd19b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
@@ -29,8 +29,8 @@ import static org.apache.hadoop.ipc.ProcessingDetails.Timing;
  * {@link ProcessingDetails}). This can be used by specifying the
  * {@link org.apache.hadoop.fs.CommonConfigurationKeys#IPC_COST_PROVIDER_KEY}
  * configuration key.
- *
- * <p/>This allows for configuration of how heavily each of the operations
+ * <p>
+ * This allows for configuration of how heavily each of the operations
  * within {@link ProcessingDetails} is weighted. By default,
  * {@link ProcessingDetails.Timing#LOCKFREE},
  * {@link ProcessingDetails.Timing#RESPONSE}, and
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
index a0b0380..fdb45dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import java.io.IOException;
 import java.net.URI;
 
 /**
@@ -49,7 +48,7 @@ public final class DomainNameResolverFactory {
    * @return Domain name resolver.
    */
   public static DomainNameResolver newInstance(
-      Configuration conf, URI uri, String configKey) throws IOException {
+      Configuration conf, URI uri, String configKey) {
     String host = uri.getHost();
     String confKeyWithHost = configKey + "." + host;
     return newInstance(conf, confKeyWithHost);
@@ -61,7 +60,6 @@ public final class DomainNameResolverFactory {
    * @param conf Configuration
    * @param configKey config key name.
    * @return Domain name resolver.
-   * @throws IOException when the class cannot be found or initiated.
    */
   public static DomainNameResolver newInstance(
       Configuration conf, String configKey) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 406d0d0..47dca6c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -201,10 +201,10 @@ public class Groups {
   /**
    * Get the group memberships of a given user.
    * If the user's group is not cached, this method may block.
-   * Note this method can be expensive as it involves Set->List conversion.
-   * For user with large group membership (i.e., > 1000 groups), we recommend
-   * using getGroupSet to avoid the conversion and fast membership look up via
-   * contains().
+   * Note this method can be expensive as it involves Set {@literal ->} List
+   * conversion. For users with large group memberships
+   * (i.e., {@literal >} 1000 groups), we recommend using getGroupSet
+   * to avoid the conversion and allow fast membership lookup via contains().
    * @param user User's name
    * @return the group memberships of the user as list
    * @throws IOException if user does not exist
@@ -220,7 +220,9 @@ public class Groups {
    * Get the group memberships of a given user.
    * If the user's group is not cached, this method may block.
    * This provides better performance when the user has large group membership via
-   * 1) avoid set->list->set conversion for the caller UGI/PermissionCheck
+   * <br>
+   * 1) avoid {@literal set->list->set} conversion for the caller
+   * UGI/PermissionCheck <br>
    * 2) fast lookup using contains() via Set instead of List
    * @param user User's name
    * @return the group memberships of the user as set
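
The note above is about lookup cost rather than the Hadoop API itself; a
plain-JDK sketch (hypothetical group names, not Hadoop code) of why the
Set-returning variant wins for membership checks:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class MembershipLookupSketch {
      public static void main(String[] args) {
        // With 1000+ groups the gap between the two lookups widens.
        List<String> groupList = Arrays.asList("admins", "staff", "hdfs");
        Set<String> groupSet = new HashSet<>(groupList);
        System.out.println(groupList.contains("hdfs")); // linear scan, O(n)
        System.out.println(groupSet.contains("hdfs"));  // hash lookup, O(1) average
      }
    }
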
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
index 9d7afa9..5644234 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
@@ -43,19 +43,20 @@ import org.slf4j.LoggerFactory;
  *
  * <p>
  *   The factory has several different modes of operation:
- *   <ul>
- *     <li>OpenSSL: Uses the wildly-openssl library to delegate to the
- *     system installed OpenSSL. If the wildfly-openssl integration is not
- *     properly setup, an exception is thrown.</li>
- *     <li>Default: Attempts to use the OpenSSL mode, if it cannot load the
- *     necessary libraries, it falls back to the Default_JSEE mode.</li>
- *     <li>Default_JSSE: Delegates to the JSSE implementation of SSL, but
- *     it disables the GCM cipher when running on Java 8.</li>
- *     <li>Default_JSSE_with_GCM: Delegates to the JSSE implementation of
- *     SSL with no modification to the list of enabled ciphers.</li>
- *   </ul>
  * </p>
  *
+ * <ul>
+ *   <li>OpenSSL: Uses the wildfly-openssl library to delegate to the
+ *   system installed OpenSSL. If the wildfly-openssl integration is not
+ *   properly setup, an exception is thrown.</li>
+ *   <li>Default: Attempts to use the OpenSSL mode, if it cannot load the
+ *   necessary libraries, it falls back to the Default_JSSE mode.</li>
+ *   <li>Default_JSSE: Delegates to the JSSE implementation of SSL, but
+ *   it disables the GCM cipher when running on Java 8.</li>
+ *   <li>Default_JSSE_with_GCM: Delegates to the JSSE implementation of
+ *   SSL with no modification to the list of enabled ciphers.</li>
+ * </ul>
+ *
  * In order to load OpenSSL, applications must ensure the wildfly-openssl
  * artifact is on the classpath. Currently, only ABFS declares
  * wildfly-openssl as an explicit dependency.
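
This last hunk moves the <ul> outside the enclosing <p> because doclint
treats a block element nested inside a paragraph as invalid HTML. The
corrected shape, sketched as generic javadoc:

    /**
     * <p>
     *   Introductory sentence for the list:
     * </p>
     * <ul>
     *   <li>First mode.</li>
     *   <li>Second mode.</li>
     * </ul>
     */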

