Posted to commits@drill.apache.org by cg...@apache.org on 2021/10/08 12:28:07 UTC

[drill] branch master updated: DRILL-7993: LGTM Code Cleanup-Fixed Javadoc param tags and type conversions (#2310)

This is an automated email from the ASF dual-hosted git repository.

cgivre pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git


The following commit(s) were added to refs/heads/master by this push:
     new 317f164  DRILL-7993: LGTM Code Cleanup-Fixed Javadoc param tags and type conversions (#2310)
317f164 is described below

commit 317f164791bbbe8f937eb452b49e92c34f1c0333
Author: estherbuchwalter <85...@users.noreply.github.com>
AuthorDate: Fri Oct 8 08:27:55 2021 -0400

    DRILL-7993: LGTM Code Cleanup-Fixed Javadoc param tags and type conversions (#2310)
    
    * fixed some @param tags and type conversions
    
    * added descriptions for javadoc param tags
    
    * added descriptions for javadoc param tags
    
    * eliminated errors for unknown links in javadocs
    
    * added descriptions for Javadoc param and return tags
    
    * added descriptions for Javadoc param tags
    
    * added descriptions for Javadoc param and return tags
    
    * added descriptions for Javadoc param and return tags, fixed broken links
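    
    As an illustration of the pattern applied throughout, here is a sketch
    drawn from the IndexGroupScan hunk below (Java; illustrative, not itself
    part of the recorded diff): bare tags gain descriptions, stray commas
    after parameter names are dropped, and {@link} targets must resolve.
    
        // Before: bare @param tags can trigger doclint/LGTM warnings
        /**
         * Set the artificial row count after applying the {@link RexNode} condition
         * Mainly used for debugging
         * @param condition
         * @param count
         * @param capRowCount
         */
        void setRowCount(RexNode condition, double count, double capRowCount);
    
        // After: every @param tag carries a description
        /**
         * Set the artificial row count after applying the {@link RexNode} condition
         * Mainly used for debugging
         * @param condition filter to apply
         * @param count right index row count
         * @param capRowCount row count limit
         */
        void setRowCount(RexNode condition, double count, double capRowCount);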
---
 .../org/apache/drill/common/HistoricalLog.java     | 12 ++---
 .../store/mapr/db/json/JsonTableGroupScan.java     | 21 ++++----
 .../yarn/appMaster/DrillControllerFactory.java     | 16 +++---
 .../org/apache/drill/yarn/client/StopCommand.java  |  4 +-
 .../java/org/apache/drill/yarn/core/DfsFacade.java | 14 ++---
 .../apache/drill/yarn/core/DrillOnYarnConfig.java  | 17 +++---
 .../drill/exec/physical/base/DbGroupScan.java      |  4 +-
 .../drill/exec/physical/base/IndexGroupScan.java   | 11 ++--
 .../RangePartitionRecordBatch.java                 |  7 ++-
 .../impl/scan/framework/SchemaNegotiatorImpl.java  |  1 -
 .../impl/scan/project/ConstantColumnLoader.java    |  3 +-
 .../scan/project/ExplicitSchemaProjection.java     | 10 +++-
 .../impl/scan/project/ScanLevelProjection.java     | 13 +++--
 .../scan/v3/lifecycle/SchemaNegotiatorImpl.java    |  2 -
 .../physical/resultSet/impl/BuildFromSchema.java   |  6 ++-
 .../physical/resultSet/impl/ColumnBuilder.java     | 14 ++---
 .../resultSet/impl/ResultSetOptionBuilder.java     |  2 +-
 .../physical/resultSet/impl/SingleVectorState.java |  4 +-
 .../drill/exec/planner/common/DrillRelOptUtil.java | 42 +++++++--------
 .../drill/exec/planner/index/IndexPlanUtils.java   | 60 ++++++++++++----------
 .../drill/exec/store/ClassicConnectorLocator.java  |  1 -
 .../org/apache/drill/exec/store/PluginHandle.java  |  1 -
 .../drill/exec/store/SystemPluginLocator.java      |  4 --
 .../drill/exec/store/base/filter/ExprNode.java     |  3 +-
 .../store/base/filter/FilterPushDownListener.java  | 16 ++----
 .../exec/store/easy/json/JSONRecordReader.java     | 21 ++++----
 .../exec/store/easy/json/parser/ObjectParser.java  |  7 ++-
 .../easy/text/reader/CompliantTextBatchReader.java |  9 ++--
 .../drill/exec/store/parquet/FooterGatherer.java   | 15 ++++--
 .../store/parquet/columnreaders/ColumnReader.java  | 37 ++++++++++---
 .../columnreaders/VarLenAbstractEntryReader.java   |  5 +-
 .../columnreaders/VarLenColumnBulkInput.java       | 13 +++--
 .../columnreaders/VarLenOverflowReader.java        |  6 +--
 .../batchsizing/BatchSizingMemoryUtil.java         |  8 +--
 .../drill/exec/util/record/RecordBatchStats.java   |  9 ++--
 .../apache/drill/exec/work/filter/BloomFilter.java |  6 +--
 .../drill/exec/work/foreman/rm/QueryQueue.java     |  6 +--
 .../work/foreman/rm/QueryResourceAllocator.java    |  5 +-
 .../vector/NullableVectorDefinitionSetter.java     |  2 +-
 .../apache/drill/exec/vector/VarLenBulkInput.java  |  3 +-
 .../vector/accessor/writer/UnionWriterImpl.java    |  4 +-
 .../org/apache/drill/common/graph/GraphAlgos.java  |  7 ++-
 42 files changed, 238 insertions(+), 213 deletions(-)

diff --git a/common/src/main/java/org/apache/drill/common/HistoricalLog.java b/common/src/main/java/org/apache/drill/common/HistoricalLog.java
index 08e5e7b..cec6d19 100644
--- a/common/src/main/java/org/apache/drill/common/HistoricalLog.java
+++ b/common/src/main/java/org/apache/drill/common/HistoricalLog.java
@@ -108,6 +108,7 @@ public class HistoricalLog {
    * events with their stack traces.
    *
    * @param sb {@link StringBuilder} to write to
+   * @param includeStackTrace true if history includes stack trace, otherwise false
    */
   public void buildHistory(final StringBuilder sb, boolean includeStackTrace) {
     buildHistory(sb, 0, includeStackTrace);
@@ -119,15 +120,8 @@ public class HistoricalLog {
    * events with their stack traces.
    *
    * @param sb {@link StringBuilder} to write to
-   * @param additional an extra string that will be written between the identifying
-   *     information and the history; often used for a current piece of state
-   */
-
-  /**
-   *
-   * @param sb
-   * @param indexLevel
-   * @param includeStackTrace
+   * @param indent size of indent
+   * @param includeStackTrace true if history includes stack trace, otherwise false
    */
   public synchronized void buildHistory(final StringBuilder sb, int indent, boolean includeStackTrace) {
     final char[] indentation = new char[indent];
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
index b93a62e..78c2012 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
@@ -490,7 +490,7 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
   /**
   * Get the estimated average row size. DO NOT call this API directly.
    * Call the stats API instead which modifies the counts based on preference options.
-   * @param index, to use for generating the estimate
+   * @param index to use for generating the estimate
    * @return row count post filtering
    */
   public MapRDBStatisticsPayload getAverageRowSizeStats(IndexDescriptor index) {
@@ -524,8 +524,9 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
   /**
    * Get the estimated statistics after applying the {@link RexNode} condition. DO NOT call this API directly.
    * Call the stats API instead which modifies the counts based on preference options.
-   * @param condition, filter to apply
-   * @param index, to use for generating the estimate
+   * @param condition filter to apply
+   * @param index to use for generating the estimate
+   * @param scanRel the current scan rel
    * @return row count post filtering
    */
   public MapRDBStatisticsPayload getFirstKeyEstimatedStats(QueryCondition condition, IndexDescriptor index, RelNode scanRel) {
@@ -538,8 +539,9 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
 
   /**
    * Get the estimated statistics after applying the {@link QueryCondition} condition
-   * @param condition, filter to apply
-   * @param index, to use for generating the estimate
+   * @param condition filter to apply
+   * @param index to use for generating the estimate
+   * @param scanRel the current scan rel
    * @return {@link MapRDBStatisticsPayload} statistics
    */
   private MapRDBStatisticsPayload getFirstKeyEstimatedStatsInternal(QueryCondition condition, IndexDesc index, RelNode scanRel) {
@@ -611,9 +613,9 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
   /**
    * Set the row count resulting from applying the {@link RexNode} condition. Forced row counts will take
    * precedence over stats row counts
-   * @param condition
-   * @param count
-   * @param capRowCount
+   * @param condition filter to apply
+   * @param count row count
+   * @param capRowCount row count limit
    */
   @Override
   @JsonIgnore
@@ -630,7 +632,8 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
 
   /**
    * Get the row count after applying the {@link RexNode} condition
-   * @param condition, filter to apply
+   * @param condition filter to apply
+   * @param scanRel the current scan rel
    * @return row count post filtering
    */
   @Override
diff --git a/drill-yarn/src/main/java/org/apache/drill/yarn/appMaster/DrillControllerFactory.java b/drill-yarn/src/main/java/org/apache/drill/yarn/appMaster/DrillControllerFactory.java
index 013fdba..c882508 100644
--- a/drill-yarn/src/main/java/org/apache/drill/yarn/appMaster/DrillControllerFactory.java
+++ b/drill-yarn/src/main/java/org/apache/drill/yarn/appMaster/DrillControllerFactory.java
@@ -128,7 +128,7 @@ public class DrillControllerFactory implements ControllerFactory {
    * download ("localize") for the Drillbit. We need both the Drill software and
    * the user's site-specific configuration.
    *
-   * @return
+   * @return resources
    * @throws YarnFacadeException
    */
 
@@ -185,8 +185,8 @@ public class DrillControllerFactory implements ControllerFactory {
    * This class is very Linux-specific. The usual adjustments must be made to
    * adapt it to Windows.
    *
-   * @param config
-   * @return
+   * @param resources the means to set up the required environment variables
+   * @return task specification
    * @throws DoyConfigException
    */
 
@@ -322,9 +322,9 @@ public class DrillControllerFactory implements ControllerFactory {
    * specification if a given Drill-on-YARN configuration variable is set,
    * copying the config value to the environment variable.
    *
-   * @param spec
-   * @param configParam
-   * @param envVar
+   * @param spec launch specification
+   * @param configParam config value
+   * @param envVar environment variable
    */
 
   public void addIfSet(LaunchSpec spec, String configParam, String envVar) {
@@ -357,8 +357,8 @@ public class DrillControllerFactory implements ControllerFactory {
    * Compared to the Drill version, this one takes its parameters via a builder
    * pattern in the form of the cluster coordinator driver.
    *
-   * @param config
-   * @param dispatcher
+   * @param config used to build a Drill-on-YARN configuration
+   * @param dispatcher dispatches different events to the cluster controller
    */
 
   private void buildZooKeeper(Config config, Dispatcher dispatcher) {
diff --git a/drill-yarn/src/main/java/org/apache/drill/yarn/client/StopCommand.java b/drill-yarn/src/main/java/org/apache/drill/yarn/client/StopCommand.java
index 95f7bf3..e515fd5 100644
--- a/drill-yarn/src/main/java/org/apache/drill/yarn/client/StopCommand.java
+++ b/drill-yarn/src/main/java/org/apache/drill/yarn/client/StopCommand.java
@@ -174,8 +174,8 @@ public class StopCommand extends ClientCommand {
    * Include the master key with the request to differentiate this request from
    * accidental uses of the stop REST API.
    *
-   * @param report
-   * @return
+   * @param baseUrl base url to be posted with the shutdown
+   * @return true if stops gracefully, otherwise false
    */
 
   private boolean gracefulStop(String baseUrl) {
diff --git a/drill-yarn/src/main/java/org/apache/drill/yarn/core/DfsFacade.java b/drill-yarn/src/main/java/org/apache/drill/yarn/core/DfsFacade.java
index 09e88ae..f5316eb 100644
--- a/drill-yarn/src/main/java/org/apache/drill/yarn/core/DfsFacade.java
+++ b/drill-yarn/src/main/java/org/apache/drill/yarn/core/DfsFacade.java
@@ -163,7 +163,7 @@ public class DfsFacade {
      * only retrieve the status once. Cache it here so that the client
     * doesn't have to do the caching.
      *
-     * @return
+     * @return file status
      * @throws DfsFacadeException
      */
 
@@ -286,17 +286,17 @@ public class DfsFacade {
    * must be localized onto the remote node prior to running a command on that
    * node.
    * <p>
-   * YARN uses the size and timestamp are used to check if the file has changed
-   * on HDFS to check if YARN can use an existing copy, if any.
+   * YARN uses the size and timestamp to check if the file has changed
+   * on HDFS and to check if YARN can use an existing copy, if any.
    * <p>
    * Resources are made public.
    *
-   * @param conf
-   *          Configuration created from the Hadoop config files, in this case,
-   *          identifies the target file system.
-   * @param resourcePath
+   * @param dfsPath
    *          the path (relative or absolute) to the file on the configured file
    *          system (usually HDFS).
+   * @param dfsFileStatus the file status of the configured file system
+   * @param type local resource type (archive, file, or pattern)
+   * @param visibility local resource visibility (public, private, or application)
    * @return a YARN local resource records that contains information about path,
    *         size, type, resource and so on that YARN requires.
    * @throws IOException
diff --git a/drill-yarn/src/main/java/org/apache/drill/yarn/core/DrillOnYarnConfig.java b/drill-yarn/src/main/java/org/apache/drill/yarn/core/DrillOnYarnConfig.java
index cfc4605..283591f 100644
--- a/drill-yarn/src/main/java/org/apache/drill/yarn/core/DrillOnYarnConfig.java
+++ b/drill-yarn/src/main/java/org/apache/drill/yarn/core/DrillOnYarnConfig.java
@@ -296,7 +296,7 @@ public class DrillOnYarnConfig {
    * initialization. Not needed by the client, so done in an unsynchronized,
    * lazy fashion.
    *
-   * @return
+   * @return Drill's class path scan
    */
 
   public ScanResult getClassPathScan() {
@@ -349,8 +349,8 @@ public class DrillOnYarnConfig {
    * immutable, so it is not possible for unit tests to change the actual
    * environment.)
    *
-   * @param key
-   * @return
+   * @param key key to allow unit tests to replace this function
+   * @return environment variable
    */
 
   protected String getEnv(String key) {
@@ -466,7 +466,7 @@ public class DrillOnYarnConfig {
    * Return the Drill home on this machine as inferred from the config file
    * contents or location.
    *
-   * @return
+   * @return Drill home
    */
 
   public File getLocalDrillHome() {
@@ -747,7 +747,7 @@ public class DrillOnYarnConfig {
    * $PWD/site-key/drill-override.conf, where site-key is the key name used to
    * localize the site archive.
    *
-   * @return
+   * @return remote site directory name
    */
 
   public String getRemoteSiteDir() {
@@ -786,8 +786,7 @@ public class DrillOnYarnConfig {
    * root and/or cluster ID (just not the same combination), so the file name
    * contains both parts.
    *
-   * @param clusterId
-   * @return
+   * @return local app id file
    */
 
   public File getLocalAppIdFile() {
@@ -818,7 +817,7 @@ public class DrillOnYarnConfig {
    * only if the application is localized, it is not set for a non-localized
    * run.
    *
-   * @return
+   * @return the DFS path to the localized Drill archive
    */
 
   public String getDrillArchiveDfsPath() {
@@ -832,7 +831,7 @@ public class DrillOnYarnConfig {
    * does not use a site archive (configuration files reside in
    * $DRILL_HOME/conf), or the application is not localized.
    *
-   * @return
+   * @return the DFS path to the localized site archive
    */
 
   public String getSiteArchiveDfsPath() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/DbGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/DbGroupScan.java
index 497b88b..23c9ee3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/DbGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/DbGroupScan.java
@@ -58,8 +58,8 @@ public interface DbGroupScan extends GroupScan {
   /**
    * Get the row count after applying the {@link RexNode} condition
    *
-   * @param condition, filter to apply
-   * @param scanRel, the current scan rel
+   * @param condition filter to apply
+   * @param scanRel the current scan rel
    * @return row count post filtering
    */
   @JsonIgnore
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
index a83304f..365e5ac 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
@@ -42,16 +42,17 @@ public interface IndexGroupScan extends GroupScan {
   /**
    * Set the artificial row count after applying the {@link RexNode} condition
    * Mainly used for debugging
-   * @param condition
-   * @param count
-   * @param capRowCount
+   * @param condition filter to apply
+   * @param count right index row count
+   * @param capRowCount row count limit
    */
   @JsonIgnore
   void setRowCount(RexNode condition, double count, double capRowCount);
 
   /**
    * Get the row count after applying the {@link RexNode} condition
-   * @param condition, filter to apply
+   * @param condition filter to apply
+   * @param scanRel the current scan rel
    * @return row count post filtering
    */
   @JsonIgnore
@@ -72,4 +73,4 @@ public interface IndexGroupScan extends GroupScan {
 
   @JsonIgnore
   void setParallelizationWidth(int width);
-}
\ No newline at end of file
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/rangepartitioner/RangePartitionRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/rangepartitioner/RangePartitionRecordBatch.java
index 5e859f7..6d8f2a6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/rangepartitioner/RangePartitionRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/rangepartitioner/RangePartitionRecordBatch.java
@@ -97,7 +97,6 @@ public class RangePartitionRecordBatch extends AbstractSingleRecordBatch<RangePa
    * Sets up projection that will transfer all of the columns in batch, and also setup
    * the partition column based on which partition a record falls into
    *
-   * @param batch
    * @throws SchemaChangeException
    * @return True if the new schema differs from old schema, False otherwise
    */
@@ -124,7 +123,7 @@ public class RangePartitionRecordBatch extends AbstractSingleRecordBatch<RangePa
   /**
    *  Provide the partition function with the appropriate value vector(s) that
    *  are involved in the range partitioning
-   *  @param batch
+   *  @param batch batch of columns
    */
   private void setupPartitionCols(VectorAccessible batch) {
     List<VectorWrapper<?>> partitionCols = Lists.newArrayList();
@@ -156,8 +155,8 @@ public class RangePartitionRecordBatch extends AbstractSingleRecordBatch<RangePa
    * For each incoming record, get the partition id it belongs to by invoking the
    * partitioning function. Set this id in the output partitionIdVector.  For all other
    * incoming value vectors, just do a transfer.
-   * @param recordCount
-   * @param firstOutputIndex
+   * @param recordCount number of incoming records
+   * @param firstOutputIndex the index of the first output
    * @return the number of records projected
    */
   private final int projectRecords(int recordCount, int firstOutputIndex) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/framework/SchemaNegotiatorImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/framework/SchemaNegotiatorImpl.java
index 64bac43..653df53 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/framework/SchemaNegotiatorImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/framework/SchemaNegotiatorImpl.java
@@ -143,7 +143,6 @@ public class SchemaNegotiatorImpl implements SchemaNegotiator {
    * both the table and scan operator. Returns the result set loader to be used
    * by the reader to write to the table's value vectors.
    *
-   * @param schemaNegotiator builder given to the reader to provide it's
    * schema information
    * @return the result set loader to be used by the reader
    */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ConstantColumnLoader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ConstantColumnLoader.java
index ec0a65f..2a89a75 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ConstantColumnLoader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ConstantColumnLoader.java
@@ -87,8 +87,7 @@ public class ConstantColumnLoader extends StaticColumnLoader {
   /**
    * Populate static vectors with the defined static values.
    *
-   * @param rowCount number of rows to generate. Must match the
-   * row count in the batch returned by the reader
+   * @param writer writer for a tuple
    */
 
   private void loadRow(TupleWriter writer) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ExplicitSchemaProjection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ExplicitSchemaProjection.java
index fc91b38..62bd729 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ExplicitSchemaProjection.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ExplicitSchemaProjection.java
@@ -279,6 +279,11 @@ public class ExplicitSchemaProjection extends ReaderLevelProjection {
    * column reference ("a", say) and an implied map reference ("a.b", say.)
    * If the column appears to be a map, determine the set of children, which
   * may appear to any depth, that were requested.
+   *
+   * @param outputTuple
+   *           projected tuple being built
+   * @param requestedCol
+   *           column as requested in the project list
    */
 
   private void resolveNullColumn(ResolvedTuple outputTuple,
@@ -296,7 +301,8 @@ public class ExplicitSchemaProjection extends ReaderLevelProjection {
    * A child column of a map is not projected. Recurse to determine the full
    * set of nullable child columns.
    *
-   * @param projectedColumn the map column which was projected
+   * @param outputTuple projected tuple being built
+   * @param col the map column which was projected
    * @return a list of null markers for the requested children
    */
 
@@ -312,4 +318,4 @@ public class ExplicitSchemaProjection extends ReaderLevelProjection {
     }
     return mapCol;
   }
-}
\ No newline at end of file
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ScanLevelProjection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ScanLevelProjection.java
index fc3b38d..9c57918 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ScanLevelProjection.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/project/ScanLevelProjection.java
@@ -132,7 +132,7 @@ import org.apache.drill.shaded.guava.com.google.common.annotations.VisibleForTes
  * output schema, the reader, or  as nulls.</li>
  * </ul>
  * <p>
- * @see {@link ImplicitColumnExplorer}, the class from which this class
+ * @see {@link org.apache.drill.exec.store.ColumnExplorer}, the class from which this class
  * evolved
  */
 public class ScanLevelProjection {
@@ -183,11 +183,10 @@ public class ScanLevelProjection {
     SCHEMA_WILDCARD,
 
     /**
-     * Wldcard query expanded using an output schema in "strict" mode.
-     * Only columns from the output schema will be projected. Unlike the
-     * {@link SCHEMA_WILDCARD} mode, if a reader offers columns not in the
-     * output schema, they will be ignored. That is, a SELECT * query expands
-     * to exactly the columns in the schema.
+     * Wildcard query expanded using an output schema in "strict" mode.
+     * Only columns from the output schema will be projected. If a reader
+     * offers columns not in the output schema, they will be ignored. That
+     * is, a SELECT * query expands to exactly the columns in the schema.
      * <p>
      * TODO: Provide a strict column mode that will fail the query if a projected
      * column is required, has no default, and is not provided by the reader. In
@@ -233,7 +232,7 @@ public class ScanLevelProjection {
      * comes from the query planner, assumes that the planner has checked
      * the list for syntax and uniqueness.
      *
-     * @param queryCols list of columns in the SELECT list in SELECT list order
+     * @param projectionList list of columns in the SELECT list in SELECT list order
      * @return this builder
      */
     public Builder projection(List<SchemaPath> projectionList) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/SchemaNegotiatorImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/SchemaNegotiatorImpl.java
index 7b94cc4..24ad466 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/SchemaNegotiatorImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/SchemaNegotiatorImpl.java
@@ -141,8 +141,6 @@ public class SchemaNegotiatorImpl implements SchemaNegotiator {
    * both the table and scan operator. Returns the result set loader to be used
    * by the reader to write to the table's value vectors.
    *
-   * @param schemaNegotiator builder given to the reader to provide it's
-   * schema information
    * @return the result set loader to be used by the reader
    */
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
index 386195a..84f2826 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/BuildFromSchema.java
@@ -152,6 +152,10 @@ public class BuildFromSchema {
   /**
    * Build a column recursively. Called internally when adding a column
    * via the addColumn() method on the tuple writer.
+   *
+   * @param state the loader state for the tuple, a row or a map
+   * @param colSchema the schema of the column to add
+   * @return the object writer for the added column
    */
 
   public ObjectWriter buildColumn(TupleState state, ColumnMetadata colSchema) {
@@ -264,7 +268,7 @@ public class BuildFromSchema {
    * it may have may layers of other repeated lists before we get to the element
    * (inner-most) array.
    *
-   * @param writer tuple writer for the tuple that holds the array
+   * @param parent tuple writer for the tuple that holds the array
    * @param colSchema schema definition of the array
    */
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
index 291888d..2731d3f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ColumnBuilder.java
@@ -94,7 +94,7 @@ public class ColumnBuilder {
    * Implementation of the work to add a new column to this tuple given a
    * schema description of the column.
    *
-   * @param parent container
+   * @param parent container of vectors
    * @param columnSchema schema of the column as provided by the client
    * using the result set loader. This is the schema of the data to be
    * loaded
@@ -123,7 +123,7 @@ public class ColumnBuilder {
    * and manages the column.
    *
    * @param parent schema of the new primitive column
-   * @param colProj implied projection type for the column
+   * @param columnSchema implied projection type for the column
    * @return column state for the new column
    */
   private ColumnState buildPrimitive(ContainerState parent, ColumnMetadata columnSchema) {
@@ -189,7 +189,7 @@ public class ColumnBuilder {
    * map vector (or vector container) until harvest time.
    *
    * @param parent description of the map column
-   * @param colProj implied projection type for the column
+   * @param columnSchema implied projection type for the column
    * @return column state for the map column
    */
   private ColumnState buildMap(ContainerState parent, ColumnMetadata columnSchema) {
@@ -309,8 +309,8 @@ public class ColumnBuilder {
    * in a join column, say.) Still, Drill supports unions, so the code here
    * does so. Unions are fully tested in the row set writer mechanism.
    *
-   * @param parent container
-   * @param colProj column schema
+   * @param parent container of vectors
+   * @param columnSchema implied projection type for the column
    * @return column
    */
   private ColumnState buildUnion(ContainerState parent, ColumnMetadata columnSchema) {
@@ -368,7 +368,7 @@ public class ColumnBuilder {
   * not support the {@code ListVector} type.
    *
    * @param parent the parent (tuple, union or list) that holds this list
-   * @param colProj metadata description of the list which must contain
+   * @param columnSchema metadata description of the list which must contain
    * exactly one subtype
    * @return the column state for the list
    */
@@ -418,7 +418,7 @@ public class ColumnBuilder {
    * not support the {@code ListVector} type.
    *
    * @param parent the parent (tuple, union or list) that holds this list
-   * @param colProj metadata description of the list (must be empty of
+   * @param columnSchema description of the list (must be empty of
    * subtypes)
    * @return the column state for the list
    */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ResultSetOptionBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ResultSetOptionBuilder.java
index 3a9c8cb..a4e75b2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ResultSetOptionBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ResultSetOptionBuilder.java
@@ -96,7 +96,7 @@ public class ResultSetOptionBuilder {
    * operation. Leave this method unset to start with an empty schema.</li>
    * <li>A combination of the above.</li>
    * </ul>
-   * @param schema the initial schema for the loader
+   * @param readerSchema the initial schema for the loader
    * @return this builder
    */
   public ResultSetOptionBuilder readerSchema(TupleMetadata readerSchema) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
index 033e427..83c71f5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
@@ -258,10 +258,12 @@ public abstract class SingleVectorState implements VectorState {
    * row, or for some previous row, depending on exactly when and where the
    * overflow occurs.
    *
-   * @param sourceStartIndex the index of the row that caused the overflow, the
+   * sourceStartIndex: the index of the row that caused the overflow, the
    * values of which should be copied to a new "look-ahead" vector. If the
    * vector is an array, then the overflowIndex is the position of the first
    * element to be moved, and multiple elements may need to move
+   *
+   * @param cardinality the number of unique columns in the row
    */
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
index 42d7a14..ec9bf4e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
@@ -132,7 +132,7 @@ public abstract class DrillRelOptUtil {
    * underlying expression, but the fields have different names.
    *
    *
-   * @param rel        Relational expression
+   * @param rel Relational expression
    * @param fieldNames Field names
    * @return Renamed relational expression
    */
@@ -171,8 +171,8 @@ public abstract class DrillRelOptUtil {
 
  /** Returns a rowType having all unique field names.
    *
-   * @param rowType : input rowType
-   * @param typeFactory : type factory used to create a new row type.
+   * @param rowType input rowType
+   * @param typeFactory type factory used to create a new row type.
+   * @return a rowType having all unique field names.
    */
   public static RelDataType uniqifyFieldName(final RelDataType rowType, final RelDataTypeFactory typeFactory) {
@@ -215,10 +215,10 @@ public abstract class DrillRelOptUtil {
   * Traverses a RexNode to find at least one operator in the given collection. Continue search if RexNode has a
    * RexInputRef which refers to a RexNode in project expressions.
    *
-   * @param node : RexNode to search
-   * @param projExprs : the list of project expressions. Empty list means there is No project operator underneath.
+   * @param node RexNode to search
+   * @param projExprs the list of project expressions. Empty list means there is No project operator underneath.
    * @param operators collection of operators to find
-   * @return : Return null if there is NONE; return the first appearance of item/flatten RexCall.
+   * @return Return null if there is NONE; return the first appearance of item/flatten RexCall.
    */
   public static RexCall findOperators(final RexNode node, final List<RexNode> projExprs, final Collection<String> operators) {
     try {
@@ -278,8 +278,8 @@ public abstract class DrillRelOptUtil {
   /**
    * Find whether the given project rel can produce non-scalar output (hence unknown rowcount). This
    * would happen if the project has a flatten
-   * @param project : The project rel
-   * @return : Return true if the rowcount is unknown. Otherwise, false.
+   * @param project The project rel
+   * @return Return true if the rowcount is unknown. Otherwise, false.
    */
   public static boolean isProjectOutputRowcountUnknown(Project project) {
     for (RexNode rex : project.getProjects()) {
@@ -295,8 +295,8 @@ public abstract class DrillRelOptUtil {
   /**
    * Find whether the given project rel has unknown output schema. This would happen if the
    * project has CONVERT_FROMJSON which can only derive the schema after evaluation is performed
-   * @param project : The project rel
-   * @return : Return true if the project output schema is unknown. Otherwise, false.
+   * @param project The project rel
+   * @return Return true if the project output schema is unknown. Otherwise, false.
    */
   public static boolean isProjectOutputSchemaUnknown(Project project) {
     try {
@@ -366,8 +366,8 @@ public abstract class DrillRelOptUtil {
 
   /**
    * For a given row type return a map between old field indices and one index right shifted fields.
-   * @param rowType : row type to be right shifted.
-   * @return map: hash map between old and new indices
+   * @param rowType row type to be right shifted.
+   * @return map hash map between old and new indices
    */
   public static Map<Integer, Integer> rightShiftColsInRowType(RelDataType rowType) {
     Map<Integer, Integer> map = new HashMap<>();
@@ -380,10 +380,10 @@ public abstract class DrillRelOptUtil {
 
   /**
    * Given a list of rexnodes it transforms the rexnodes by changing the expr to use new index mapped to the old index.
-   * @param builder : RexBuilder from the planner.
-   * @param exprs: RexNodes to be transformed.
-   * @param corrMap: Mapping between old index to new index.
-   * @return
+   * @param builder RexBuilder from the planner.
+   * @param exprs RexNodes to be transformed.
+   * @param corrMap Mapping between old index to new index.
+   * @return list of transformed expressions
    */
   public static List<RexNode> transformExprs(RexBuilder builder, List<RexNode> exprs, Map<Integer, Integer> corrMap) {
     List<RexNode> outputExprs = new ArrayList<>();
@@ -396,10 +396,10 @@ public abstract class DrillRelOptUtil {
 
   /**
   * Given a rexnode, it transforms the rexnode by changing the expr to use new index mapped to the old index.
-   * @param builder : RexBuilder from the planner.
-   * @param expr: RexNode to be transformed.
-   * @param corrMap: Mapping between old index to new index.
-   * @return
+   * @param builder RexBuilder from the planner.
+   * @param expr RexNode to be transformed.
+   * @param corrMap Mapping between old index to new index.
+   * @return transformed expression
    */
   public static RexNode transformExpr(RexBuilder builder, RexNode expr, Map<Integer, Integer> corrMap) {
     DrillRelOptUtil.RexFieldsTransformer transformer = new DrillRelOptUtil.RexFieldsTransformer(builder, corrMap);
@@ -596,7 +596,7 @@ public abstract class DrillRelOptUtil {
   /**
    * Returns whether statistics-based estimates or guesses are used by the optimizer
    * for the {@link RelNode} rel.
-   * @param rel : input
+   * @param rel input
    * @return TRUE if the estimate is a guess, FALSE otherwise
    * */
   public static boolean guessRows(RelNode rel) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
index 3242761..7e122d6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
@@ -73,8 +73,8 @@ public class IndexPlanUtils {
   /**
    * Check if any of the fields of the index are present in a list of LogicalExpressions supplied
    * as part of IndexableExprMarker
-   * @param exprMarker, the marker that has analyzed original index condition on top of original scan
-   * @param indexDesc
+   * @param exprMarker the marker that has analyzed original index condition on top of original scan
+   * @param indexDesc the index definition plus functions to access materialized index
    * @return ConditionIndexed.FULL, PARTIAL or NONE depending on whether all, some or no columns
    * of the indexDesc are present in the list of LogicalExpressions supplied as part of exprMarker
    *
@@ -96,8 +96,8 @@ public class IndexPlanUtils {
    * check if we want to apply index rules on this scan,
    * if group scan is not instance of DbGroupScan, or this DbGroupScan instance does not support secondary index, or
    *    this scan is already an index scan or Restricted Scan, do not apply index plan rules on it.
-   * @param scanRel
-   * @return
+   * @param scanRel the current scan rel
+   * @return true to indicate that we want to apply index rules on this scan, otherwise false
    */
   static public boolean checkScan(DrillScanRel scanRel) {
     GroupScan groupScan = scanRel.getGroupScan();
@@ -112,7 +112,7 @@ public class IndexPlanUtils {
 
   /**
    * For a particular table scan for table T1 and an index on that table, find out if it is a covering index
-   * @return
+   * @return true if it is a covering index, otherwise false
    */
   static public boolean isCoveringIndex(IndexCallContext indexContext, FunctionalIndexInfo functionInfo) {
     if (functionInfo.hasFunctional()) {
@@ -134,8 +134,8 @@ public class IndexPlanUtils {
    * is an indexed field named '$0'. In this case, by looking at Scan, we see only 'a.b' which is not in index. We have to
    * look into Project, and if we see 'a.b' is only used in functional index expression cast(a.b as INT), then we know
    * this Project+Scan is covered.
-   * @param indexContext
-   * @param functionInfo
+   * @param indexContext the index call context
+   * @param functionInfo functional index information that may impact rewrite
    * @return false if the query could not be covered by the index (should not create covering index plan)
    */
   static private boolean queryCoveredByIndex(IndexCallContext indexContext,
@@ -235,9 +235,9 @@ public class IndexPlanUtils {
 
   /**
    * Build collation property for the 'lower' project, the one closer to the Scan
-   * @param projectRexs
-   * @param input
-   * @param indexInfo
+   * @param projectRexs list of row expressions
+   * @param input input as a relational expression
+   * @param indexInfo collects functional index information
    * @return the output RelCollation
    */
   public static RelCollation buildCollationLowerProject(List<RexNode> projectRexs, RelNode input, FunctionalIndexInfo indexInfo) {
@@ -273,10 +273,10 @@ public class IndexPlanUtils {
 
   /**
    * Build collation property for the 'upper' project, the one above the filter
-   * @param projectRexs
-   * @param inputCollation
-   * @param indexInfo
-   * @param collationFilterMap
+   * @param projectRexs list of row expressions
+   * @param inputCollation the input collation
+   * @param indexInfo collects functional index information
+   * @param collationFilterMap map for collation filter
    * @return the output RelCollation
    */
   public static RelCollation buildCollationUpperProject(List<RexNode> projectRexs,
@@ -365,7 +365,8 @@ public class IndexPlanUtils {
 
   /**
   * generate logical expressions for sort rexNodes in SortRel, the result is stored in IndexPlanCallContext
-   * @param indexContext
+   * @param indexContext the index call context
+   * @param coll list of field collations
    */
   public static void updateSortExpression(IndexCallContext indexContext, List<RelFieldCollation> coll) {
 
@@ -406,7 +407,8 @@ public class IndexPlanUtils {
 
   /**
   * generate logical expressions for sort rexNodes in SortRel, the result is stored in IndexPlanCallContext
-   * @param indexContext
+   * @param indexContext the index call context
+   * @param coll list of field collations
    */
   public static void updateSortExpression(IndexPhysicalPlanCallContext indexContext, List<RelFieldCollation> coll) {
 
@@ -447,8 +449,8 @@ public class IndexPlanUtils {
 
   /**
    *
-   * @param expr
-   * @param context
+   * @param expr the input expression
+   * @param context the index call context
    * @return if there is filter and expr is only in equality condition of the filter, return true
    */
   private static boolean exprOnlyInEquality(LogicalExpression expr, IndexCallContext context) {
@@ -464,7 +466,7 @@ public class IndexPlanUtils {
    * Build collation property for project, the one closer to the Scan
    * @param projectRexs the expressions to project
    * @param project the project between projectRexs and input, it could be null if no such intermediate project(lower project)
-   * @param input  the input RelNode to the project, usually it is the scan operator.
+   * @param input the input RelNode to the project, usually it is the scan operator.
    * @param indexInfo the index for which we are building index plan
    * @param context the context of this index planning process
    * @return the output RelCollation
@@ -692,9 +694,10 @@ public class IndexPlanUtils {
    * For IndexGroupScan, if a column is only appeared in the should-be-renamed function,
    * this column is to-be-replaced column, we replace that column(schemaPath) from 'a.b'
    * to '$1' in the list of SchemaPath.
-   * @param paths
+   * @param paths list of paths
    * @param functionInfo functional index information that may impact rewrite
-   * @return
+   * @param addedPaths list of paths added
+   * @return list of new paths
    */
   public static List<SchemaPath> rewriteFunctionColumn(List<SchemaPath> paths,
                                                        FunctionalIndexInfo functionInfo,
@@ -739,13 +742,14 @@ public class IndexPlanUtils {
    * then collect the schema paths in the indexed expression but found out of the indexed expression -- node (5),
    * and other regular schema paths (3) (4)
    *
-   * @param parseContext
-   * @param project
-   * @param scan
-   * @param toRewriteRex  the RexNode to be converted if it contain a functional index expression.
-   * @param newRowType
-   * @param functionInfo
-   * @return
+   * @param indexContext the index call context
+   * @param parseContext the drill parse context
+   * @param project the drill base class for logical and physical projects
+   * @param scan a rel node scan
+   * @param toRewriteRex the RexNode to be converted if it contains a functional index expression
+   * @param newRowType data type for new row
+   * @param functionInfo functional index information that may impact rewrite
+   * @return rewritten functional row expression
    */
   public static RexNode rewriteFunctionalRex(IndexCallContext indexContext,
                                        DrillParseContext parseContext,
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ClassicConnectorLocator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ClassicConnectorLocator.java
index 0b2694b..5b36207 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ClassicConnectorLocator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ClassicConnectorLocator.java
@@ -234,7 +234,6 @@ public class ClassicConnectorLocator implements ConnectorLocator {
    * {@link ExecConstants#BOOTSTRAP_FORMAT_PLUGINS_FILE} files for the first
    * fresh install of Drill.
    *
-   * @param lpPersistence deserialization mapper provider
    * @return bootstrap storage plugins
    * @throws IOException if a read error occurs
    */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/PluginHandle.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/PluginHandle.java
index 9b0d0ed..6ea1a98 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/PluginHandle.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/PluginHandle.java
@@ -129,7 +129,6 @@ public class PluginHandle {
    * time if the plugin creates a connection to another system, especially if that system
    * suffers timeouts.
    *
-   * @param context the context to use for creating a new instance, if needed
    * @return the initialized storage plugin
    * @throws UserException if the storage plugin creation failed due to class errors
    * (unlikely) or external system errors (more likely)
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SystemPluginLocator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SystemPluginLocator.java
index b583ff7..de85343 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SystemPluginLocator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SystemPluginLocator.java
@@ -55,10 +55,6 @@ public class SystemPluginLocator implements ConnectorLocator {
    * Will skip plugin initialization if no matching constructor, incorrect
    * class implementation, name absence are detected.
    *
-   * @param classpathScan
-   *          classpath scan result
-   * @param context
-   *          drillbit context
    * @return map with system plugins stored by name
    */
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/ExprNode.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/ExprNode.java
index 5ccb35e..c331b16 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/ExprNode.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/ExprNode.java
@@ -42,7 +42,7 @@ import com.fasterxml.jackson.annotation.JsonInclude.Include;
  * the selectivity that Calcite attaches to the node. Any
  * expressions pushed to a scan must reduce the scan
  * cost by the amount of the selectivity, else Calcite will
- * conclude that the orginal plan (without push-down) is
+ * conclude that the original plan (without push-down) is
  * cheaper.
  */
 public abstract class ExprNode {
@@ -202,6 +202,7 @@ public abstract class ExprNode {
     /**
      * Rewrite a relop using the given normalized value.
      *
+     * @param normalizedValue given normalized value
      * @return a new RelOp with the normalized value. Will be the same relop
      * if the normalized value is the same as the unnormalized value.
      */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/FilterPushDownListener.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/FilterPushDownListener.java
index 1c81fe6..3c59e31 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/FilterPushDownListener.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/base/filter/FilterPushDownListener.java
@@ -43,7 +43,7 @@ import org.apache.drill.exec.store.base.filter.ExprNode.AndNode;
  * <dl>
  * <p>
  * In both cases, the conditions are in the form of a
- * {@link ColRelOpConst} in which one side refers to a column in the scan
+ * {@link ExprNode.ColRelOpConstNode} in which one side refers to a column in the scan
  * and the other is a constant expression. The "driver" will ensure
  * the rel op is of the correct form; this class ensures that the
  * column is valid for the scan and the type of the value matches the
@@ -67,6 +67,7 @@ public interface FilterPushDownListener {
    *   return scan.getGroupScan() instanceof MyGroupScan;
    * }
    * </pre></code>
+   * @param groupScan the scan node
    * @return true if the given group scan is one this listener can
    * handle, false otherwise
    */
@@ -100,10 +101,7 @@ public interface FilterPushDownListener {
      * If so, return an equivalent RelOp with the value normalized to what
      * the plugin needs. The returned value may be the same as the original
      * one if the value is already normalized.
-     *
-     * @param groupScan the scan element. Use {@code scan.getGroupScan()}
-     * to get the group scan
-     * @param relOp the description of the relational operator expression
+     * @param conjunct condensed form of a Drill WHERE clause expression node
      * @return a normalized RelOp if this relop can be transformed into a filter
      * push-down, @{code null} if not and thus the relop should remain in
      * the Drill plan
@@ -126,13 +124,7 @@ public interface FilterPushDownListener {
      * to leave in the query. Those terms can be the ones passed in, or
      * new terms to handle special needs.
      *
-     * @param groupScan the scan node
-     * @param andTerms a list of the CNF (AND) terms, in which each is given
-     * by the Calcite AND node and the derived RelOp expression.
-     * @param orTerm the DNF (OR) term, if any, that includes the Calcite
-     * node for that term and the set of OR terms. Only provided if the OR
-     * term represents a simple list of values (all OR clauses are on the
-     * same column). The OR term itself is AND'ed with the CNF terms.
+     * @param expr a set of AND'ed expressions in Conjunctive Normal Form (CNF)
      * @return a pair of elements: a new scan (that represents the pushed filters),
      * and the original or new expression to appear in the WHERE clause
      * joined by AND with any non-candidate expressions. That is, if analysis
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONRecordReader.java
index 731bf2d..19367c3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONRecordReader.java
@@ -74,10 +74,10 @@ public class JSONRecordReader extends AbstractRecordReader {
 
   /**
    * Create a JSON Record Reader that uses a file based input stream.
-   * @param fragmentContext
-   * @param inputPath
-   * @param fileSystem
-   * @param columns  pathnames of columns/subfields to read
+   * @param fragmentContext the Drill fragment
+   * @param inputPath the input path
+   * @param fileSystem a Drill file system wrapper around the file system implementation
+   * @param columns path names of columns/subfields to read
    * @throws OutOfMemoryException
    */
   public JSONRecordReader(FragmentContext fragmentContext, Path inputPath, DrillFileSystem fileSystem,
@@ -87,10 +87,10 @@ public class JSONRecordReader extends AbstractRecordReader {
 
   /**
    * Create a new JSON Record Reader that uses an in-memory materialized JSON stream.
-   * @param fragmentContext
-   * @param embeddedContent
-   * @param fileSystem
-   * @param columns  pathnames of columns/subfields to read
+   * @param fragmentContext the Drill fragment
+   * @param embeddedContent embedded content
+   * @param fileSystem a Drill file system wrapper around the file system implementation
+   * @param columns path names of columns/subfields to read
    * @throws OutOfMemoryException
    */
   public JSONRecordReader(FragmentContext fragmentContext, JsonNode embeddedContent, DrillFileSystem fileSystem,
@@ -100,9 +100,8 @@ public class JSONRecordReader extends AbstractRecordReader {
 
   /**
    * Create a JSON Record Reader that uses an InputStream directly
-   * @param fragmentContext The Drill Fragmement
-   * @param inputStream The inputStream from which data will be received
-   * @param columns  pathnames of columns/subfields to read
+   * @param fragmentContext the Drill fragment
+   * @param columns path names of columns/subfields to read
    * @throws OutOfMemoryException
    */
   public JSONRecordReader(FragmentContext fragmentContext, List<SchemaPath> columns) throws OutOfMemoryException {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/parser/ObjectParser.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/parser/ObjectParser.java
index 6ddec1c..7c68e40 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/parser/ObjectParser.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/parser/ObjectParser.java
@@ -158,7 +158,8 @@ public abstract class ObjectParser extends AbstractElementParser {
    * <li>{@code foo: []}</li>
    * </ul>
    *
-   * @param field description of the field, including the field name
+   * @param key name of the field
+   * @param tokenizer an instance of a token iterator
    * @return a parser for the newly-created field
    */
   protected abstract ElementParser onField(String key, TokenIterator tokenizer);
@@ -171,6 +172,8 @@ public abstract class ObjectParser extends AbstractElementParser {
 
   /**
    * Parses <code>{ ^ ... }</code>
+   *
+   * @param tokenizer an instance of a token iterator
    */
   @Override
   public void parse(TokenIterator tokenizer) {
@@ -206,6 +209,8 @@ public abstract class ObjectParser extends AbstractElementParser {
    * look up the parser for that field and use it. If this is the first time
    * we've seen the field, "sniff" tokens to determine field type, create a
    * parser, then parse.
+   *
+   * @param tokenizer an instance of a token iterator
    */
   private void parseMember(TokenIterator tokenizer) {
     // Position: key: ^ ?
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/CompliantTextBatchReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/CompliantTextBatchReader.java
index bddbac7..4c0c698 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/CompliantTextBatchReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/CompliantTextBatchReader.java
@@ -86,8 +86,9 @@ public class CompliantTextBatchReader implements ManagedReader<ColumnsSchemaNego
    * Performs the initial setup required for the record reader.
    * Initializes the input stream, handling of the output record batch
    * and the actual reader to be used.
-   * @param errorContext  operator context from which buffer's will be allocated and managed
-   * @param outputMutator  Used to create the schema in the output record batch
+   *
+   * @param schemaNegotiator used to create the schema in the output record batch
+   * @return true if the reader opens successfully, false if the output is null
    */
   @Override
   public boolean open(ColumnsSchemaNegotiator schemaNegotiator) {
@@ -128,10 +129,7 @@ public class CompliantTextBatchReader implements ManagedReader<ColumnsSchemaNego
   * Extract header and use that to define the reader schema.
   *
   * @param schemaNegotiator used to define the reader schema
-   * @param providedHeaders "artificial" headers created from a
-   * provided schema, if any. Used when using a provided schema
-   * with a text file that contains no headers; ignored for
-   * text file with headers
+   * @return the text output configured from the extracted header
    */
   private TextOutput openWithHeaders(ColumnsSchemaNegotiator schemaNegotiator) throws IOException {
     final String [] fieldNames = extractHeader();
@@ -293,7 +294,7 @@ public class CompliantTextBatchReader implements ManagedReader<ColumnsSchemaNego
 
   /**
    * Generates the next record batch
-   * @return  number of records in the batch
+   * @return true if a batch of records was read, false when no more data is available
    */
   @Override
   public boolean next() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/FooterGatherer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/FooterGatherer.java
index da5bcbb..54d3d91 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/FooterGatherer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/FooterGatherer.java
@@ -69,6 +69,15 @@ public class FooterGatherer {
     }
   }
 
+  /**
+   * Gets the footers for the given files.
+   *
+   * @param conf configuration for the file system
+   * @param statuses list of file statuses
+   * @param parallelism maximum number of footers to read concurrently
+   * @return a list of footers
+   * @throws IOException if a footer cannot be read
+   */
   public static List<Footer> getFooters(final Configuration conf, List<FileStatus> statuses, int parallelism) throws IOException {
     final List<TimedCallable<Footer>> readers = new ArrayList<>();
     final List<Footer> foundFooters = new ArrayList<>();
@@ -128,9 +137,9 @@ public class FooterGatherer {
   /**
    * An updated footer reader that tries to read the entire footer without knowing the length.
    * This should reduce the amount of seek/read roundtrips in most workloads.
-   * @param fs
-   * @param status
-   * @return
+   * @param config configuration for the file system
+   * @param status file status
+   * @return the footer of the given file
    * @throws IOException
    */
   public static Footer readFooter(final Configuration config, final FileStatus status) throws IOException {
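
For reference, a hedged usage sketch of the two helpers documented above; the setup of conf and statuses is elided into parameters, and the parallelism of 16 is arbitrary:

    // Sketch: gather footers for several files in parallel, or read one directly.
    static List<Footer> gatherSketch(Configuration conf, List<FileStatus> statuses) throws IOException {
      List<Footer> footers = FooterGatherer.getFooters(conf, statuses, 16);
      Footer first = FooterGatherer.readFooter(conf, statuses.get(0)); // single-file path
      return footers;
    }
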
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
index 0474040..e67fae6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
@@ -159,8 +159,8 @@ public abstract class ColumnReader<V extends ValueVector> {
     try {
       readField(recordsToRead);
 
-      valuesReadInCurrentPass += recordsReadInThisIteration;
-      pageReader.valuesRead += recordsReadInThisIteration;
+      valuesReadInCurrentPass += (int) recordsReadInThisIteration;
+      pageReader.valuesRead += (int) recordsReadInThisIteration;
       pageReader.readPosInBytes = readStartInBytes + readLength;
     } catch (Exception e) {
       UserException ex = UserException.dataReadError(e)
@@ -180,9 +180,8 @@ public abstract class ColumnReader<V extends ValueVector> {
    *
    * Return value indicates if we have finished a row group and should stop reading
    *
-   * @param recordsReadInCurrentPass
-   * @param lengthVarFieldsInCurrentRecord
-   * @return - true if we should stop reading
+   * @param recordsReadInCurrentPass number of records read in the current pass
+   * @return true if we should stop reading
    * @throws IOException
    */
   public boolean determineSize(long recordsReadInCurrentPass) throws IOException {
@@ -245,7 +244,12 @@ public abstract class ColumnReader<V extends ValueVector> {
     return f;
   }
 
-  // Read a page if we need more data, returns true if we need to exit the read loop
+  /**
+   * Read a page if we need more data.
+   *
+   * @return true if we should exit the read loop
+   * @throws IOException if the page cannot be read
+   */
   public boolean readPage() throws IOException {
     if (!pageReader.hasPage()
         || totalValuesReadAndReadyToReadInPage() == pageReader.currentPageCount) {
@@ -286,7 +290,14 @@ public abstract class ColumnReader<V extends ValueVector> {
     return valuesReadInCurrentPass > valueVec.getValueCapacity();
   }
 
-  // copied out of Parquet library, didn't want to deal with the uneeded throws statement they had declared
+  /**
+   * Copied from the Parquet library to avoid the unneeded throws
+   * clause declared there.
+   *
+   * @param in buffer holding the incoming data
+   * @param offset byte offset at which to read
+   * @return the little-endian integer value at the given offset
+   */
   public static int readIntLittleEndian(DrillBuf in, int offset) {
     int ch4 = in.getByte(offset) & 0xff;
     int ch3 = in.getByte(offset + 1) & 0xff;
@@ -304,6 +315,12 @@ public abstract class ColumnReader<V extends ValueVector> {
       this.recordsToReadInThisPass = recordsToReadInThisPass;
     }
 
+    /**
+     * Runs the column read task, temporarily renaming the thread for
+     * easier debugging.
+     *
+     * @return records to read
+     * @throws IOException if the read fails
+     */
     @Override public Long call() throws IOException{
 
       String oldname = Thread.currentThread().getName();
@@ -328,6 +345,12 @@ public abstract class ColumnReader<V extends ValueVector> {
       this.recordsToRead = recordsToRead;
     }
 
+    /**
+     * Runs the column read task, temporarily renaming the thread for
+     * easier debugging.
+     *
+     * @return records to read
+     * @throws IOException if the read fails
+     */
     @Override public Integer call() throws IOException{
 
       String oldname = Thread.currentThread().getName();
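
For context on the readIntLittleEndian hunk above, a hedged sketch completing the little-endian assembly; the first two lines match the diff, while the remainder follows the standard pattern and may differ from the actual method body:

    public static int readIntLittleEndianSketch(DrillBuf in, int offset) {
      int ch4 = in.getByte(offset) & 0xff;      // least-significant byte
      int ch3 = in.getByte(offset + 1) & 0xff;
      int ch2 = in.getByte(offset + 2) & 0xff;
      int ch1 = in.getByte(offset + 3) & 0xff;  // most-significant byte
      return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4;
    }
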
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenAbstractEntryReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenAbstractEntryReader.java
index 3c9610a..780eda6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenAbstractEntryReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenAbstractEntryReader.java
@@ -35,9 +35,8 @@ abstract class VarLenAbstractEntryReader {
   /**
    * CTOR.
    * @param buffer byte buffer for data buffering (within CPU cache)
-   * @param pageInfo page being processed information
-   * @param columnPrecInfo column precision information
    * @param entry reusable bulk entry object
+   * @param containerCallback callback to allow a bulk reader to interact with its parent
    */
   VarLenAbstractEntryReader(ByteBuffer buffer,
     VarLenColumnBulkEntry entry,
@@ -49,7 +48,7 @@ abstract class VarLenAbstractEntryReader {
   }
 
   /**
-   * @param valuesToRead maximum values to read within the current page
+   * @param valsToReadWithinPage maximum values to read within the current page
    * @return a bulk entry object
    */
   abstract VarLenColumnBulkEntry getEntry(int valsToReadWithinPage);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenColumnBulkInput.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenColumnBulkInput.java
index 3079d5d..f8d89b8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenColumnBulkInput.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenColumnBulkInput.java
@@ -75,7 +75,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
    * CTOR.
    * @param parentInst parent object instance
    * @param recordsToRead number of records to read
-   * @param columnPrecInfo column precision information
+   * @param bulkReaderState bulk reader state
    * @throws IOException runtime exception in case of processing error
    */
   VarLenColumnBulkInput(VarLengthValuesColumn<V> parentInst,
@@ -91,7 +91,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
     this.fieldOverflowStateContainer = this.batchSizerMgr.getFieldOverflowContainer(parentInst.valueVec.getField().getName());
 
     // Load page if none have been read
-    loadPageIfNeeed();
+    loadPageIfNeeded();
 
     // Create the internal READ_STATE object based on the current page-reader state
     this.oprReadState = new OprBulkReadState(parentInst.pageReader.readyToReadPosInBytes, parentInst.pageReader.valuesRead);
@@ -287,7 +287,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
   private final void guessColumnPrecision(ColumnPrecisionInfo columnPrecInfo) throws IOException {
     columnPrecInfo.columnPrecisionType = ColumnPrecisionType.DT_PRECISION_IS_VARIABLE;
 
-    loadPageIfNeeed();
+    loadPageIfNeeded();
 
     // Minimum number of values within a data size to consider bulk processing
     final int minNumVals = VarLenBulkPageReader.BUFF_SZ / BULK_PROCESSING_MAX_PREC_LEN;
@@ -363,7 +363,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
     }
   }
 
-  private void loadPageIfNeeed() throws IOException {
+  private void loadPageIfNeeded() throws IOException {
     if (!parentInst.pageReader.hasPage()) {
       // load a page
       parentInst.pageReader.next();
@@ -681,7 +681,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
      * @return underlying reader object; this object is now unusable
      *         note that you have to invoke the {@link #set(ValuesReader, int)} method
      *         to update this object state in case a) you have used the {@link ValuesReader} object and b)
-     *         want to resume using this {@link DefinitionLevelReader} object instance
+     *         want to resume using this {@link DefLevelReaderWrapper} object instance
      */
     public ValuesReader getUnderlyingReader() {
       currValue = -1; // to make this object unusable
@@ -720,8 +720,7 @@ public final class VarLenColumnBulkInput<V extends ValueVector> implements VarLe
     /**
      * Set the {@link PageReader#dictionaryValueReader} object; if a null value is passed, then it is understood
      * the current page doesn't use dictionary encoding
-     * @param valuesReader {@link ValuesReader} object
-     * @param numValues total number of values that can be read from the stream
+     * @param _rawReader {@link ValuesReader} object
      */
     void set(ValuesReader _rawReader) {
       this.valuesReader    = _rawReader;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenOverflowReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenOverflowReader.java
index c6e18ee..7848028 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenOverflowReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenOverflowReader.java
@@ -39,8 +39,8 @@ public final class VarLenOverflowReader extends VarLenAbstractEntryReader {
   /**
    * CTOR.
    * @param buffer byte buffer for data buffering (within CPU cache)
-   * @param pageInfo page being processed information
-   * @param columnPrecInfo column precision information
+   * @param containerCallback callback that lets the reader interact with its parent container
+   * @param fieldOverflowContainer container holding the field's overflow data
    * @param entry reusable bulk entry object
    */
   VarLenOverflowReader(ByteBuffer buffer,
@@ -372,4 +372,4 @@ public final class VarLenOverflowReader extends VarLenAbstractEntryReader {
     }
   }
 
-}
\ No newline at end of file
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/batchsizing/BatchSizingMemoryUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/batchsizing/BatchSizingMemoryUtil.java
index 8782568..505f0f6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/batchsizing/BatchSizingMemoryUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/batchsizing/BatchSizingMemoryUtil.java
@@ -91,9 +91,9 @@ public final class BatchSizingMemoryUtil {
   /**
    * Load memory usage information for a variable length value vector
    *
-   * @param vector source value vector
+   * @param sourceVector source value vector
    * @param currValueCount current value count
-   * @param vectorMemory result object which contains source vector memory usage information
+   * @param vectorMemoryUsage result object which contains source vector memory usage information
    */
   public static void getMemoryUsage(ValueVector sourceVector,
     int currValueCount,
@@ -223,7 +223,7 @@ public final class BatchSizingMemoryUtil {
   }
 
   /**
-   * @param fixed column's metadata
+   * @param column the fixed-length column's metadata
    * @param valueCount number of column values
    * @return memory size required to store "valueCount" within a value vector
    */
@@ -243,7 +243,7 @@ public final class BatchSizingMemoryUtil {
   }
 
   /**
-   * @param variable length column's metadata
+   * @param column the variable-length column's metadata
    * @param averagePrecision VL column average precision
    * @param valueCount number of column values
    * @return memory size required to store "valueCount" within a value vector
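
As a rough illustration of the sizing arithmetic these Javadocs describe, a hedged sketch under an assumed vector layout (a fixed-width data buffer, with variable-length columns adding one 4-byte offset entry per value plus one); Drill's real computation also handles nullable bits and buffer rounding:

    // Sketch only, not the actual BatchSizingMemoryUtil logic.
    static long fixedColumnBytesSketch(int byteWidth, int valueCount) {
      return (long) byteWidth * valueCount;            // data buffer
    }
    static long varLenColumnBytesSketch(int avgPrecision, int valueCount) {
      long data = (long) avgPrecision * valueCount;    // value bytes
      long offsets = 4L * (valueCount + 1);            // offset vector entries
      return data + offsets;
    }
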
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/record/RecordBatchStats.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/record/RecordBatchStats.java
index e7ea6dd..5e90520 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/record/RecordBatchStats.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/record/RecordBatchStats.java
@@ -49,7 +49,8 @@ public final class RecordBatchStats {
     private final String contextOperatorId;
 
     /**
-     * @param options options manager
+     * @param context fragment context
+     * @param oContext operator context
      */
     public RecordBatchStatsContext(FragmentContext context, OperatorContext oContext) {
       final boolean operatorEnabledForStatsLogging = isBatchStatsEnabledForOperator(context, oContext);
@@ -158,7 +159,7 @@ public final class RecordBatchStats {
   }
 
   /**
-   * @see {@link RecordBatchStats#logRecordBatchStats(IOType, String, RecordBatchSizer, RecordBatchStatsContext)}
+   * @see {@link RecordBatchStats#logRecordBatchStats(RecordBatchIOType, String, RecordBatch, RecordBatchStatsContext)}
    */
   public static void logRecordBatchStats(RecordBatchIOType ioType,
     String sourceId,
@@ -173,7 +174,7 @@ public final class RecordBatchStats {
   }
 
   /**
-   * @see {@link RecordBatchStats#logRecordBatchStats(IOType, String, RecordBatchSizer, RecordBatchStatsContext)}
+   * @see {@link RecordBatchStats#logRecordBatchStats(RecordBatchIOType, RecordBatch, RecordBatchStatsContext)}
    */
   public static void logRecordBatchStats(RecordBatchIOType ioType,
     RecordBatch recordBatch,
@@ -300,7 +301,7 @@ public final class RecordBatchStats {
   /**
    * Constructs record batch statistics for the input record batch
    *
-   * @param stats instance identifier
+   * @param statsId instance identifier
    * @param ioType whether a record batch is an input or/and output
    * @param sourceId optional source identifier for scanners
    * @param batchSizer contains batch sizing information
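
A hedged usage sketch of the logging entry points referenced by the corrected @see tags above; RecordBatchIOType.INPUT is an assumed constant, and context and oContext come from the operator as in the constructor hunk:

    // Sketch: build a stats context once per operator, then log a batch.
    static void logSketch(FragmentContext context, OperatorContext oContext, RecordBatch batch) {
      RecordBatchStatsContext statsContext = new RecordBatchStatsContext(context, oContext);
      RecordBatchStats.logRecordBatchStats(RecordBatchIOType.INPUT, batch, statsContext);
    }
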
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/BloomFilter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/BloomFilter.java
index afbc56a..14b84f7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/BloomFilter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/BloomFilter.java
@@ -135,7 +135,7 @@ public class BloomFilter {
   /**
    * Merge this bloom filter with other one
    *
-   * @param other
+   * @param other the other bloom filter to merge into this one
    */
   public void or(BloomFilter other) {
     int otherLength = other.byteBuf.capacity();
@@ -156,8 +156,8 @@ public class BloomFilter {
    * Calculate optimal size according to the number of distinct values and false positive probability.
    * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
    *
-   * @param ndv: The number of distinct values.
-   * @param fpp: The false positive probability.
+   * @param ndv The number of distinct values.
+   * @param fpp The false positive probability.
    * @return optimal number of bytes of given ndv and fpp.
    */
   public static int optimalNumOfBytes(long ndv, double fpp) {
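
For reference, a hedged sketch of the cited formula, m bits = -ndv * ln(fpp) / (ln 2)^2, converted to bytes; the actual implementation may round differently (for example, up to a power of two):

    // Sketch of the Wikipedia formula the Javadoc references.
    static int optimalNumOfBytesSketch(long ndv, double fpp) {
      double bits = -ndv * Math.log(fpp) / (Math.log(2) * Math.log(2));
      return (int) Math.ceil(bits / 8.0); // bits to bytes, rounded up
    }
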
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryQueue.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryQueue.java
index 67a8b96..706d8fd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryQueue.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryQueue.java
@@ -40,10 +40,8 @@ public interface QueryQueue {
     long queryMemoryPerNode();
 
     /**
-     * Release a query lease obtained from {@link #queue(QueryId, double))}.
+     * Release a query lease obtained from {@link #enqueue(QueryId, double)}.
      * Should be called by the per-query resource manager.
-     *
-     * @param lease the lease to be released.
      */
 
     void release();
@@ -127,7 +125,7 @@ public interface QueryQueue {
    * wait time.
    * @param queryId the query ID
    * @param cost the cost of the query used for cost-based queueing
-   * @return the query lease which must be passed to {@link #release(QueueLease)}
+   * @return the query lease which must be passed to {@code #release(QueueLease)}
    * upon query completion
    * @throws QueueTimeoutException if the query times out waiting to be
    * admitted.
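
A hedged usage sketch of the admit/release protocol documented above; runQuery is hypothetical, while enqueue, queryMemoryPerNode, and release follow the Javadoc:

    // Sketch of the admit/run/release cycle described in the interface docs.
    static void runQueuedSketch(QueryQueue queue, QueryId queryId, double cost) throws Exception {
      QueryQueue.QueueLease lease = queue.enqueue(queryId, cost); // may block or time out
      try {
        runQuery(lease.queryMemoryPerNode()); // hypothetical execution under the memory budget
      } finally {
        lease.release(); // the per-query resource manager hands the slot back
      }
    }
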
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryResourceAllocator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryResourceAllocator.java
index 35dbe59..34565d5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryResourceAllocator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/rm/QueryResourceAllocator.java
@@ -40,7 +40,7 @@ public interface QueryResourceAllocator {
   /**
    * Make any needed adjustments to the query plan before parallelization.
    *
-   * @param plan
+   * @param plan physical plan
    */
   void visitAbstractPlan(PhysicalPlan plan);
 
@@ -48,8 +48,7 @@ public interface QueryResourceAllocator {
    * Provide the manager with the physical plan and node assignments
    * for the query to be run. This class will plan memory for the query.
    *
-   * @param plan
-   * @param work
+   * @param work query work unit
    */
 
   void visitPhysicalPlan(QueryWorkUnit work);
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/NullableVectorDefinitionSetter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/NullableVectorDefinitionSetter.java
index 283555b..3b6bfb9 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/NullableVectorDefinitionSetter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/NullableVectorDefinitionSetter.java
@@ -28,7 +28,7 @@ public interface NullableVectorDefinitionSetter {
   /**
    * Set a contiguous set of values starting at position "index" to be defined.
    * @param index value position
-   * @param number of contiguous values
+   * @param numValues number of contiguous values
    */
   public void setIndexDefined(int index, int numValues);
 
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/VarLenBulkInput.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/VarLenBulkInput.java
index 7da040c..fa174ed 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/VarLenBulkInput.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/VarLenBulkInput.java
@@ -33,7 +33,6 @@ public interface VarLenBulkInput<T extends VarLenBulkEntry> extends Iterator<T>
    * Indicates we're done processing (processor might stop processing when memory buffers
    * are depleted); this allows caller to re-submit any unprocessed data.
    *
-   * @param numCommitted number of processed entries
    */
   void done();
 
@@ -53,4 +52,4 @@ public interface VarLenBulkInput<T extends VarLenBulkEntry> extends Iterator<T>
      */
     void onEndBulkInput();
   }
-}
\ No newline at end of file
+}
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
index c7a4995..98f0798 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionWriterImpl.java
@@ -232,7 +232,7 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
    * The corresponding metadata must already have been added to the schema.
    * Called by the shim's <tt>addMember</tt> to do writer-level tasks.
    *
-   * @param colWriter the column writer to add
+   * @param writer the column writer to add
    */
 
   protected void addMember(AbstractObjectWriter writer) {
@@ -392,4 +392,4 @@ public class UnionWriterImpl implements VariantWriter, WriterEvents {
   public void dump(HierarchicalFormatter format) {
     // TODO Auto-generated method stub
   }
-}
\ No newline at end of file
+}
diff --git a/logical/src/main/java/org/apache/drill/common/graph/GraphAlgos.java b/logical/src/main/java/org/apache/drill/common/graph/GraphAlgos.java
index 39ae952..dfb0c42 100644
--- a/logical/src/main/java/org/apache/drill/common/graph/GraphAlgos.java
+++ b/logical/src/main/java/org/apache/drill/common/graph/GraphAlgos.java
@@ -67,10 +67,9 @@ public class GraphAlgos {
     /**
      * Execute a depth-first sort on the reversed DAG.
      *
-     * @param graph
-     *          The adjacency list for the DAG.
-     * @param sourceNodes
-     *          List of nodes that
+     * @param graph the adjacency list for the DAG
+     * @param reverse true to sort the reversed DAG, false to sort it as-is
+     *
-     * @return
+     * @return the nodes in depth-first, topologically sorted order
      */
     static <V extends GraphValue<V>> List<AdjacencyList<V>.Node> sortInternal(AdjacencyList<V> graph, boolean reverse) {
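
To make the sortInternal contract concrete, a hedged sketch (not Drill code) of a depth-first topological sort over a plain java.util adjacency map; reversing the adjacency list beforehand corresponds to reverse=true:

    // Post-order DFS: each node is emitted after everything it points to.
    static <V> List<V> topoSortSketch(Map<V, List<V>> adjacency) {
      List<V> order = new ArrayList<>();
      Set<V> visited = new HashSet<>();
      for (V node : adjacency.keySet()) {
        visit(node, adjacency, visited, order);
      }
      return order;
    }

    static <V> void visit(V node, Map<V, List<V>> adj, Set<V> seen, List<V> order) {
      if (!seen.add(node)) {
        return; // already visited
      }
      for (V next : adj.getOrDefault(node, Collections.emptyList())) {
        visit(next, adj, seen, order);
      }
      order.add(node); // post-order: dependencies first
    }
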