Posted to commits@hive.apache.org by kg...@apache.org on 2019/03/08 09:05:38 UTC

[hive] 01/02: HIVE-16716: Clean up javadoc from errors in module ql (Robert Kucsora via Zoltan Haindrich)

This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit e2f10b03cd2f61354edc0ec1f9006f41bb0d6d2a
Author: Robert Kucsora <ku...@gmail.com>
AuthorDate: Fri Mar 8 09:38:45 2019 +0100

    HIVE-16716: Clean up javadoc from errors in module ql (Robert Kucsora via Zoltan Haindrich)
    
    Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
---
 .../Decimal64ColumnCompareDecimal64Column.txt      |  2 +-
 .../Decimal64ColumnCompareDecimal64Scalar.txt      |  2 +-
 .../Decimal64ScalarCompareDecimal64Column.txt      |  2 +-
 .../ExpressionTemplates/FilterColumnBetween.txt    |  2 +-
 .../FilterColumnCompareColumn.txt                  |  2 +-
 .../FilterColumnCompareScalar.txt                  |  2 +-
 .../FilterDTIColumnCompareScalar.txt               |  2 +-
 .../FilterDTIScalarCompareColumn.txt               |  2 +-
 ...FilterDecimal64ColumnCompareDecimal64Column.txt |  2 +-
 ...FilterDecimal64ColumnCompareDecimal64Scalar.txt |  2 +-
 ...FilterDecimal64ScalarCompareDecimal64Column.txt |  2 +-
 .../FilterDecimalColumnBetween.txt                 |  2 +-
 .../FilterDecimalColumnCompareDecimalColumn.txt    |  2 +-
 .../FilterDecimalColumnCompareDecimalScalar.txt    |  2 +-
 .../FilterDecimalScalarCompareDecimalColumn.txt    |  2 +-
 ...ilterLongDoubleColumnCompareTimestampColumn.txt |  2 +-
 ...ilterLongDoubleColumnCompareTimestampScalar.txt |  2 +-
 ...ilterLongDoubleScalarCompareTimestampColumn.txt |  2 +-
 .../FilterScalarCompareColumn.txt                  |  2 +-
 ...ringGroupColumnCompareStringGroupScalarBase.txt |  2 +-
 .../FilterStringGroupColumnCompareStringScalar.txt |  2 +-
 ...erStringGroupColumnCompareTruncStringScalar.txt |  2 +-
 ...ringGroupScalarCompareStringGroupColumnBase.txt |  2 +-
 .../FilterStringScalarCompareStringGroupColumn.txt |  2 +-
 .../FilterTimestampColumnBetween.txt               |  2 +-
 ...ilterTimestampColumnCompareLongDoubleColumn.txt |  2 +-
 ...ilterTimestampColumnCompareLongDoubleScalar.txt |  2 +-
 ...FilterTimestampColumnCompareTimestampColumn.txt |  2 +-
 ...FilterTimestampColumnCompareTimestampScalar.txt |  2 +-
 ...ilterTimestampScalarCompareLongDoubleColumn.txt |  2 +-
 ...FilterTimestampScalarCompareTimestampColumn.txt |  2 +-
 ...erTruncStringScalarCompareStringGroupColumn.txt |  2 +-
 ...ringGroupColumnCompareStringGroupScalarBase.txt |  2 +-
 .../StringGroupColumnCompareStringScalar.txt       |  2 +-
 .../StringGroupColumnCompareTruncStringScalar.txt  |  2 +-
 ...ringGroupScalarCompareStringGroupColumnBase.txt |  2 +-
 .../StringScalarCompareStringGroupColumn.txt       |  2 +-
 .../TruncStringScalarCompareStringGroupColumn.txt  |  2 +-
 .../hadoop/hive/llap/LlapArrowRecordWriter.java    |  2 +-
 .../hive/ql/cache/results/QueryResultsCache.java   |  4 --
 .../hive/ql/exec/AbstractFileMergeOperator.java    | 10 ++--
 .../hadoop/hive/ql/exec/CommonJoinOperator.java    | 14 +++---
 .../hive/ql/exec/ComparisonOpMethodResolver.java   |  4 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java    |  4 --
 .../hadoop/hive/ql/exec/DummyStoreOperator.java    |  8 +--
 .../hadoop/hive/ql/exec/FileSinkOperator.java      |  4 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java      |  6 +--
 .../hive/ql/exec/LateralViewJoinOperator.java      |  2 +-
 .../org/apache/hadoop/hive/ql/exec/TopNHash.java   |  5 +-
 .../java/org/apache/hadoop/hive/ql/exec/UDAF.java  |  7 ++-
 .../org/apache/hadoop/hive/ql/exec/Utilities.java  |  6 +--
 .../hive/ql/exec/errors/ScriptErrorHeuristic.java  |  2 +-
 .../exec/persistence/BytesBytesMultiHashMap.java   |  2 +-
 .../exec/repl/bootstrap/events/FunctionEvent.java  |  4 +-
 .../events/filesystem/BootstrapEventsIterator.java |  2 +-
 .../hadoop/hive/ql/exec/repl/util/TaskTracker.java |  2 +-
 .../apache/hadoop/hive/ql/exec/tez/DagUtils.java   |  4 +-
 .../hive/ql/exec/tez/HivePreWarmProcessor.java     |  2 +-
 .../hadoop/hive/ql/exec/tez/TezSessionState.java   |  5 --
 .../hive/ql/exec/vector/VectorAggregationDesc.java | 14 +++---
 .../hive/ql/exec/vector/VectorizedRowBatchCtx.java |  2 -
 .../ql/exec/vector/expressions/CuckooSetBytes.java |  2 +-
 .../exec/vector/expressions/CuckooSetDouble.java   |  2 +-
 .../ql/exec/vector/expressions/CuckooSetLong.java  |  2 +-
 .../VectorMapJoinOuterGenerateResultOperator.java  |  6 +--
 .../fast/VectorMapJoinFastBytesHashKeyRef.java     |  1 -
 .../fast/VectorMapJoinFastBytesHashMapStore.java   | 22 ++++----
 .../VectorMapJoinFastBytesHashMultiSetStore.java   |  4 +-
 .../fast/VectorMapJoinFastBytesHashSetStore.java   |  4 +-
 .../vector/wrapper/VectorHashKeyWrapperBase.java   |  2 +-
 .../wrapper/VectorHashKeyWrapperGeneral.java       |  2 +-
 .../apache/hadoop/hive/ql/io/AcidOutputFormat.java |  2 +-
 .../org/apache/hadoop/hive/ql/io/BucketCodec.java  |  2 +-
 .../hadoop/hive/ql/io/FlatFileInputFormat.java     |  7 ---
 .../hive/ql/io/HiveContextAwareRecordReader.java   |  2 +-
 .../hive/ql/io/HiveIgnoreKeyTextOutputFormat.java  |  4 +-
 .../hive/ql/io/IgnoreKeyTextOutputFormat.java      |  2 +-
 .../java/org/apache/hadoop/hive/ql/io/RCFile.java  | 13 ++---
 .../hadoop/hive/ql/io/merge/MergeFileWork.java     |  2 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java    |  4 +-
 .../org/apache/hadoop/hive/ql/io/orc/OrcSplit.java |  2 +-
 .../ql/io/orc/VectorizedOrcAcidRowBatchReader.java |  4 +-
 .../hive/ql/io/orc/encoded/EncodedReader.java      |  2 +-
 .../apache/hadoop/hive/ql/io/orc/package-info.java |  6 +--
 .../serde/ArrayWritableObjectInspector.java        |  2 +-
 .../parquet/serde/DeepParquetHiveMapInspector.java |  4 +-
 .../parquet/serde/ParquetHiveArrayInspector.java   |  2 +-
 .../serde/StandardParquetHiveMapInspector.java     |  2 +-
 .../java/org/apache/hadoop/hive/ql/lib/Node.java   |  2 +-
 .../apache/hadoop/hive/ql/lib/RuleExactMatch.java  |  4 +-
 .../org/apache/hadoop/hive/ql/lib/RuleRegExp.java  |  2 +-
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java     |  2 +-
 .../hive/ql/log/PidFilePatternConverter.java       |  6 +--
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   | 16 +++---
 .../hadoop/hive/ql/metadata/PartitionIterable.java |  6 +--
 .../org/apache/hadoop/hive/ql/metadata/Table.java  |  4 +-
 .../ql/metadata/formatting/MetaDataFormatter.java  |  1 -
 .../hive/ql/optimizer/ColumnPrunerProcFactory.java |  2 +-
 .../ql/optimizer/ConstantPropagateProcCtx.java     |  2 +-
 .../ql/optimizer/ConstantPropagateProcFactory.java |  2 +-
 .../ql/optimizer/CountDistinctRewriteProc.java     |  6 +--
 .../hadoop/hive/ql/optimizer/GenMRProcContext.java |  2 -
 .../hadoop/hive/ql/optimizer/GenMapRedUtils.java   | 20 ++++----
 .../hive/ql/optimizer/GlobalLimitOptimizer.java    |  8 +--
 .../hive/ql/optimizer/IdentityProjectRemover.java  | 12 ++---
 .../hive/ql/optimizer/LimitPushdownOptimizer.java  |  2 +-
 .../hadoop/hive/ql/optimizer/MapJoinProcessor.java |  7 ---
 .../hive/ql/optimizer/PrunerOperatorFactory.java   |  2 +-
 .../hive/ql/optimizer/SharedWorkOptimizer.java     |  4 +-
 .../hive/ql/optimizer/SparkMapJoinProcessor.java   |  2 -
 .../hive/ql/optimizer/calcite/HiveCalciteUtil.java |  2 +-
 .../calcite/reloperators/HiveProject.java          |  2 +-
 .../calcite/rules/HiveExceptRewriteRule.java       |  4 +-
 .../calcite/rules/HiveInsertExchange4JoinRule.java |  4 +-
 .../rules/HivePointLookupOptimizerRule.java        |  4 +-
 .../rules/HiveReduceExpressionsWithStatsRule.java  |  4 +-
 .../calcite/rules/HiveSubQueryRemoveRule.java      |  6 +--
 .../rules/jdbc/JDBCAggregationPushDownRule.java    |  2 +-
 .../rules/jdbc/JDBCProjectPushDownRule.java        |  2 +-
 .../HiveAggregateIncrementalRewritingRule.java     |  6 +--
 .../calcite/translator/HiveGBOpConvUtil.java       |  2 +-
 .../correlation/CorrelationOptimizer.java          |  2 +-
 .../correlation/CorrelationUtilities.java          |  7 ++-
 .../listbucketingpruner/ListBucketingPruner.java   | 58 +++++++++++-----------
 .../ql/optimizer/physical/PhysicalOptimizer.java   |  2 +-
 .../hive/ql/optimizer/ppr/PartExprEvalUtils.java   |  1 -
 .../stats/annotation/StatsRulesProcFactory.java    | 51 +++++++------------
 .../org/apache/hadoop/hive/ql/parse/ASTNode.java   |  2 +-
 .../hive/ql/parse/AcidExportSemanticAnalyzer.java  |  2 +-
 .../hadoop/hive/ql/parse/PTFInvocationSpec.java    |  2 +-
 .../hadoop/hive/ql/parse/ReplicationSpec.java      |  2 +-
 .../apache/hadoop/hive/ql/parse/TableSample.java   |  2 +-
 .../apache/hadoop/hive/ql/plan/AlterTableDesc.java |  2 +-
 .../apache/hadoop/hive/ql/plan/CreateViewDesc.java |  3 +-
 .../org/apache/hadoop/hive/ql/plan/ExportWork.java |  2 +-
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java     |  6 +--
 .../hadoop/hive/ql/plan/ListBucketingCtx.java      |  2 -
 .../org/apache/hadoop/hive/ql/plan/MapWork.java    |  6 +--
 .../org/apache/hadoop/hive/ql/plan/PlanUtils.java  |  2 +-
 .../hive/ql/plan/ShowCreateDatabaseDesc.java       |  2 +-
 .../hadoop/hive/ql/plan/ShowFunctionsDesc.java     |  2 +-
 .../hive/ql/plan/SkewedColumnPositionPair.java     |  2 +-
 .../org/apache/hadoop/hive/ql/plan/SparkWork.java  |  1 -
 .../org/apache/hadoop/hive/ql/plan/TezWork.java    |  1 -
 .../org/apache/hadoop/hive/ql/plan/UDTFDesc.java   |  4 +-
 .../hadoop/hive/ql/plan/VectorGroupByDesc.java     | 16 +++---
 .../hadoop/hive/ql/ppd/PredicatePushDown.java      |  4 +-
 .../ql/processors/CommandProcessorResponse.java    |  2 +-
 .../hadoop/hive/ql/processors/CryptoProcessor.java |  2 +-
 .../security/authorization/AuthorizationUtils.java |  1 -
 .../ql/security/authorization/PrivilegeType.java   |  2 +-
 .../authorization/plugin/HiveAuthorizer.java       | 15 +++---
 .../authorization/plugin/HivePrivilegeObject.java  |  6 +--
 .../hadoop/hive/ql/stats/StatsAggregator.java      |  3 --
 .../hadoop/hive/ql/stats/StatsPublisher.java       |  3 --
 .../apache/hadoop/hive/ql/stats/StatsUtils.java    |  2 +-
 .../org/apache/hadoop/hive/ql/udf/SettableUDF.java |  1 -
 .../org/apache/hadoop/hive/ql/udf/UDFConv.java     |  2 +-
 .../org/apache/hadoop/hive/ql/udf/UDFParseUrl.java |  2 +-
 .../org/apache/hadoop/hive/ql/udf/UDFSign.java     |  2 +-
 .../ql/udf/generic/GenericUDAFCorrelation.java     | 24 ++++-----
 .../hive/ql/udf/generic/GenericUDAFCovariance.java | 16 +++---
 .../hive/ql/udf/generic/GenericUDAFEvaluator.java  |  2 +-
 .../hive/ql/udf/generic/GenericUDAFLeadLag.java    |  2 +-
 .../hadoop/hive/ql/udf/generic/GenericUDF.java     |  5 +-
 .../hive/ql/udf/generic/GenericUDFConcatWS.java    |  2 +-
 .../hadoop/hive/ql/udf/generic/GenericUDFIf.java   |  2 +-
 .../hive/ql/udf/generic/GenericUDFTimestamp.java   |  2 +-
 .../udf/generic/GenericUDFToIntervalDayTime.java   |  2 +-
 .../udf/generic/GenericUDFToIntervalYearMonth.java |  2 +-
 .../hadoop/hive/ql/udf/generic/GenericUDTF.java    |  2 +-
 .../apache/hadoop/hive/ql/udf/ptf/MatchPath.java   |  2 +-
 .../hive/ql/udf/ptf/TableFunctionEvaluator.java    |  2 +-
 .../hive/ql/udf/ptf/TableFunctionResolver.java     |  8 ++-
 .../apache/hadoop/hive/ql/util/UpgradeTool.java    |  2 +-
 .../org/apache/hadoop/hive/ql/wm/Expression.java   |  4 +-
 176 files changed, 337 insertions(+), 423 deletions(-)

diff --git a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
index d260716..127c905 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template Decimal64ColumnCompareDecimal64Column.txt, which covers
  * decimal64 comparison expressions between two columns, however output is not produced in
- * a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated
  * for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
index 802b9a6..cc7aa89 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 /**
  * Generated from template Decimal64ColumnCompareDecimal64Scalar.txt, which covers decimal64
  * comparison  expressions between a column and a scalar, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
  * in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
index c8b10b6..009a877 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 /**
  * Generated from template Decimal64ScalarCompareDecimal64Column.txt, which covers decimal64
  * comparison expressions between a scalar and a column, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
  * in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
index 47dd42f..26fee47 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
  * Generated from template FilterColumnBetween.txt, which covers [NOT] BETWEEN filter 
  * expressions where a column is [NOT] between one scalar and another.
  * Output is not produced in a separate column.  The selected vector of the input 
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
index be9bbb2..cd12223 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterColumnCompareColumn.txt, which covers binary comparison 
  * expressions between two columns, however output is not produced in a separate column. 
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
index 8b6f978..eeea48a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterColumnCompareScalar.txt, which covers binary comparison 
  * expressions between a column and a scalar, however output is not produced in a separate column. 
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
index 2eaf062..a88435b 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
  * Generated from template FilterDTIColumnCompareScalar.txt, which covers comparison 
  * expressions between a datetime/interval column and a scalar of the same type, however output is not
  * produced in a separate column. 
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
index 69f0d6b..454e1a7 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
  * Generated from template FilterDTIScalarCompareColumn.txt, which covers comparison 
  * expressions between a datetime/interval scalar and a column of the same type,
  * however output is not produced in a separate column. 
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
index be3add0..6af237d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template FilterDecimal64ColumnCompareDecimal64Column.txt, which covers
  * decimal64 comparison expressions between two columns, however output is not produced in
- * a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated
  * for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
index 715d04d..65737e8 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template FilterDecimal64ColumnCompareDecimal64Scalar.txt, which covers decimal64
  * comparison  expressions between a column and a scalar, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
  * in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
index 1904180..5c7d001 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template FilterDecimal64ScalarCompareDecimal64Column.txt, which covers decimal64
  * comparison expressions between a scalar and a column, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
  * in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
index c7cfc4d..8ddbd7b 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
  * Generated from template FilterDecimalColumnBetween.txt, which covers [NOT] BETWEEN filter
  * expressions where a column is [NOT] between one scalar and another.
  * Output is not produced in a separate column.  The selected vector of the input
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
index ae2bb17..7522ee3 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterDecimalColumnCompareColumn.txt, which covers binary comparison
  * filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
index 5e59c03..b3728de 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
  * values.
  */
 public class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
index 00d0042..4c5c224 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
  * values.
  */
 public class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
index 4e78fd6..9d6dc79 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
  * expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
index 5ae21e6..64916dd 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
  * expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
index 69cf579..87b4655 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterScalarCompareTimestampColumn.txt, which covers comparison
  * expressions between a long/double scalar and a timestamp column, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
  * for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
index 2ff9e98..3398a47 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterScalarCompareColumn.txt, which covers binary comparison 
  * expressions between a scalar and a column, however output is not produced in a separate column. 
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
index 6efa1ca..4ae3ddb 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public abstract class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
index 97be5f4..5cab792 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
index 0c67902..657bb7e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
index 7165eb2..ac26485 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly.
  */
 public abstract class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
index 2e7bec7..5b69062 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
index ad5985f..5ceb390 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
  * Generated from template FilterTimestampColumnBetween.txt, which covers [NOT] BETWEEN filter
  * expressions where a column is [NOT] between one scalar and another.
  * Output is not produced in a separate column.  The selected vector of the input
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
index 8399334..608492a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
  * expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
index 57834c2..747e95d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterTimestampColumnCompareScalar.txt, which covers comparison
  * expressions between a timestamp column and a long/double scalar, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
  * for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
index b855714..a1e54a0 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterTimestampColumnCompareColumn.txt, which covers binary comparison
  * filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
index 6a05d77..5061bc9 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 /**
  * Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
  * expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
index c1ddc08..8a3ad3f 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 /**
  * Generated from template FilterScalarCompareColumn.txt, which covers binary comparison
  * expressions between a scalar and a column, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
index 36628a7..f86d728 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of timestamp
+ * This is a generated class to evaluate a comparison on a vector of timestamp
  * values.
  */
 public class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
index 31c443c..ee88e7c 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly.
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
index 48913a3..ef4ee9d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public abstract class <ClassName> extends VectorExpression {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
index 07b4bf3..0332e2c 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
index eccbee2..4e132b9 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  */
 public class <ClassName> extends <BaseClassName> {
 
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
index 3cf4a2e..bba788e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly. 
  */
 public abstract class <ClassName> extends VectorExpression {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
index 4e29f7e..88babac 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly. 
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
index 3d2d280..3b35ba9 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
  * Do not edit the generated code directly. 
  */
 public class <ClassName> extends <BaseClassName> {
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
index 4cd8a61..73e1d06 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  * calls to the {@link #write(Writable, Writable)} method only serve as a signal that
  * a new batch has been loaded to the associated VectorSchemaRoot.
  * Payload data for writing is indirectly made available by reference:
- * ArrowStreamWriter -> VectorSchemaRoot -> List<FieldVector>
+ * ArrowStreamWriter -&gt; VectorSchemaRoot -&gt; List&lt;FieldVector&gt;
  * i.e. both they key and value are ignored once a reference to the VectorSchemaRoot
  * is obtained.
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
index a51b7e7..0b7166b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
@@ -412,10 +412,6 @@ public final class QueryResultsCache {
   /**
    * Check if the cache contains an entry for the requested LookupInfo.
    * @param request
-   * @param addReader Should the reader count be incremented during the lookup.
-   *        This will ensure the returned entry can be used after the lookup.
-   *        If true, the caller will be responsible for decrementing the reader count
-   *        using CacheEntry.releaseReader().
    * @return  The cached result if there is a match in the cache, or null if no match is found.
    */
   public CacheEntry lookup(LookupInfo request) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index df84417..bb89f80 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -132,8 +132,8 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
   /**
    * Fixes tmpPath to point to the correct partition. Initialize operator will
    * set tmpPath and taskTmpPath based on root table directory. So initially,
-   * tmpPath will be <prefix>/_tmp.-ext-10000 and taskTmpPath will be
-   * <prefix>/_task_tmp.-ext-10000. The depth of these two paths will be 0.
+   * tmpPath will be &lt;prefix&gt;/_tmp.-ext-10000 and taskTmpPath will be
+   * &lt;prefix&gt;/_task_tmp.-ext-10000. The depth of these two paths will be 0.
    * Now, in case of dynamic partitioning or list bucketing the inputPath will
    * have additional sub-directories under root table directory. This function
    * updates the tmpPath and taskTmpPath to reflect these additional
@@ -146,10 +146,10 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
    * Note: The path difference between inputPath and tmpDepth can be DP or DP+LB.
    * This method will automatically handle it.
    *
-   * Continuing the example above, if inputPath is <prefix>/-ext-10000/hr=a1/,
+   * Continuing the example above, if inputPath is &lt;prefix&gt;/-ext-10000/hr=a1/,
    * newPath will be hr=a1/. Then, tmpPath and taskTmpPath will be updated to
-   * <prefix>/-ext-10000/hr=a1/_tmp.ext-10000 and
-   * <prefix>/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
+   * &lt;prefix&gt;/-ext-10000/hr=a1/_tmp.ext-10000 and
+   * &lt;prefix&gt;/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
    * We have list_bucket_dml_6.q cover this case: DP + LP + multiple skewed
    * values + merge.
    *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index 1c32588..2d76848 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -72,8 +72,8 @@ public abstract class CommonJoinOperator<T extends JoinDesc> extends
    * evaluated before emitting rows. Currently, relevant only for outer joins.
    *
    * For instance, given the query:
-   *     select * from t1 right outer join t2 on t1.c1 + t2.c2 > t1.c3;
-   * The expression evaluator for t1.c1 + t2.c2 > t1.c3 will be stored in this list.
+   *     select * from t1 right outer join t2 on t1.c1 + t2.c2 &gt; t1.c3;
+   * The expression evaluator for t1.c1 + t2.c2 &gt; t1.c3 will be stored in this list.
    */
   protected transient List<ExprNodeEvaluator> residualJoinFilters;
 
@@ -448,21 +448,21 @@ public abstract class CommonJoinOperator<T extends JoinDesc> extends
    *   a = 100, 10 | 100, 20 | 100, 30
    *   b = 100, 10 | 100, 20 | 100, 30
    *
-   * the query "a FO b ON a.k=b.k AND a.v>10 AND b.v>30" makes filter map
-   *   0(a) = [1(b),1] : a.v>10
-   *   1(b) = [0(a),1] : b.v>30
+   * the query "a FO b ON a.k=b.k AND a.v&gt;10 AND b.v&gt;30" makes filter map
+   *   0(a) = [1(b),1] : a.v&gt;10
+   *   1(b) = [0(a),1] : b.v&gt;30
    *
    * for filtered rows in a (100,10) create a-NULL
    * for filtered rows in b (100,10) (100,20) (100,30) create NULL-b
    *
-   * with 0(a) = [1(b),1] : a.v>10
+   * with 0(a) = [1(b),1] : a.v&gt;10
    *   100, 10 = 00000010 (filtered)
    *   100, 20 = 00000000 (valid)
    *   100, 30 = 00000000 (valid)
    * -------------------------
    *       sum = 00000000 : for valid rows in b, there is at least one pair in a
    *
-   * with 1(b) = [0(a),1] : b.v>30
+   * with 1(b) = [0(a),1] : b.v&gt;30
    *   100, 10 = 00000001 (filtered)
    *   100, 20 = 00000001 (filtered)
    *   100, 30 = 00000001 (filtered)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
index 2bbcef1..6620687 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
 /**
- * The class implements the method resolution for operators like (> < <= >= =
- * <>). The resolution logic is as follows: 1. If one of the parameters is null,
+ * The class implements the method resolution for operators like (&gt; &lt; &lt;= &gt;= =
+ * &lt;&gt;). The resolution logic is as follows: 1. If one of the parameters is null,
  * then it resolves to evaluate(Double, Double) 2. If both of the parameters are
  * of type T, then it resolves to evaluate(T, T) 3. If 1 and 2 fails then it
  * resolves to evaluate(Double, Double).
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a39126f..a56695b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4808,10 +4808,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
    *
    * @param databaseName
    *          Database name.
-   * @param sd
-   *          Storage descriptor.
-   * @param name
-   *          Object name.
    */
   public static void makeLocationQualified(String databaseName, Table table, HiveConf conf) throws HiveException {
     Path path = null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
index d66384a..2c63001 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.Object
  * Consider a query like:
  *
  * select * from
- *   (subq1 --> has a filter)
+ *   (subq1 --&gt; has a filter)
  *   join
- *   (subq2 --> has a filter)
+ *   (subq2 --&gt; has a filter)
  * on some key
  *
  * Let us assume that subq1 is the small table (either specified by the user or inferred
@@ -50,12 +50,12 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.Object
  *
  * Therefore the following operator tree is created:
  *
- * TableScan (subq1) --> Select --> Filter --> DummyStore
+ * TableScan (subq1) --&gt; Select --&gt; Filter --&gt; DummyStore
  *                                                         \
  *                                                          \     SMBJoin
  *                                                          /
  *                                                         /
- * TableScan (subq2) --> Select --> Filter
+ * TableScan (subq2) --&gt; Select --&gt; Filter
  *
  * In order to fetch the row with the least join key from the small table, the row from subq1
  * is partially processed, and stored in DummyStore. For the actual processing of the join,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 79e41d9..cd13397 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -383,8 +383,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
      * FileSink, in ways similar to the multi file spray, but without knowing the total number of
      * buckets ahead of time.
      *
-     * ROW__ID (1,2[0],3) => bucket_00002
-     * ROW__ID (1,3[0],4) => bucket_00003 etc
+     * ROW__ID (1,2[0],3) =&gt; bucket_00002
+     * ROW__ID (1,3[0],4) =&gt; bucket_00003 etc
      *
      * A new FSP is created for each partition, so this only requires the bucket numbering and that
      * is mapped in directly as an index.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index d6dc9a9..3e4aa74 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -726,8 +726,8 @@ public final class FunctionRegistry {
    * return a TypeInfo corresponding to the common PrimitiveCategory, and with type qualifiers
    * (if applicable) that match the 2 TypeInfo types.
    * Examples:
-   *   varchar(10), varchar(20), primitive category varchar => varchar(20)
-   *   date, string, primitive category string => string
+   *   varchar(10), varchar(20), primitive category varchar =&gt; varchar(20)
+   *   date, string, primitive category string =&gt; string
    * @param a  TypeInfo of the first type
    * @param b  TypeInfo of the second type
    * @param typeCategory PrimitiveCategory of the designated common type between a and b
@@ -1382,7 +1382,6 @@ public final class FunctionRegistry {
   /**
    * A shortcut to get the "index" GenericUDF. This is used for getting elements
   * out of an array and getting values out of a map.
-   * @throws SemanticException
    */
   public static GenericUDF getGenericUDFForIndex() {
     try {
@@ -1394,7 +1393,6 @@ public final class FunctionRegistry {
 
   /**
    * A shortcut to get the "and" GenericUDF.
-   * @throws SemanticException
    */
   public static GenericUDF getGenericUDFForAnd() {
     try {
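The qualifier-merging rule shown in the first hunk above, varchar(10) and varchar(20) yielding varchar(20), boils down to keeping the wider qualifier. A standalone sketch under that assumption, not the FunctionRegistry code itself:

public class CommonTypeSketch {
  // For two varchar types with a common varchar category, keep the larger length.
  static String commonVarchar(int lengthA, int lengthB) {
    return "varchar(" + Math.max(lengthA, lengthB) + ")";
  }

  public static void main(String[] args) {
    System.out.println(commonVarchar(10, 20)); // varchar(20)
  }
}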
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
index 6585b19..15a2cbc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
  *
  * The output of select in the left branch and output of the UDTF in the right
  * branch are then sent to the lateral view join (LVJ). In most cases, the UDTF
- * will generate > 1 row for every row received from the TS, while the left
+ * will generate &gt; 1 row for every row received from the TS, while the left
  * select operator will generate only one. For each row output from the TS, the
  * LVJ outputs all possible rows that can be created by joining the row from the
  * left select and one of the rows output from the UDTF.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
index 5c502e1..01d0392 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
@@ -264,7 +264,7 @@ public class TopNHash {
   /**
    * Get vectorized batch result for particular index.
    * @param batchIndex index of the key in the batch.
-   * @return the result, same as from {@link #tryStoreKey(HiveKey)}
+   * @return the result, same as from {@link TopNHash#tryStoreKey(HiveKey,boolean)}
    */
   public int getVectorizedBatchResult(int batchIndex) {
     int result = batchIndexToResult[batchIndex];
@@ -309,9 +309,8 @@ public class TopNHash {
   /**
    * Stores the value for the key in the heap.
    * @param index The index, either from tryStoreKey or from tryStoreVectorizedKey result.
-   * @param hasCode hashCode of key, used by ptfTopNHash.
+   * @param hashCode hashCode of key, used by ptfTopNHash.
    * @param value The value to store.
-   * @param keyHash The key hash to store.
    * @param vectorized Whether the result is coming from a vectorized batch.
    */
   public void storeValue(int index, int hashCode, BytesWritable value, boolean vectorized) {
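The tryStoreKey/storeValue contract above follows the usual top-n pattern: a key is admitted while capacity remains, or when it beats the current worst entry. A standalone sketch of that pattern with integer keys and a plain heap (not the Hive class, which works on serialized keys and tracks batch indexes):

import java.util.Collections;
import java.util.PriorityQueue;

public class TopNSketch {
  private final int n;
  // Max-heap: the head is the worst (largest) key currently kept.
  private final PriorityQueue<Integer> heap = new PriorityQueue<>(Collections.reverseOrder());

  TopNSketch(int n) { this.n = n; }

  // Returns true if the key is kept, analogous to a FORWARD result.
  boolean tryStoreKey(int key) {
    if (heap.size() < n) {
      heap.add(key);
      return true;
    }
    if (key < heap.peek()) {
      heap.poll();      // evict the current worst key
      heap.add(key);
      return true;
    }
    return false;       // analogous to an EXCLUDE result
  }

  public static void main(String[] args) {
    TopNSketch top2 = new TopNSketch(2);
    System.out.println(top2.tryStoreKey(5)); // true
    System.out.println(top2.tryStoreKey(3)); // true
    System.out.println(top2.tryStoreKey(4)); // true, 5 is evicted
    System.out.println(top2.tryStoreKey(9)); // false
  }
}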
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
index 63ddb6b..dc33a08 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.hive.ql.exec;
 
 /**
  * <strong>Please see the deprecation notice</strong>
- * <p/>
+ * <br>
  * Base class for all User-defined Aggregation Function (UDAF) classes.
- * <p/>
+ * <br>
  * Requirements for a UDAF class:
  * <ol>
  *   <li>Implement the {@code init()} method, which resets the status of the aggregation function.</li>
@@ -57,7 +57,7 @@ package org.apache.hadoop.hive.ql.exec;
  *     aggregation result and returns a boolean. The method should always return
  *     {@code true} on valid inputs, or the framework will throw an Exception.</li>
  * </ol>
- * <p/>
+ * <br>
  * Following are some examples:
  * <ul>
  *   <li>public int evaluatePartial();</li>
@@ -65,7 +65,6 @@ package org.apache.hadoop.hive.ql.exec;
  *   <li>public String evaluatePartial();</li>
  *   <li>public boolean aggregatePartial(String partial);</li>
  * </ul>
- * <p/>
  *
  * @deprecated Either implement {@link org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver2} or extend
  * {@link org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver} instead.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4e621a4..018bc52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2228,7 +2228,7 @@ public final class Utilities {
   * If there is no db name part, use the current session's default db
    * @param dbtable
    * @return String array with two elements, first is db name, second is table name
-   * @throws HiveException
+   * @throws SemanticException
    */
   public static String[] getDbTableName(String dbtable) throws SemanticException {
     return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable);
@@ -3941,9 +3941,9 @@ public final class Utilities {
   }
 
   /**
-   * Checks if the current HiveServer2 logging operation level is >= PERFORMANCE.
+   * Checks if the current HiveServer2 logging operation level is &gt;= PERFORMANCE.
    * @param conf Hive configuration.
-   * @return true if current HiveServer2 logging operation level is >= PERFORMANCE.
+   * @return true if current HiveServer2 logging operation level is &gt;= PERFORMANCE.
    * Else, false.
    */
   public static boolean isPerfOrAboveLogging(HiveConf conf) {
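The getDbTableName contract in the first hunk above is simple to state: one dot splits db from table, and a bare table name picks up the session's default db. A standalone sketch of that rule (the error handling here is an assumption; the real method throws SemanticException):

import java.util.Arrays;

public class DbTableNameSketch {
  static String[] getDbTableName(String currentDb, String dbtable) {
    String[] parts = dbtable.split("\\.");
    if (parts.length == 1) {
      return new String[] { currentDb, dbtable }; // no db part: default db
    }
    if (parts.length == 2) {
      return new String[] { parts[0], parts[1] };
    }
    throw new IllegalArgumentException("Invalid table name: " + dbtable);
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(getDbTableName("default", "web.logs"))); // [web, logs]
    System.out.println(Arrays.toString(getDbTableName("default", "logs")));     // [default, logs]
  }
}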
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
index 671fb95..c686f72 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
@@ -28,7 +28,7 @@ import java.util.regex.Pattern;
  *
  * Conditions to check:
  *
- * 1. "Script failed with code <some number>" is in the log
+ * 1. "Script failed with code &lt;some number&gt;" is in the log
  *
  */
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
index a6b0dbc..dbf75b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
@@ -493,7 +493,7 @@ public final class BytesBytesMultiHashMap implements MemoryEstimate {
   }
 
   /**
-   * Take the segment reference from {@link #getValueRefs(byte[], int, List)}
+   * Takes the segment reference from the getValueRefs(byte[],int,List)
    * result and makes it self-contained - adds byte array where the value is stored, and
    * updates the offset from "global" write buffers offset to offset within that array.
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
index b75ebcb..9dfcbfa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
@@ -24,8 +24,8 @@ import org.apache.hadoop.fs.Path;
  * <p>
  * Since bootstrap and incremental loads for functions are handled similarly, there
  * is additional work to make sure we pass the event object from both places.
- *
- * @see org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler.FunctionDescBuilder
+ * <p>
+ * FunctionDescBuilder in {@link org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler}
  * would be merged here mostly.
  */
 public interface FunctionEvent extends BootstrapEvent {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
index ef6e31f..f1d7563 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
@@ -61,7 +61,7 @@ import java.util.stream.Collectors;
  * 2. Table before partition is not explicitly required as table and partition metadata are in the same file.
  *
  *
- * For future integrations other sources of events like kafka, would require to implement an Iterator<BootstrapEvent>
+ * For future integrations, other sources of events such as Kafka would require implementing an Iterator&lt;BootstrapEvent&gt;
  *
  */
 public class BootstrapEventsIterator implements Iterator<BootstrapEvent> {
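As the fixed comment says, an alternative event source only has to supply an Iterator over bootstrap events. A minimal sketch of the shape such an adapter could take, with an in-memory queue standing in for a real source such as a Kafka consumer (purely hypothetical, not part of the patch):

import java.util.ArrayDeque;
import java.util.Iterator;
import java.util.Queue;

// Generic stand-in: E would be BootstrapEvent in the real integration.
public class QueueEventsIterator<E> implements Iterator<E> {
  private final Queue<E> events;

  public QueueEventsIterator(Queue<E> events) {
    this.events = events;
  }

  @Override public boolean hasNext() { return !events.isEmpty(); }
  @Override public E next() { return events.remove(); }

  public static void main(String[] args) {
    Queue<String> q = new ArrayDeque<>(java.util.List.of("db-event", "table-event"));
    Iterator<String> it = new QueueEventsIterator<>(q);
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}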
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
index 1d01bc9..20ede9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
@@ -30,7 +30,7 @@ import java.util.List;
 /**
 * This class is responsible for tracking how many tasks have been created and for
 * organizing tasks such that, after the tasks for the next execution are created,
- * we create a dependency collection task(DCT) -> another bootstrap task,
+ * we create a dependency collection task(DCT) -&gt; another bootstrap task,
 * and then add the DCT as a dependent of all existing tasks so the cycle can continue.
  */
 public class TaskTracker {
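The chaining described above can be pictured with a toy Task type (hypothetical; the real class wires up Hive Task objects): every task in a finished batch gets the DCT as a dependent, and the next batch hangs off the DCT.

import java.util.ArrayList;
import java.util.List;

public class DctChainSketch {
  static class Task {
    final String name;
    final List<Task> dependents = new ArrayList<>();
    Task(String name) { this.name = name; }
    void addDependent(Task t) { dependents.add(t); }
  }

  public static void main(String[] args) {
    List<Task> batch = List.of(new Task("t1"), new Task("t2"));
    Task dct = new Task("DCT");
    for (Task t : batch) {
      t.addDependent(dct);                       // DCT runs only after the whole batch
    }
    dct.addDependent(new Task("nextBatchTask")); // and the cycle continues from the DCT
  }
}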
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 1d38f99..f06ac37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1016,7 +1016,7 @@ public class DagUtils {
    * to provide on the cluster as resources for execution.
    *
    * @param conf
-   * @return List<LocalResource> local resources to add to execution
+   * @return List&lt;LocalResource&gt; local resources to add to execution
    * @throws IOException when hdfs operation fails
    * @throws LoginException when getDefaultDestDir fails with the same exception
    */
@@ -1101,7 +1101,7 @@ public class DagUtils {
    * @param hdfsDirPathStr Destination directory in HDFS.
    * @param conf Configuration.
    * @param inputOutputJars The file names to localize.
-   * @return List<LocalResource> local resources to add to execution
+   * @return List&lt;LocalResource&gt; local resources to add to execution
    * @throws IOException when hdfs operation fails.
    * @throws LoginException when getDefaultDestDir fails with the same exception
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
index 39a9c77..b6c0d7f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
@@ -47,7 +47,7 @@ import javax.crypto.Mac;
  * A simple sleep processor implementation that sleeps for the configured
  * time in milliseconds.
  *
- * @see Config for configuring the HivePreWarmProcessor
+ * @see Configuration for configuring the HivePreWarmProcessor
  */
 public class HivePreWarmProcessor extends AbstractLogicalIOProcessor {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index 08e65a4..767b359 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -222,11 +222,6 @@ public class TezSessionState {
     return true;
   }
 
-
-  /**
-   * Get all open sessions. Only used to clean up at shutdown.
-   * @return List<TezSessionState>
-   */
   public static String makeSessionId() {
     return UUID.randomUUID().toString();
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
index 417beec..48b5764 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
@@ -47,22 +47,22 @@ import com.google.common.base.Preconditions;
  *
  *    (Notice that these names are a subset of GroupByDesc.Mode...)
  *
- *        PARTIAL1       Original data            --> Partial aggregation data
+ *        PARTIAL1       Original data            --&gt; Partial aggregation data
  *
- *        PARTIAL2       Partial aggregation data --> Partial aggregation data
+ *        PARTIAL2       Partial aggregation data --&gt; Partial aggregation data
  *
- *        FINAL          Partial aggregation data --> Full aggregation data
+ *        FINAL          Partial aggregation data --&gt; Full aggregation data
  *
- *        COMPLETE       Original data            --> Full aggregation data
+ *        COMPLETE       Original data            --&gt; Full aggregation data
  *
  *
- * SIMPLEST CASE --> The data type/semantics of original data, partial aggregation
+ * SIMPLEST CASE --&gt; The data type/semantics of original data, partial aggregation
  *     data, and full aggregation data ARE THE SAME.  E.g. MIN, MAX, SUM.  The different
  *     modes can be handled by one aggregation class.
  *
  *     This case has a null for the Mode.
  *
- * FOR OTHERS --> The data type/semantics of partial aggregation data and full aggregation data
+ * FOR OTHERS --&gt; The data type/semantics of partial aggregation data and full aggregation data
  *    ARE THE SAME but different than original data.  This results in 2 aggregation classes:
  *
  *       1) A class that takes original rows and outputs partial/full aggregation
@@ -75,7 +75,7 @@ import com.google.common.base.Preconditions;
  *
  *    E.g. COUNT(*) and COUNT(column)
  *
- * OTHERWISE FULL --> The data type/semantics of partial aggregation data is different than
+ * OTHERWISE FULL --&gt; The data type/semantics of partial aggregation data is different than
  *    original data and full aggregation data.
  *
  *    E.g. AVG uses a STRUCT with count and sum for partial aggregation data.  It divides
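For the AVG case above, the mode split is easy to see in miniature: PARTIAL1 turns original rows into a (count, sum) struct, and FINAL merges such structs and divides. A standalone sketch, not the vectorized classes themselves:

public class AvgModesSketch {
  static class Partial { long count; double sum; }

  // PARTIAL1: original data -> partial aggregation data.
  static Partial partial1(double[] rows) {
    Partial p = new Partial();
    for (double r : rows) {
      p.count++;
      p.sum += r;
    }
    return p;
  }

  // FINAL: partial aggregation data -> full aggregation data.
  static double finalAvg(Partial... partials) {
    long count = 0;
    double sum = 0;
    for (Partial p : partials) {
      count += p.count;
      sum += p.sum;
    }
    return sum / count;
  }

  public static void main(String[] args) {
    Partial a = partial1(new double[] {1, 2});
    Partial b = partial1(new double[] {3});
    System.out.println(finalAvg(a, b)); // 2.0
  }
}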
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index fa056e9..83e41a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -323,7 +323,6 @@ public class VectorizedRowBatchCtx {
    * Creates a Vectorized row batch and the column vectors.
    *
    * @return VectorizedRowBatch
-   * @throws HiveException
    */
   public VectorizedRowBatch createVectorizedRowBatch()
   {
@@ -381,7 +380,6 @@ public class VectorizedRowBatchCtx {
    *
    * @param batch
    * @param partitionValues
-   * @throws HiveException
    */
   public void addPartitionColsToBatch(VectorizedRowBatch batch, Object[] partitionValues)
   {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
index 9de2e92..f9a86ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
@@ -24,7 +24,7 @@ import java.util.Random;
  * A high-performance set implementation used to support fast set membership testing,
  * using Cuckoo hashing. This is used to support fast tests of the form
  *
- *       column IN ( <list-of-values )
+ *       column IN ( list-of-values )
  *
  * For details on the algorithm, see R. Pagh and F. F. Rodler, "Cuckoo Hashing,"
  * Elsevier Science preprint, Dec. 2003. http://www.itu.dk/people/pagh/papers/cuckoo-jour.pdf.
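A minimal cuckoo-hashing sketch of the membership test described above (illustrative only; the Hive classes add rehashing on cycles, randomized hash salts, and byte-array keys). The key property is that contains() is at most two probes, one per table:

import java.util.Arrays;

public class CuckooSetSketch {
  private static final long EMPTY = Long.MIN_VALUE; // sentinel; not insertable
  private final long[] t1;
  private final long[] t2;

  CuckooSetSketch(int capacity) {
    t1 = new long[capacity];
    t2 = new long[capacity];
    Arrays.fill(t1, EMPTY);
    Arrays.fill(t2, EMPTY);
  }

  private int h1(long k) { return (int) (((k * 0x9E3779B97F4A7C15L) >>> 33) % t1.length); }
  private int h2(long k) { return (int) (((k * 0xC2B2AE3D27D4EB4FL) >>> 33) % t2.length); }

  boolean contains(long k) {
    return t1[h1(k)] == k || t2[h2(k)] == k; // at most two probes
  }

  void insert(long k) {
    if (contains(k)) {
      return;
    }
    long cur = k;
    for (int i = 0; i < 32; i++) {   // bounded displacement chain
      long evicted = t1[h1(cur)];
      t1[h1(cur)] = cur;             // place in table 1, possibly evicting
      if (evicted == EMPTY) {
        return;
      }
      cur = evicted;
      evicted = t2[h2(cur)];
      t2[h2(cur)] = cur;             // displaced key moves to table 2
      if (evicted == EMPTY) {
        return;
      }
      cur = evicted;
    }
    throw new IllegalStateException("cycle detected; a real implementation rehashes");
  }

  public static void main(String[] args) {
    CuckooSetSketch set = new CuckooSetSketch(8);
    for (long v : new long[] {10, 20, 30}) {
      set.insert(v);
    }
    System.out.println(set.contains(20)); // true
    System.out.println(set.contains(25)); // false
  }
}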
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetDouble.java
index c5212c6..1842d0d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetDouble.java
@@ -23,7 +23,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
  * A high-performance set implementation used to support fast set membership testing,
  * using Cuckoo hashing. This is used to support fast tests of the form
  *
- *       column IN ( <list-of-values )
+ *       column IN ( list-of-values )
  *
  * For double, we simply layer over the implementation for long. Double.doubleToRawLongBits
  * is used to convert a 64-bit double to a 64-bit long with bit-for-bit fidelity.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetLong.java
index c355985..be77f9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetLong.java
@@ -25,7 +25,7 @@ import java.util.Random;
  * A high-performance set implementation used to support fast set membership testing,
  * using Cuckoo hashing. This is used to support fast tests of the form
  *
- *       column IN ( <list-of-values )
+ *       column IN ( list-of-values )
  *
  * For details on the algorithm, see R. Pagh and F. F. Rodler, "Cuckoo Hashing,"
  * Elsevier Science preprint, Dec. 2003. http://www.itu.dk/people/pagh/papers/cuckoo-jour.pdf.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
index 61bcbf0..32f77b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
@@ -611,7 +611,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
    *          selected in use.
    * @param joinResult
    *          The hash map lookup result for the repeated key.
-   * @param hashMapResults
+   * @param hashMapResult
    *          The array of all hash map results for the batch.
    * @param someRowsFilteredOut
    *          Whether some rows of the repeated key batch were knocked out by the filter.
@@ -619,10 +619,6 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
    *          A copy of the batch's selectedInUse flag on input to the process method.
    * @param inputLogicalSize
    *          The batch's size on input to the process method.
-   * @param scratch1
-   *          Pre-allocated storage to internal use.
-   * @param scratch2
-   *          Pre-allocated storage to internal use.
    */
   public void finishOuterRepeated(VectorizedRowBatch batch, JoinUtil.JoinResult joinResult,
       VectorMapJoinHashMapResult hashMapResult, boolean someRowsFilteredOut,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashKeyRef.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashKeyRef.java
index dbfe518..88b1aab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashKeyRef.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashKeyRef.java
@@ -169,7 +169,6 @@ public class VectorMapJoinFastBytesHashKeyRef {
 
   /**
    * Get partial hash code from the reference word.
-   * @param hashCode
    * @return
    */
   public static long getPartialHashCodeFromRefWord(long refWord) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java
index b71ebb6..65a1c68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java
@@ -62,7 +62,7 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *      ---------------------------------
    *                                       |
    *                                       v
-   *       <5 0's for Next Relative Offset> <Key Bytes> <Value Length> <Value Bytes>
+   *       &lt;5 0's for Next Relative Offset&gt; &lt;Key Bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *                NEXT (NONE)                 KEY                        VALUE
    *
    * NOTE: AbsoluteOffset.byteLength = 5
@@ -76,7 +76,7 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *      ---------------------------------
    *                                       |
    *                                       v
-   *      <5 0's for Next Relative Offset> [Big Key Length] <Key Bytes> <Value Length> <Value Bytes>
+   *      &lt;5 0's for Next Relative Offset&gt; [Big Key Length] &lt;Key Bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *                NEXT (NONE)                optional        KEY                        VALUE
    *
    *  3) Two elements when key length is small and stored in reference word:
@@ -88,7 +88,7 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *      ------------------------------------
    *                                         |
    *                                         v
-   *      <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Bytes>
+   *      &lt;Next Value Rel Offset as 5 bytes&gt; &lt;Key Bytes&gt; &lt;Value Bytes&gt;
    *         |     NEXT                         KEY         VALUE
    *         |
    *         | first record absolute offset + relative offset
@@ -96,7 +96,7 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *         --------
    *                 |
    *                 v
-   *                <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
+   *                &lt;5 0's Padding for Next Value Ref&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *                     NEXT (NONE)                                     VALUE
    *
    *  4) Three elements showing how first record updated to point to new value and
@@ -109,20 +109,20 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *      ------------------------------------
    *                                         |
    *                                         v
-   *      <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Bytes>
+   *      &lt;Next Value Rel Offset as 5 bytes&gt; &lt;Key Bytes&gt; &lt;Value Bytes&gt;
    *         |     NEXT                         KEY         VALUE
    *         |
    *         | first record absolute offset + relative offset
    *         |
    *         |
-   *         |      <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
+   *         |      &lt;5 0's Padding for Next Value Ref&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *         |      ^    NEXT (NONE)                                    VALUE
    *         |      |
    *         |      ------
    *         |            |
    *         |            | new record absolute offset - (minus) relative offset
    *         |            |
-   *          -----><Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
+   *          -----&gt;&lt;Next Value Rel Offset as 5 bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *                     NEXT                                            VALUE
    *
    *
@@ -136,26 +136,26 @@ public class VectorMapJoinFastBytesHashMapStore implements MemoryEstimate {
    *      ------------------------------------
    *                                         |
    *                                         v
-   *      <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Length> <Value Bytes>
+   *      &lt;Next Value Rel Offset as 5 bytes&gt; &lt;Key Bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *         |     NEXT                          KEY                      VALUE
    *         |
    *         | first record absolute offset + relative offset
    *         |
    *         |
-   *         |      <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
+   *         |      &lt;5 0's Padding for Next Value Ref&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *         |      ^    NEXT (NONE)                                     VALUE
    *         |      |
    *         |      ------
    *         |            | record absolute offset - (minus) relative offset
    *         |            |
-   *         |      <Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
+   *         |      &lt;Next Value Rel Offset as 5 bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *         |      ^       NEXT                                         VALUE
    *         |      |
    *         |      ------
    *         |            |
    *         |            | new record absolute offset - (minus) relative offset
    *         |            |
-   *          -----><Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
+   *          -----&gt;&lt;Next Value Rel Offset as 5 bytes&gt; &lt;Value Length&gt; &lt;Value Bytes&gt;
    *                        NEXT                                         VALUE
    *
    *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
index 20fa03a..db103b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
@@ -58,7 +58,7 @@ public class VectorMapJoinFastBytesHashMultiSetStore implements MemoryEstimate {
    *      --------------------------------------
    *                                           |
    *                                           v
-   *       <4 bytes's for set membership count> <Key Bytes>
+   *       &lt;4 bytes for set membership count&gt; &lt;Key Bytes&gt;
    *            COUNT                              KEY
    *
    * NOTE: MultiSetCount.byteLength = 4
@@ -72,7 +72,7 @@ public class VectorMapJoinFastBytesHashMultiSetStore implements MemoryEstimate {
    *      -------------------------------------
    *                                          |
    *                                          v
-   *      <4 byte's for set membership count> [Big Key Length] <Key Bytes>
+   *      &lt;4 bytes for set membership count&gt; [Big Key Length] &lt;Key Bytes&gt;
   *                COUNT                         optional           KEY
    */
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
index 1a78688..d95722d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
@@ -57,7 +57,7 @@ public class VectorMapJoinFastBytesHashSetStore implements MemoryEstimate {
    *      |
    *      |
    *      v
-   *      <Key Bytes>
+   *      &lt;Key Bytes&gt;
    *        KEY
    *
    *  2) One element, general: shows optional big key length.
@@ -68,7 +68,7 @@ public class VectorMapJoinFastBytesHashSetStore implements MemoryEstimate {
    *      |
    *      |
    *      v
-   *      [Big Key Length] <Key Bytes>
+   *      [Big Key Length] &lt;Key Bytes&gt;
    *        optional           KEY
    */
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
index 8bf2ccb..1bb2249 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
  * A hash map key wrapper for vectorized processing.
  * It stores the key values as primitives in arrays for each supported primitive type.
  * This works in conjunction with
- * {@link org.apache.hadoop.hive.ql.exec.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
+ * {@link VectorHashKeyWrapperBatch}
  * to hash vectorized processing units (batches).
  */
 public abstract class VectorHashKeyWrapperBase extends KeyWrapper {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
index 8fe53e7..c605ce3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
@@ -44,7 +44,7 @@ import com.google.common.base.Preconditions;
  * A hash map key wrapper for vectorized processing.
  * It stores the key values as primitives in arrays for each supported primitive type.
  * This works in conjunction with
- * {@link org.apache.hadoop.hive.ql.exec.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
+ * {@link VectorHashKeyWrapperBatch}
  * to hash vectorized processing units (batches).
  */
 public class VectorHashKeyWrapperGeneral extends VectorHashKeyWrapperBase {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
index b45cc8c..e6b8490 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
@@ -199,7 +199,7 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
      * Multiple inserts into legacy (pre-acid) tables can generate multiple copies of each bucket
      * file.
      * @see org.apache.hadoop.hive.ql.exec.Utilities#COPY_KEYWORD
-     * @param copyNumber the number of the copy ( > 0)
+     * @param copyNumber the number of the copy ( &gt; 0)
      * @return this
      */
     public Options copyNumber(int copyNumber) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
index bdd16c5..eb9ded7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
@@ -72,7 +72,7 @@ public enum BucketCodec {
    * by {@link RecordIdentifier} which includes the {@link RecordIdentifier#getBucketProperty()}
    * which has the actual bucket ID in the high order bits.  This scheme also ensures that 
    * {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator#process(Object, int)} works in case
-   * there numBuckets > numReducers.  (The later could be fixed by changing how writers are
+   * where numBuckets &gt; numReducers.  (The latter could be fixed by changing how writers are
    * initialized in "if (fpaths.acidLastBucket != bucketNum) {")
    */
   V1(1) {
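The idea behind V1, shown with hypothetical bit widths (the real codec's layout differs; see the class for the authoritative masks): the bucket id sits in the high-order bits of the bucketProperty word, so ordering by the property groups rows by bucket first.

public class BucketPropertySketch {
  // Hypothetical layout: bucket id in the high 16 bits, statement id in the low 16.
  static int encode(int bucketId, int statementId) {
    return (bucketId << 16) | (statementId & 0xFFFF);
  }

  static int decodeBucketId(int property) {
    return property >>> 16;
  }

  public static void main(String[] args) {
    int property = encode(3, 1);
    System.out.println(decodeBucketId(property)); // 3
  }
}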
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
index 41d9016..7a49121 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
@@ -121,11 +121,6 @@ public class FlatFileInputFormat<T> extends
       return conf;
     }
 
-    /**
-     * @return the actual class being deserialized.
-     * @exception does
-     *              not currently throw IOException
-     */
     @Override
     public Class<S> getRealClass() throws IOException {
       return (Class<S>) conf.getClass(SerializationSubclassKey, null,
@@ -145,8 +140,6 @@ public class FlatFileInputFormat<T> extends
      * deserialized; in this context, that assumption isn't necessarily true.
      *
      * @return the serialization object for this context
-     * @exception does
-     *              not currently throw any IOException
      */
     @Override
     public Serialization<S> getSerialization() throws IOException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 60833bf..11876fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.mapred.RecordReader;
   * data.  The binary search can be used by setting the value of inputFormatSorted in the
   * MapreduceWork to true, but it should only be used if the data is going to a FilterOperator,
   * which filters by comparing a value in the data with a constant, using one of the comparisons
-  * =, <, >, <=, >=.  If the RecordReader's underlying format is an RCFile, this object can perform
+  * =, &lt;, &gt;, &lt;=, &gt;=.  If the RecordReader's underlying format is an RCFile, this object can perform
   * a binary search to find the block to begin reading from, and stop reading once it can be
   * determined no other entries will match the filter.
   */
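The binary search described above relies only on the data being sorted on the filtered value: for a filter like value &gt;= constant, the first position that can match is found in O(log n), and everything before it is skipped. A standalone sketch of that lower-bound step:

public class SortedScanSketch {
  // Returns the first index i with data[i] >= constant, for data sorted ascending.
  static int firstPossibleMatch(long[] data, long constant) {
    int lo = 0;
    int hi = data.length;
    while (lo < hi) {
      int mid = (lo + hi) >>> 1;
      if (data[mid] < constant) {
        lo = mid + 1;
      } else {
        hi = mid;
      }
    }
    return lo;
  }

  public static void main(String[] args) {
    long[] keys = {1, 3, 5, 7, 9};
    System.out.println(firstPossibleMatch(keys, 6)); // 3: start reading at index 3
  }
}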
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
index 8746a20..9d05510 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.util.Progressable;
 
 /**
- * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the <key,
- * value> to TextOutputFormat.RecordWriter.
+ * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the &lt;key,
+ * value&gt; to TextOutputFormat.RecordWriter.
  *
  */
 public class HiveIgnoreKeyTextOutputFormat<K extends WritableComparable, V extends Writable>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
index c221579..a3bddbf 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.util.Progressable;
 
 /**
- * This class replaces key with null before feeding the <key, value> to
+ * This class replaces key with null before feeding the &lt;key, value&gt; to
  * TextOutputFormat.RecordWriter.
  * 
 * @deprecated use {@link HiveIgnoreKeyTextOutputFormat} instead
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index b7c990b..3e45e45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -99,10 +99,10 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
  * The {@link Reader} is used to read and explain the bytes of RCFile.
  * </p>
  *
- * <h4 id="Formats">RCFile Formats</h4>
+ * <h3 id="Formats">RCFile Formats</h3>
  *
  *
- * <h5 id="Header">RC Header</h5>
+ * <h4 id="Header">RC Header</h4>
  * <ul>
  * <li>version - 3 bytes of magic header <b>RCF</b>, followed by 1 byte of
  * actual version number (e.g. RCF1)</li>
@@ -114,10 +114,10 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
  * <li>sync - A sync marker to denote end of the header.</li>
  * </ul>
  *
- * <h5>RCFile Format</h5>
+ * <h4>RCFile Format</h4>
  * <ul>
  * <li><a href="#Header">Header</a></li>
- * <li>Record
+ * <li>Record </li>
  * <li>Key part
  * <ul>
  * <li>Record length in bytes</li>
@@ -133,7 +133,6 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
  * <li>...</li>
  * </ul>
  * </li>
- * </li>
  * <li>Value part
  * <ul>
  * <li>Compressed or plain data of [column_1_row_1_value,
@@ -143,7 +142,6 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
  * </ul>
  * </li>
  * </ul>
- * <p>
  * <pre>
  * {@code
  * The following is a pseudo-BNF grammar for RCFile. Comments are prefixed
@@ -336,7 +334,6 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
  * Text ::= VInt, Chars (Length prefixed UTF-8 characters)
  * }
  * </pre>
- * </p>
  */
 public class RCFile {
 
@@ -1095,7 +1092,7 @@ public class RCFile {
     private int columnBufferSize = 0;
 
     /**
-     * Append a row of values. Currently it only can accept <
+     * Append a row of values. Currently it can only accept
      * {@link BytesRefArrayWritable}. If its <code>size()</code> is less than the
      * column number in the file, zero bytes are appended for the empty columns.
      * If its size() is greater than the column number in the file, the exceeded
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
index 07abd37..3044603 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
@@ -144,7 +144,7 @@ public class MergeFileWork extends MapWork {
 
   /**
    * alter table ... concatenate
-   * <p/>
+   * <br>
    * If it is skewed table, use subdirectories in inputpaths.
    */
   public void resolveConcatenateMerge(HiveConf conf) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 2255f8b..21fe9ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -63,8 +63,8 @@ import com.google.common.base.Strings;
 /**
  * A RecordUpdater where the files are stored as ORC.
  * A note on various record structures: the {@code row} coming in (as in {@link #insert(long, Object)}
- * for example), is a struct like <RecordIdentifier, f1, ... fn> but what is written to the file
- * * is <op, owid, writerId, rowid, cwid, <f1, ... fn>> (see {@link #createEventSchema(ObjectInspector)})
+ * for example), is a struct like &lt;RecordIdentifier, f1, ... fn&gt; but what is written to the file
+ * is &lt;op, owid, writerId, rowid, cwid, &lt;f1, ... fn&gt;&gt; (see {@link #createEventObjectInspector(ObjectInspector)})
  * So there are OIs here to make the translation.
  */
 public class OrcRecordUpdater implements RecordUpdater {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index 61e7558..3eadc26 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -349,7 +349,7 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
   /**
    * Note: this is the bucket number as seen in the file name that contains this split.
    * Hive 3.0 encodes a bunch of info in the Acid schema's bucketId attribute.
-   * See: {@link org.apache.hadoop.hive.ql.io.BucketCodec.V1} for details.
+   * See: {@link org.apache.hadoop.hive.ql.io.BucketCodec#V1} for details.
    * @return
    */
   public int getBucketId() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index 2349cda..1795bb5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -798,10 +798,10 @@ public class VectorizedOrcAcidRowBatchReader
   /**
   * There are 2 types of schema from the {@link #baseReader} that this handles.  In the case where
   * the data was written to a transactional table from the start, every row is decorated with
-   * transaction related info and looks like <op, owid, writerId, rowid, cwid, <f1, ... fn>>.
+   * transaction related info and looks like &lt;op, owid, writerId, rowid, cwid, &lt;f1, ... fn&gt;&gt;.
    *
    * The other case is when data was written to non-transactional table and thus only has the user
-   * data: <f1, ... fn>.  Then this table was then converted to a transactional table but the data
+   * data: &lt;f1, ... fn&gt;.  This table was then converted to a transactional table but the data
    * files are not changed until major compaction.  These are the "original" files.
    *
    * In this case we may need to decorate the outgoing data with transactional column values at
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
index f3699f9..2359e8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
@@ -36,7 +36,7 @@ public interface EncodedReader {
    * @param encodings Externally provided metadata (from metadata reader or external cache).
    * @param streams Externally provided metadata (from metadata reader or external cache).
    * @param physicalFileIncludes The array of booleans indicating whether each column should be read.
-   * @param colRgs Arrays of rgs, per column set to true in included, that are to be read.
+   * @param rgs Arrays of rgs, per column set to true in included, that are to be read.
    *               null in each respective position means all rgs for this column need to be read.
    * @param consumer The sink for data that has been read.
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
index b6f68c9..450f008 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
@@ -43,7 +43,7 @@
  *   <li>Support for additional generic compression: LZO, SNAPPY, ZLIB.</li>
  * </ul>
  *
- * <p>
+ * <br>
  * <b>Format:</b>
  * <pre>
  * {@code
@@ -54,9 +54,8 @@
  * PS LENGTH (1 byte)
  * }
  * </pre>
- * </p>
  *
- * <p>
+ * <br>
  * <b>Stripe:</b>
  * <pre>
  * {@code
@@ -65,6 +64,5 @@
  * STRIPE-FOOTER
  * }
  * </pre>
- * </p>
  */
 package org.apache.hadoop.hive.ql.io.orc;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
index d83376d..ba69795 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.ArrayWritable;
 
 /**
  *
- * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.<br />
+ * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.<br>
  * It can also inspect a List if Hive decides to inspect the result of an inspection.
  *
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
index 75b1ad1..3d49006 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
@@ -21,8 +21,8 @@ import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
 
 /**
- * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br />
- * It can also inspect a Map if Hive decides to inspect the result of an inspection.<br />
+ * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br>
+ * It can also inspect a Map if Hive decides to inspect the result of an inspection.<br>
  * When trying to access elements from the map it will iterate over all keys, inspecting them and comparing them to the
  * desired key.
  *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index aec7423..8da396e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
 
 /**
- * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.<br />
+ * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as a Hive array.<br>
  * It can also inspect a List if Hive decides to inspect the result of an inspection.
  *
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
index 2cb2deb..1f28bb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
@@ -20,7 +20,7 @@ import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
 
 /**
- * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br />
+ * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.<br>
  * It can also inspect a Map if Hive decides to inspect the result of an inspection.
  *
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
index dc854d9..8fbe9d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
@@ -30,7 +30,7 @@ public interface Node {
    * Gets the vector of children nodes. This is used in the graph walker
    * algorithms.
    * 
-   * @return List<? extends Node>
+   * @return List&lt;? extends Node&gt;
    */
   List<? extends Node> getChildren();
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
index 9ddfe68..10409b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
@@ -36,13 +36,13 @@ public class RuleExactMatch implements Rule {
   * The rule is specified as operator names separated by % symbols; the left side represents the
    * bottom of the stack.
    *
-   * E.g. TS%FIL%RS -> means
+   * E.g. TS%FIL%RS -&gt; means
    * TableScan Node followed by Filter followed by ReduceSink in the tree, or, in terms of the
    * stack, ReduceSink on top followed by Filter followed by TableScan
    *
    * @param ruleName
    *          name of the rule
-   * @param regExp
+   * @param pattern
    *          string specification of the rule
    **/
   public RuleExactMatch(String ruleName, String[] pattern) {
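The exact-match rule above compares the whole operator stack, bottom (left end of the pattern) to top, with no wildcards. A standalone sketch of that check:

import java.util.List;

public class ExactMatchSketch {
  // pattern is bottom-to-top, e.g. {"TS", "FIL", "RS"} for "TS%FIL%RS".
  static boolean matches(String[] pattern, List<String> stackBottomUp) {
    if (pattern.length != stackBottomUp.size()) {
      return false;
    }
    for (int i = 0; i < pattern.length; i++) {
      if (!pattern[i].equals(stackBottomUp.get(i))) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    String[] rule = {"TS", "FIL", "RS"};
    System.out.println(matches(rule, List.of("TS", "FIL", "RS"))); // true
    System.out.println(matches(rule, List.of("TS", "RS")));        // false
  }
}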
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
index 1ab8cd8..db62db2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
@@ -90,7 +90,7 @@ public class RuleRegExp implements Rule {
 
   /**
   * The rule is specified by a regular expression. Note that the regular
-   * expression is specified in terms of Node name. For eg: TS.*RS -> means
+   * expression is specified in terms of Node names. E.g., TS.*RS -&gt; means
    * TableScan Node followed by anything any number of times followed by
    * ReduceSink
    * 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 616277f..43dba73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -177,7 +177,7 @@ public interface HiveTxnManager {
   * {@link ValidTxnWriteIdList} object can be passed as a string to the processing
   * tasks for use in reading the data.  This call will return the same results as long as the
   * validTxnString passed is the same.
-   * @param tableList list of tables (<db_name>.<table_name>) read/written by current transaction.
+   * @param tableList list of tables (&lt;db_name&gt;.&lt;table_name&gt;) read/written by current transaction.
    * @param validTxnList snapshot of valid txns for the current txn
    * @return list of valid table write Ids.
    * @throws LockException
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
index c49f53f..8dc5eb0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
@@ -26,13 +26,13 @@ import org.apache.logging.log4j.core.pattern.ArrayPatternConverter;
 import org.apache.logging.log4j.core.pattern.ConverterKeys;
 
 /**
- * FilePattern converter that converts %pid pattern to <process-id>@<hostname> information
+ * FilePattern converter that converts %pid pattern to &lt;process-id&gt;@&lt;hostname&gt; information
  * obtained at runtime.
  *
  * Example usage:
- * <RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz">
+ * &lt;RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz"&gt;
  *
- * Will generate output file with name containing <process-id>@<hostname> like below
+ * Will generate an output file with a name containing &lt;process-id&gt;@&lt;hostname&gt;, like below
  * test.log.95232@localhost.gz
  */
 @Plugin(name = "PidFilePatternConverter", category = "FileConverter")
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3a313b0..17576ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -433,7 +433,7 @@ public class Hive {
 
   /**
    * Gets the allowClose flag which determines if it is allowed to close metastore connections.
-   * @returns allowClose flag
+   * @return allowClose flag
    */
   public boolean allowClose() {
     return isAllowClose;
@@ -649,9 +649,7 @@ public class Hive {
    *          new name of the table. could be the old name
    * @param transactional
    *          Need to generate and save a table snapshot into the metastore?
-   * @throws InvalidOperationException
-   *           if the changes in metadata is not acceptable
-   * @throws TException
+   * @throws HiveException
    */
   public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext,
                          boolean transactional)
@@ -738,7 +736,7 @@ public class Hive {
    *          new partition
   * @throws InvalidOperationException
   *           if the changes in metadata are not acceptable
-   * @throws TException
+   * @throws HiveException
    */
   @Deprecated
   public void alterPartition(String tblName, Partition newPart,
@@ -763,7 +761,7 @@ public class Hive {
    *          indicates this call is for transaction stats
   * @throws InvalidOperationException
   *           if the changes in metadata are not acceptable
-   * @throws TException
+   * @throws HiveException
    */
   public void alterPartition(String catName, String dbName, String tblName, Partition newPart,
                              EnvironmentContext environmentContext, boolean transactional)
@@ -820,7 +818,7 @@ public class Hive {
    *          Need to generate and save a table snapshot into the metastore?
   * @throws InvalidOperationException
   *           if the changes in metadata are not acceptable
-   * @throws TException
+   * @throws HiveException
    */
   public void alterPartitions(String tblName, List<Partition> newParts,
                               EnvironmentContext environmentContext, boolean transactional)
@@ -863,9 +861,7 @@ public class Hive {
    *          spec of old partition
    * @param newPart
    *          new partition
-   * @throws InvalidOperationException
-   *           if the changes in metadata is not acceptable
-   * @throws TException
+   * @throws HiveException
    */
   public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partition newPart)
       throws HiveException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
index 6418bd5..e635670 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
@@ -25,11 +25,11 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * PartitionIterable - effectively a lazy Iterable<Partition>
+ * PartitionIterable - effectively a lazy Iterable&lt;Partition&gt;
  *
  * Sometimes, we have a need for iterating through a list of partitions,
  * but the list of partitions can be too big to fetch as a single object.
- * Thus, the goal of PartitionIterable is to act as an Iterable<Partition>
+ * Thus, the goal of PartitionIterable is to act as an Iterable&lt;Partition&gt;
  * while lazily fetching each relevant partition, one after the other as
  * independent metadata calls.
  *
@@ -134,7 +134,7 @@ public class PartitionIterable implements Iterable<Partition> {
   /**
    * Dummy constructor, which simply acts as an iterator on an already-present
    * list of partitions, allows for easy drop-in replacement for other methods
-   * that already have a List<Partition>
+   * that already have a List&lt;Partition&gt;
    */
   public PartitionIterable(Collection<Partition> ptnsProvided){
     this.currType = Type.LIST_PROVIDED;
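Usage is the same however the iterable is built; a hedged sketch assuming a Hive classpath and the list-backed constructor shown in the hunk above:

import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.PartitionIterable;

public class PartitionIterableUsage {
  static void process(List<Partition> alreadyFetched) {
    // Iterates one partition at a time; a metastore-backed instance
    // would fetch batches lazily behind the same interface.
    for (Partition p : new PartitionIterable(alreadyFetched)) {
      System.out.println(p.getName());
    }
  }
}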
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index cd483eb..fb1c8d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -681,7 +681,7 @@ public class Table implements Serializable {
    * Returns a list of all the columns of the table (data columns + partition
   * columns, in that order).
    *
-   * @return List<FieldSchema>
+   * @return List&lt;FieldSchema&gt;
    */
   public List<FieldSchema> getAllCols() {
     ArrayList<FieldSchema> f_list = new ArrayList<FieldSchema>();
@@ -919,7 +919,7 @@ public class Table implements Serializable {
   }
 
   /**
-   * Creates a partition name -> value spec map object
+   * Creates a partition name -&gt; value spec map object
    *
    * @param tp
    *          Use the information from this partition.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 0f53ae4..80e3d8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -92,7 +92,6 @@ public interface MetaDataFormatter {
    * @param cols
    * @param isFormatted - describe with formatted keyword
    * @param isExt
-   * @param isPretty
    * @param isOutputPadded - if true, add spacing and indentation
    * @param colStats
    * @param fkInfo  foreign keys information
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index 60d56e9..d5f51bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -303,7 +303,7 @@ public final class ColumnPrunerProcFactory {
    * - add column names referenced in WindowFn args and in WindowFn expressions
    *   to the pruned list of the child Select Op.
    * - finally we set the prunedColList on the ColumnPrunerContx;
-   *   and update the RR & signature on the PTFOp.
+   *   and update the RR &amp; signature on the PTFOp.
    */
   public static class ColumnPrunerPTFProc extends ColumnPrunerScriptProc {
     @Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index d9686b0..a048253 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 /**
  * This class implements the processor context for Constant Propagate.
  *
- * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
+ * ConstantPropagateProcCtx keeps track of propagated constants in a column-&gt;const map for each
  * operator, enabling constants to be resolved across operators.
  */
 public class ConstantPropagateProcCtx implements NodeProcessorCtx {
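
For orientation, the per-operator bookkeeping described in this comment amounts to a nested map; a minimal sketch of its shape (the field name here is hypothetical and the generics are simplified, not necessarily what the class actually declares):

    // one "column -> constant expression" map per operator in the plan
    private final Map<Operator<? extends OperatorDesc>, Map<ColumnInfo, ExprNodeDesc>>
        opToConstantExprs = new HashMap<>();
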
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index c08168a..736185f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -120,7 +120,7 @@ public final class ConstantPropagateProcFactory {
   /**
    * Get ColumnInfo from column expression.
    *
-   * @param rr
+   * @param rs
    * @param desc
    * @return
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
index b82b509..542d356 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
@@ -62,14 +62,14 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 
 /**
  * Queries of form : select max(c), count(distinct c) from T; generates a plan
- * of form TS->mGBy->RS->rGBy->FS This plan suffers from a problem that vertex
- * containing rGBy->FS necessarily need to have 1 task. This limitation results
+ * of form TS-&gt;mGBy-&gt;RS-&gt;rGBy-&gt;FS This plan suffers from a problem that vertex
+ * containing rGBy-&gt;FS necessarily need to have 1 task. This limitation results
  * in slow execution because that task gets all the data. This optimization if
  * successful will rewrite above plan to mGby1-rs1-mGby2-mGby3-rs2-rGby1 This
  * introduces extra vertex of mGby2-mGby3-rs2. Note this vertex can have
  * multiple tasks and since we are doing aggregation, output of this must
  * necessarily be smaller than its input, which results in much less data going
- * in to original rGby->FS vertex, which continues to have single task. Also
+ * in to original rGby-&gt;FS vertex, which continues to have single task. Also
  * note on calcite tree we have HiveExpandDistinctAggregatesRule rule which does
  * similar plan transformation but has different conditions which needs to be
  * satisfied. Additionally, we don't do any costing here but this is possibly
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
index 782ce16..a6e2f53 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
@@ -176,8 +176,6 @@ public class GenMRProcContext implements NodeProcessorCtx {
    *          hive configuration
    * @param opTaskMap
    *          reducer to task mapping
-   * @param seenOps
-   *          operator already visited
    * @param parseCtx
    *          current parse context
    * @param rootTasks
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index bb575d3..3277765 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -434,8 +434,8 @@ public final class GenMapRedUtils {
    *          current alias
    * @param topOp
    *          the top operator of the stack
-   * @param plan
-   *          current plan
+   * @param task
+   *          current task
    * @param local
    *          whether you need to add to map-reduce or local work
    * @param opProcCtx
@@ -454,8 +454,8 @@ public final class GenMapRedUtils {
    *          current alias
    * @param topOp
    *          the top operator of the stack
-   * @param plan
-   *          current plan
+   * @param task
+   *          current task
    * @param local
    *          whether you need to add to map-reduce or local work
    * @param opProcCtx
@@ -476,13 +476,11 @@ public final class GenMapRedUtils {
    *
    * @param alias_id
    *          current alias
-   * @param topOp
-   *          the top operator of the stack
    * @param plan
    *          map work to initialize
    * @param local
    *          whether you need to add to map-reduce or local work
-   * @param pList
+   * @param partsList
    *          pruned partition list. If it is null it will be computed on-the-fly.
    * @param inputs
    *          read entities for the map work
@@ -764,7 +762,7 @@ public final class GenMapRedUtils {
    *          whether you need to add to map-reduce or local work
    * @param tt_desc
    *          table descriptor
-   * @throws SerDeException
+   * @throws SemanticException
    */
   public static void setTaskPlan(Path path, String alias,
       Operator<? extends OperatorDesc> topOp, MapWork plan, boolean local,
@@ -1254,11 +1252,11 @@ public final class GenMapRedUtils {
    *          v
    *          FileSinkOperator (fsMerge)
    *
-   *          Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths
+   *          Here the pathToPartitionInfo &amp; pathToAlias will remain the same, which means the paths
    *          do
    *          not contain the dynamic partitions (their parent). So after the dynamic partitions are
    *          created (after the first job finished before the moveTask or ConditionalTask start),
-   *          we need to change the pathToPartitionInfo & pathToAlias to include the dynamic
+   *          we need to change the pathToPartitionInfo &amp; pathToAlias to include the dynamic
    *          partition
    *          directories.
    *
@@ -1616,8 +1614,8 @@ public final class GenMapRedUtils {
    *
    * @param fsInputDesc
    * @param finalName
+   * @param hasDynamicPartitions
    * @param ctx
-   * @param inputFormatClass
    * @return MergeWork if table is stored as RCFile or ORCFile,
    *         null otherwise
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
index bd0cbab..e368570 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
@@ -50,11 +50,11 @@ import com.google.common.collect.Multimap;
 /**
  * This optimizer is used to reduce the input size for the query for queries which are
  * specifying a limit.
- * <p/>
+ * <br>
  * For eg. for a query of type:
- * <p/>
- * select expr from T where <filter> limit 100;
- * <p/>
+ * <br>
+ * select expr from T where &lt;filter&gt; limit 100;
+ * <br>
  * Most probably, the whole table T need not be scanned.
  * Chances are that even if we scan the first file of T, we would get the 100 rows
  * needed by this query.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 5b4b098..7c841ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -55,15 +55,15 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
  *
  *  Without this optimization:
  *
- *  TS -> FIL -> SEL -> RS ->
- *                             JOIN -> SEL -> FS
- *  TS -> FIL -> SEL -> RS ->
+ *  TS -&gt; FIL -&gt; SEL -&gt; RS -&gt;
+ *                             JOIN -&gt; SEL -&gt; FS
+ *  TS -&gt; FIL -&gt; SEL -&gt; RS -&gt;
  *
  *  With this optimization
  *
- *  TS -> FIL -> RS ->
- *                      JOIN -> FS
- *  TS -> FIL -> RS ->
+ *  TS -&gt; FIL -&gt; RS -&gt;
+ *                      JOIN -&gt; FS
+ *  TS -&gt; FIL -&gt; RS -&gt;
  *
  *  Note absence of select operator after filter and after join operator.
  *  Also, see : identity_proj_remove.q
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
index 1dbe160..6cea72f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.hive.ql.plan.LimitDesc;
  * If RS is only for limiting rows, RSHash counts row with same key separately.
  * But if RS is for GBY, RSHash should forward all the rows with the same key.
  *
- * Legend : A(a) --> key A, value a, row A(a)
+ * Legend : A(a) --&gt; key A, value a, row A(a)
  *
  * If each RS in mapper tasks is forwarded rows like this
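
The "RS is only for limiting rows" case reduces to a bounded top-N structure over keys. A hedged sketch of that idea (class and method names are made up; the RSHash mentioned above additionally handles memory pressure and the group-by case, where every row sharing a surviving key must be forwarded):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    // Keeps the N smallest keys seen so far; duplicate keys count separately,
    // matching the "RS is only for limiting rows" behaviour described above.
    class TopNKeys<K extends Comparable<K>> {
      private final int n;
      // max-heap: the head is the worst (largest) key currently retained
      private final PriorityQueue<K> worstFirst =
          new PriorityQueue<>(Comparator.<K>naturalOrder().reversed());

      TopNKeys(int n) { this.n = n; }

      /** @return true if the row should be forwarded, false if the limit prunes it. */
      boolean offer(K key) {
        if (worstFirst.size() < n) {
          worstFirst.add(key);
          return true;
        }
        if (key.compareTo(worstFirst.peek()) >= 0) {
          return false; // not within the current top N
        }
        worstFirst.poll();  // evict the current worst key
        worstFirst.add(key);
        return true;
      }
    }
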
  *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index ceeeb8f..1256e1c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -276,11 +276,8 @@ public class MapJoinProcessor extends Transform {
   /**
   * convert a regular join to a map-side join.
    *
-   * @param opParseCtxMap
    * @param op
    *          join operator
-   * @param joinTree
-   *          qb join tree
    * @param mapJoinPos
    *          position of the source to be read as part of map-reduce framework. All other sources
    *          are cached in memory
@@ -624,11 +621,8 @@ public class MapJoinProcessor extends Transform {
   /**
   * convert a sortmerge join to a map-side join.
    *
-   * @param opParseCtxMap
    * @param smbJoinOp
    *          join operator
-   * @param joinTree
-   *          qb join tree
    * @param bigTablePos
    *          position of the source to be read as part of map-reduce framework. All other sources
    *          are cached in memory
@@ -798,7 +792,6 @@ public class MapJoinProcessor extends Transform {
    * @param mapJoinPos the position of big table as determined by either hints or auto conversion.
    * @param condns the join conditions
    * @return if given mapjoin position is a feasible big table position return same else -1.
-   * @throws SemanticException if given position is not in the big table candidates.
    */
   public static int checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) {
     Set<Integer> bigTableCandidates =
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
index 1626e26..ab86a80 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
@@ -93,7 +93,7 @@ public abstract class PrunerOperatorFactory {
     /**
      * Generate predicate.
      *
-     * Subclass should implement the function. Please refer to {@link OpProcFactory.FilterPPR}
+     * Subclass should implement the function. Please refer to {@link org.apache.hadoop.hive.ql.optimizer.ppr.OpProcFactory.FilterPPR}
      *
      * @param procCtx
      * @param fop
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
index c953e03..f70a6dc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
@@ -96,7 +96,7 @@ import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNSET;
  * in the query plan and merge them if they met some preconditions.
  *
  *  TS   TS             TS
- *  |    |     ->      /  \
+ *  |    |     -&gt;      /  \
  *  Op   Op           Op  Op
  *
  * <p>Now the rule has been extended to find opportunities to other operators
@@ -105,7 +105,7 @@ import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNSET;
  *  TS1   TS2    TS1   TS2            TS1   TS2
  *   |     |      |     |              |     |
  *   |    RS      |    RS              |    RS
- *    \   /        \   /       ->       \   /
+ *    \   /        \   /       -&gt;       \   /
  *   MapJoin      MapJoin              MapJoin
  *      |            |                  /   \
  *      Op           Op                Op   Op
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
index a5400d6..e581665 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
@@ -37,9 +37,7 @@ public class SparkMapJoinProcessor extends MapJoinProcessor {
   * convert a regular join to a map-side join.
    *
    * @param conf
-   * @param opParseCtxMap
    * @param op join operator
-   * @param joinTree qb join tree
    * @param bigTablePos position of the source to be read as part of
    *                   map-reduce framework. All other sources are cached in memory
    * @param noCheckOuterJoin
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 1f8a48c..6ed8b92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -613,7 +613,7 @@ public class HiveCalciteUtil {
 
   /**
    * Get top level select starting from root. Assumption here is root can only
-   * be Sort & Project. Also the top project should be at most 2 levels below
+   * be Sort &amp; Project. Also the top project should be at most 2 levels below
    * Sort; i.e Sort(Limit)-Sort(OB)-Select
    *
    * @param rootRel
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index 67312a4..f29b1f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -120,7 +120,7 @@ public class HiveProject extends Project implements HiveRelNode {
    * are projected multiple times.
    *
    * <p>
-   * This method could optimize the result as {@link #permute} does, but does
+   * This method could optimize the result as permute does, but does
    * not at present.
    *
    * @param rel
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
index 600c7c0..1d10c60 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
@@ -68,8 +68,8 @@ import com.google.common.collect.Lists;
  * have m+n=a, 2m+n=b where m is the #row in R1 and n is the #row in R2 then
  * m=b-a, n=2a-b, m-n=2b-3a
  * if it is except (distinct)
- * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
- * else R5 = Fil (2b-3a>0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
+ * then R5 = Fil (b-a&gt;0 &amp;&amp; 2a-b=0) R6 = select only keys from R5
+ * else R5 = Fil (2b-3a&gt;0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
  * Note that NULLs are handled the same as other values. Please refer to the test cases.
  */
 public class HiveExceptRewriteRule extends RelOptRule {
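
As a quick sanity check on the algebra in the comment above: with a = m + n and b = 2m + n,

    b - a   = (2m + n) - (m + n)    = m
    2a - b  = (2m + 2n) - (2m + n)  = n
    2b - 3a = (4m + 2n) - (3m + 3n) = m - n

so, for instance, a key occurring m = 3 times in R1 and n = 1 time in R2 yields a = 4 and b = 7, from which m = b - a = 3 and n = 2a - b = 1 are recovered.
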
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index c331eab..0c8c5e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -46,10 +46,10 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
 
 /** Not an optimization rule.
- * Rule to aid in translation from Calcite tree -> Hive tree.
+ * Rule to aid in translation from Calcite tree -&gt; Hive tree.
  * Transforms :
  *   Left     Right                  Left                    Right
- *       \   /           ->             \                   /
+ *       \   /           -&gt;             \                   /
  *       Join                          HashExchange       HashExchange
  *                                             \         /
  *                                                 Join
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
index e231b1d..1e39a1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
@@ -74,8 +74,8 @@ import com.google.common.collect.Sets;
  *
  * Similarly
  * <pre>
- * v1 <= c1 and c1 <= v2
- * <pre>
+ * v1 &lt;= c1 and c1 &lt;= v2
+ * </pre>
  * is rewritten to <p>c1 between v1 and v2</p>
  */
 public abstract class HivePointLookupOptimizerRule extends RelOptRule {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
index f7712e6..cdc94d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
@@ -52,12 +52,12 @@ import com.google.common.collect.Lists;
  * column statistics (if available).
  *
  * For instance, given the following predicate:
- *   a > 5
+ *   a &gt; 5
  * we can infer that the predicate will evaluate to false if the max
  * value for column a is 4.
  *
  * Currently we support the simplification of:
- *  - =, >=, <=, >, <
+ *  - =, &gt;=, &lt;=, &gt;, &lt;
  *  - IN
  *  - IS_NULL / IS_NOT_NULL
  */
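
The comparison-folding idea reduces to range checks against the column's min/max statistics. A minimal sketch with a hypothetical helper, ignoring NULLs for simplicity (the real rule operates on Calcite RexNode expressions and its stats providers):

    // Fold "col > lit" using column statistics, or return null if undecidable.
    static Boolean foldGreaterThan(long colMin, long colMax, long lit) {
      if (colMax <= lit) {
        return Boolean.FALSE; // e.g. "a > 5" with max(a) = 4 is always false
      }
      if (colMin > lit) {
        return Boolean.TRUE;  // every value already exceeds the literal
      }
      return null;            // stats alone cannot decide
    }
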
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 7ab4e12..50ed8ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -69,9 +69,9 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
  * <p>Sub-queries are represented by {@link RexSubQuery} expressions.
  *
  * <p>A sub-query may or may not be correlated. If a sub-query is correlated,
- * the wrapped {@link RelNode} will contain a {@link RexCorrelVariable} before
- * the rewrite, and the product of the rewrite will be a {@link Correlate}.
- * The Correlate can be removed using {@link RelDecorrelator}.
+ * the wrapped {@link RelNode} will contain a {@link org.apache.calcite.rex.RexCorrelVariable} before
+ * the rewrite, and the product of the rewrite will be a {@link org.apache.calcite.rel.core.Correlate}.
+ * The Correlate can be removed using {@link org.apache.calcite.sql2rel.RelDecorrelator}.
  */
 public class HiveSubQueryRemoveRule extends RelOptRule {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
index 8f96288..c51ae0d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * JDBCAggregationPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcAggregate}
+ * into a {@link JdbcAggregate}
  * and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}
  * operator so it will be sent to the external table.
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
index 5c03f87..0e88f53 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * JDBCProjectPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcProject}
+ * into a {@link JdbcProject}
  * and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}}
  * operator so it will be sent to the external table.
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
index aabd75e..a8eb070 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
@@ -51,7 +51,7 @@ import java.util.List;
  *   SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
  *   FROM TAB_A
  *   JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- *   WHERE TAB_A.ROW_ID > 5
+ *   WHERE TAB_A.ROW_ID &gt; 5
  *   GROUP BY a, b) inner_subq
  * GROUP BY a, b;
  *
@@ -61,10 +61,10 @@ import java.util.List;
  *   SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
  *   FROM TAB_A
  *   JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- *   WHERE TAB_A.ROW_ID > 5
+ *   WHERE TAB_A.ROW_ID &gt; 5
  *   GROUP BY a, b) source
  * ON (mv.a = source.a AND mv.b = source.b)
- * WHEN MATCHED AND mv.c + source.c <> 0
+ * WHEN MATCHED AND mv.c + source.c &lt;&gt; 0
  *   THEN UPDATE SET mv.s = mv.s + source.s, mv.c = mv.c + source.c
  * WHEN NOT MATCHED
  *   THEN INSERT VALUES (source.a, source.b, s, c);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 70f8343..b304e38 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -69,7 +69,7 @@ import com.google.common.collect.ImmutableList;
  * 1. Change the output col/ExprNodeColumn names to external names.<br>
  * 2. Verify if we need to use the "KEY."/"VALUE." in RS cols; switch to
  * external names if possible.<br>
- * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
+ * 3. In ExprNode &amp; in ColumnInfo the tableAlias/VirtualColumn is specified
  * differently for different GB/RS in pipeline. Remove the different treatments.
  * 4. VirtualColMap needs to be maintained
  *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
index 40cfcf5..9377fd2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
@@ -203,7 +203,7 @@ public class CorrelationOptimizer extends Transform {
   /**
    * Detect correlations and transform the query tree.
    *
-   * @param pactx
+   * @param pctx
    *          current parse context
    * @throws SemanticException
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
index c553dca..d2cf78b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
@@ -97,7 +97,7 @@ public final class CorrelationUtilities {
   * @param throwException whether to throw an exception when the input operator has multiple parents
    * @return the single parent or null when the input operator has multiple parents and
    *         throwException is false;
-   * @throws HiveException
+   * @throws SemanticException
    */
   protected static Operator<?> getSingleParent(Operator<?> operator,
       boolean throwException) throws SemanticException {
@@ -127,7 +127,7 @@ public final class CorrelationUtilities {
   * @param throwException whether to throw an exception when the input operator has multiple children
    * @return the single child or null when the input operator has multiple children and
    *         throwException is false;
-   * @throws HiveException
+   * @throws SemanticException
    */
   protected static Operator<?> getSingleChild(Operator<?> operator,
       boolean throwException) throws SemanticException {
@@ -477,8 +477,7 @@ public final class CorrelationUtilities {
    * @param newOperator the operator will be inserted between child and parent
    * @param child
    * @param parent
-   * @param context
-   * @throws HiveException
+   * @throws SemanticException
    */
   protected static void insertOperatorBetween(
       Operator<?> newOperator, Operator<?> parent, Operator<?> child)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
index 06498eb..076a996 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
@@ -119,18 +119,18 @@ public class ListBucketingPruner extends Transform {
    *
    * Complete dynamic-multi-dimension collection
    *
-   * (0,0) (1,a) * -> T
-   * (0,1) (1,b) -> T
-   * (0,2) (1,c) *-> F
-   * (0,3) (1,other)-> F
-   * (1,0) (2,a)-> F
-   * (1,1) (2,b) * -> T
-   * (1,2) (2,c)-> F
-   * (1,3) (2,other)-> F
-   * (2,0) (other,a) -> T
-   * (2,1) (other,b) -> T
-   * (2,2) (other,c) -> T
-   * (2,3) (other,other) -> T
+   * (0,0) (1,a) * -&gt; T
+   * (0,1) (1,b) -&gt; T
+   * (0,2) (1,c) *-&gt; F
+   * (0,3) (1,other)-&gt; F
+   * (1,0) (2,a)-&gt; F
+   * (1,1) (2,b) * -&gt; T
+   * (1,2) (2,c)-&gt; F
+   * (1,3) (2,other)-&gt; F
+   * (2,0) (other,a) -&gt; T
+   * (2,1) (other,b) -&gt; T
+   * (2,2) (other,c) -&gt; T
+   * (2,3) (other,other) -&gt; T
    * * is skewed value entry
    *
    * Expression Tree : ((c1=1) and (c2=a)) or ( (c1=3) or (c2=b))
@@ -171,7 +171,7 @@ public class ListBucketingPruner extends Transform {
    *
    * <pre>
    *     child_nd instanceof ExprNodeConstantDesc
-   *               && ((ExprNodeConstantDesc) child_nd).getValue() == null)
+   *               &amp;&amp; ((ExprNodeConstantDesc) child_nd).getValue() == null)
    * </pre>
    *
    * </blockquote>
@@ -410,7 +410,7 @@ public class ListBucketingPruner extends Transform {
    * 2. all other cases, select the directory
    * Use case #2:
    * Multiple dimension collection represents skewed elements so that walk through tree one by one.
-   * Cell is a List<String> representing the value mapping from index path and skewed value.
+   * Cell is a List&lt;String&gt; representing the value mapping from index path and skewed value.
    * skewed column: C1, C2, C3
    * skewed value: (1,a,x), (2,b,x), (1,c,x), (2,a,y)
    * Other: represent value for the column which is not part of skewed value.
@@ -428,8 +428,8 @@ public class ListBucketingPruner extends Transform {
    * ==============
    * please see another example in {@link ListBucketingPruner#prune}
    * We will use a HasMap to represent the Dynamic-Multiple-Dimension collection:
-   * 1. Key is List<Integer> representing the index path to the cell
-   * 2. value represents the cell (Boolean for use case #1, List<String> for case #2)
+   * 1. Key is List&lt;Integer&gt; representing the index path to the cell
+   * 2. value represents the cell (Boolean for use case #1, List&lt;String&gt; for case #2)
    * For example:
    * 1. skewed column (list): C1, C2, C3
    * 2. skewed value (list of list): (1,a,x), (2,b,x), (1,c,x), (2,a,y)
@@ -446,7 +446,7 @@ public class ListBucketingPruner extends Transform {
    *
    * We use the index,starting at 0. to construct hashmap representing dynamic-multi-dimension
    * collection:
-   * key (what skewed value key represents) -> value (Boolean for use case #1, List<String> for case
+   * key (what skewed value key represents) -&gt; value (Boolean for use case #1, List&lt;String&gt; for case
    * #2).
    * (0,0,0) (1,a,x)
    * (0,0,1) (1,a,y)
@@ -572,18 +572,18 @@ public class ListBucketingPruner extends Transform {
      * Index: (0,1,2) (0,1,2,3)
      *
      * Complete dynamic-multi-dimension collection
-     * (0,0) (1,a) * -> T
-     * (0,1) (1,b) -> T
-     * (0,2) (1,c) *-> F
-     * (0,3) (1,other)-> F
-     * (1,0) (2,a)-> F
-     * (1,1) (2,b) * -> T
-     * (1,2) (2,c)-> F
-     * (1,3) (2,other)-> F
-     * (2,0) (other,a) -> T
-     * (2,1) (other,b) -> T
-     * (2,2) (other,c) -> T
-     * (2,3) (other,other) -> T
+     * (0,0) (1,a) * -&gt; T
+     * (0,1) (1,b) -&gt; T
+     * (0,2) (1,c) *-&gt; F
+     * (0,3) (1,other)-&gt; F
+     * (1,0) (2,a)-&gt; F
+     * (1,1) (2,b) * -&gt; T
+     * (1,2) (2,c)-&gt; F
+     * (1,3) (2,other)-&gt; F
+     * (2,0) (other,a) -&gt; T
+     * (2,1) (other,b) -&gt; T
+     * (2,2) (other,c) -&gt; T
+     * (2,3) (other,other) -&gt; T
      * * is skewed value entry
      *
      * @param uniqSkewedElements
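
The index-path encoding used throughout this class maps directly onto a HashMap keyed by lists of indexes; java.util.List's value-based equals/hashCode is what makes index paths usable as keys. A small illustrative sketch of the use-case-#1 shape (Boolean cells), populated with a few cells from the (c1, c2) example above:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LbCellsDemo {
      public static void main(String[] args) {
        Map<List<Integer>, Boolean> cells = new HashMap<>();
        cells.put(Arrays.asList(0, 0), true);  // (1, a)         -> T
        cells.put(Arrays.asList(0, 2), false); // (1, c)         -> F
        cells.put(Arrays.asList(2, 3), true);  // (other, other) -> T
        // index paths behave as proper keys:
        System.out.println(cells.get(Arrays.asList(0, 0))); // prints: true
      }
    }
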
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
index 6c6908a..8903eb7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
@@ -104,7 +104,7 @@ public class PhysicalOptimizer {
    * invoke all the resolvers one-by-one, and alter the physical plan.
    *
    * @return PhysicalContext
-   * @throws HiveException
+   * @throws SemanticException
    */
   public PhysicalContext optimize() throws SemanticException {
     for (PhysicalPlanResolver r : resolvers) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
index 691e942..03324a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
@@ -47,7 +47,6 @@ public class PartExprEvalUtils {
    * Evaluate expression with partition columns
    *
    * @param expr
-   * @param partSpec
    * @param rowObjectInspector
    * @return value returned by the expression
    * @throws HiveException
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 6aeb2a8..6ba3f90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -224,45 +224,32 @@ public class StatsRulesProcFactory {
   /**
    * FILTER operator does not change the average row size but it does change the number of rows
    * emitted. The reduction in the number of rows emitted is dependent on the filter expression.
-   * <ul>
    * <i>Notations:</i>
+   * <ul>
    * <li>T(S) - Number of tuples in relations S</li>
    * <li>V(S,A) - Number of distinct values of attribute A in relation S</li>
    * </ul>
+   * <i>Rules:</i> 
    * <ul>
-   * <i>Rules:</i> <b>
-   * <li>Column equals a constant</li></b> T(S) = T(R) / V(R,A)
-   * <p>
-   * <b>
-   * <li>Inequality conditions</li></b> T(S) = T(R) / 3
-   * <p>
-   * <b>
-   * <li>Not equals comparison</li></b> - Simple formula T(S) = T(R)
-   * <p>
-   * - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
-   * <p>
-   * <b>
-   * <li>NOT condition</li></b> T(S) = 1 - T(S'), where T(S') is the satisfying condition
-   * <p>
-   * <b>
-   * <li>Multiple AND conditions</li></b> Cascadingly apply the rules 1 to 3 (order doesn't matter)
-   * <p>
-   * <b>
-   * <li>Multiple OR conditions</li></b> - Simple formula is to evaluate conditions independently
-   * and sum the results T(S) = m1 + m2
-   * <p>
-   * - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
-   * <p>
+   * <li><b>Column equals a constant</b> T(S) = T(R) / V(R,A)</li>
+   * <li><b>Inequality conditions</b> T(S) = T(R) / 3</li>
+   * <li><b>Not equals comparison</b> - Simple formula T(S) = T(R)</li>
+   * <li>- Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A) </li>
+   * <li><b>NOT condition</b> T(S) = 1 - T(S'), where T(S') is the satisfying condition</li>
+   * <li><b>Multiple AND conditions</b> Cascadingly apply the rules 1 to 3 (order doesn't matter)</li>
+   * <li><b>Multiple OR conditions</b> - Simple formula is to evaluate conditions independently
+   * and sum the results T(S) = m1 + m2</li>
+   * <li>- Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
+   * <br>
    * where, m1 is the number of tuples that satisfy condition1 and m2 is the number of tuples that
-   * satisfy condition2
+   * satisfy condition2 </li>
    * </ul>
-   * <p>
    * <i>Worst case:</i> If no column statistics are available, then evaluation of predicate
    * expression will assume worst case (i.e; half the input rows) for each of predicate expression.
-   * <p>
+   * <br>
    * <i>For more information, refer 'Estimating The Cost Of Operations' chapter in
    * "Database Systems: The Complete Book" by Garcia-Molina et. al.</i>
-   * </p>
+   * <br>
    */
   public static class FilterStatsRule extends DefaultStatsRule implements NodeProcessor {
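
Worked numbers make the rules above concrete: with T(R) = 1000 rows and V(R,A) = 50 distinct values, "A = c" keeps T(S) = 1000 / 50 = 20 rows, and an inequality keeps 1000 / 3 ≈ 333. For two OR'ed predicates matching m1 = 20 and m2 = 333 rows, the simple formula gives 20 + 333 = 353, while the alternate formula gives 1000 * (1 - (1 - 0.02) * (1 - 0.333)) ≈ 346.
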
 
@@ -1201,7 +1188,7 @@ public class StatsRulesProcFactory {
    * available then a better estimate can be found by taking the smaller of product of V(R,[A,B,C])
    * (product of distinct cardinalities of A,B,C) and T(R)/2.
    * <p>
-   * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
+   * T(R) = min (T(R)/2 , V(R,[A,B,C])) ---&gt; [1]
    * <p>
    * In the presence of grouping sets, map-side GBY will emit more rows depending on the size of
    * grouping set (input rows * size of grouping set). These rows will get reduced because of
@@ -1645,12 +1632,12 @@ public class StatsRulesProcFactory {
   }
 
   /**
-   * JOIN operator can yield any of the following three cases <li>The values of join keys are
+   * JOIN operator can yield any of the following three cases <ul><li>The values of join keys are
    * disjoint in both relations in which case T(RXS) = 0 (we need histograms for this)</li> <li>Join
    * key is primary key on relation R and foreign key on relation S in which case every tuple in S
-   * will have a tuple in R T(RXS) = T(S) (we need histograms for this)</li> <li>Both R & S relation
+   * will have a tuple in R T(RXS) = T(S) (we need histograms for this)</li> <li>Both R &amp; S relations
    * have same value for join-key. Ex: bool column with all true values T(RXS) = T(R) * T(S) (we
-   * need histograms for this. counDistinct = 1 and same value)</li>
+   * need histograms for this. countDistinct = 1 and same value)</li></ul>
    * <p>
    * In the absence of histograms, we can use the following general case
    * <p>
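
(For reference, the standard general-case join estimate in the Garcia-Molina chapter cited in this class's javadoc is T(R JOIN S) = T(R) * T(S) / max(V(R,a), V(S,a)) for a join attribute a.)
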
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 7b32020..746d0dc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -100,7 +100,7 @@ public class ASTNode extends CommonTree implements Node,Serializable {
   /**
    * For every node in this subtree, make sure it's start/stop token's
    * are set.  Walk depth first, visit bottom up.  Only updates nodes
-   * with at least one token index < 0.
+   * with at least one token index &lt; 0.
    *
    * In contrast to the method in the parent class, this method is
    * iterative.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index 41e3754..4b2958a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -77,7 +77,7 @@ public class AcidExportSemanticAnalyzer extends RewriteSemanticAnalyzer {
    * were generated.  It may also contain insert events that belong to transactions that aborted
    * where the same constraints apply.
    * In order to make the export artifact free of these constraints, the export does a
-   * insert into tmpTable select * from <export table> to filter/apply the events in current
+   * insert into tmpTable select * from &lt;export table&gt; to filter/apply the events in current
    * context and then export the tmpTable.  This export artifact can now be imported into any
    * table on any cluster (subject to schema checks etc).
    * See {@link #analyzeAcidExport(ASTNode)}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
index f5d79ed..e385d4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
@@ -413,7 +413,7 @@ public class PTFInvocationSpec {
 
     /**
      * Add order expressions from the list of expressions in the format of ASTNode
-     * @param args
+     * @param nodes
      */
     public void addExpressions(ArrayList<ASTNode> nodes) {
       for (int i = 0; i < nodes.size(); i++) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 055d454..48213d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -210,7 +210,7 @@ public class ReplicationSpec {
   }
 
   /**
-   * Returns a predicate filter to filter an Iterable<Partition> to return all partitions
+   * Returns a predicate filter to filter an Iterable&lt;Partition&gt; to return all partitions
    * that the current replication event specification is allowed to replicate-replace-into
    */
   public Predicate<Partition> allowEventReplacementInto() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index c31666e..3734882 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -120,7 +120,7 @@ public class TableSample {
   /**
    * Gets the ON part's expression list.
    * 
-   * @return ArrayList<ASTNode>
+   * @return ArrayList&lt;ASTNode&gt;
    */
   public ArrayList<ASTNode> getExprs() {
     return exprs;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 93641af..d70353e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -929,7 +929,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable, DDLDesc.DDL
   }
 
   /**
-   * @param cascade the isCascade to set
+   * @param isCascade the isCascade to set
    */
   public void setIsCascade(boolean isCascade) {
     this.isCascade = isCascade;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index ce85d40..b693fdb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -86,12 +86,11 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
    * @param tblProps
    * @param partColNames
    * @param ifNotExists
-   * @param orReplace
+   * @param replace
    * @param isAlterViewAs
    * @param inputFormat
    * @param outputFormat
    * @param location
-   * @param serName
    * @param serde
    * @param storageHandler
    * @param serdeProps
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
index f9d545f..ffb81b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
@@ -102,7 +102,7 @@ public class ExportWork implements Serializable {
    * For exporting Acid table, change the "pointer" to the temp table.
    * This has to be done after the temp table is populated and all necessary Partition objects
    * exist in the metastore.
-   * See {@link org.apache.hadoop.hive.ql.parse.AcidExportAnalyzer#isAcidExport(ASTNode)}
+   * See {@link org.apache.hadoop.hive.ql.parse.AcidExportSemanticAnalyzer#isAcidExport(ASTNode)}
    * for more info.
    */
   public void acidPostProcess(Hive db) throws HiveException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index 9febee4..80ce787 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -416,7 +416,7 @@ public class ExprNodeDescUtils {
   /**
    * Join keys are expressions based on the select operator. Resolve the expressions so they
    * are based on the ReduceSink operator
-   *   SEL -> RS -> JOIN
+   *   SEL -&gt; RS -&gt; JOIN
    * @param source
    * @param reduceSinkOp
    * @return
@@ -666,10 +666,10 @@ public class ExprNodeDescUtils {
    * @param inputOp
    *          Input Hive Operator
    * @param startPos
-   *          starting position in the input operator schema; must be >=0 and <=
+   *          starting position in the input operator schema; must be &gt;=0 and &lt;=
    *          endPos
    * @param endPos
-   *          end position in the input operator schema; must be >=0.
+   *          end position in the input operator schema; must be &gt;=0.
    * @return List of ExprNodeDesc
    */
   public static ArrayList<ExprNodeDesc> genExprNodeDesc(Operator inputOp, int startPos, int endPos,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
index 5f8cf54..86d4fef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
@@ -184,7 +184,6 @@ public class ListBucketingCtx implements Serializable {
   /**
    * check if list bucketing is enabled.
    *
-   * @param ctx
    * @return
    */
   public  boolean isSkewedStoredAsDir() {
@@ -201,7 +200,6 @@ public class ListBucketingCtx implements Serializable {
    * 0: not list bucketing
    * int: no. of skewed columns
    *
-   * @param ctx
    * @return
    */
   public  int calculateListBucketingLevel() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index d5a30da..bb063c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -214,10 +214,10 @@ public class MapWork extends BaseWork {
   }
 
   /**
-   * This is used to display and verify output of "Path -> Alias" in test framework.
+   * This is used to display and verify output of "Path -&gt; Alias" in test framework.
    *
-   * QTestUtil masks "Path -> Alias" and makes verification impossible.
-   * By keeping "Path -> Alias" intact and adding a new display name which is not
+   * QTestUtil masks "Path -&gt; Alias" and makes verification impossible.
+   * By keeping "Path -&gt; Alias" intact and adding a new display name which is not
    * masked by QTestUtil by removing prefix.
    *
    * Notes: we would still be masking for intermediate directories.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 76cf54e..33a5371 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -1008,7 +1008,7 @@ public final class PlanUtils {
   }
 
   /**
-   * Remove prefix from "Path -> Alias"
+   * Remove prefix from "Path -&gt; Alias"
    * This is required for testing.
    * In order to verify that path is right, we need to display it in expected test result.
    * But, mask pattern masks path with some patterns.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
index d24c4ef..ba5d06e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
@@ -85,7 +85,7 @@ public class ShowCreateDatabaseDesc extends DDLDesc implements Serializable {
   }
 
   /**
-   * @param databaseName
+   * @param dbName
    *          the dbName to set
    */
   public void setDatabaseName(String dbName) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
index 18cf12c..609d174 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
@@ -77,7 +77,7 @@ public class ShowFunctionsDesc extends DDLDesc implements Serializable {
   /**
    * @param pattern
    *          names of tables to show
-   * @param like
+   * @param isLikePattern
    *          is like keyword used
    */
   public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
index 8bb40ab..52a5d1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
@@ -32,7 +32,7 @@ package org.apache.hadoop.hive.ql.plan;
 * 1. Its position in table column is 1.
 * 2. Its position in skewed column list is 0.
  *
- * This information will be used in {@FileSinkOperator} generateListBucketingDirName
+ * This information will be used in {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator} generateListBucketingDirName
  */
 public class SkewedColumnPositionPair {
   private int tblColPosition;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 3ed5cb2..2f1ec27 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -279,7 +279,6 @@ public class SparkWork extends AbstractOperatorDesc {
   /**
    * connect adds an edge between a and b. Both nodes have
    * to be added prior to calling connect.
-   * @param
    */
   public void connect(BaseWork a, BaseWork b, SparkEdgeProperty edgeProp) {
     workGraph.get(a).add(b);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
index 3539f0d..ac43778 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
@@ -370,7 +370,6 @@ public class TezWork extends AbstractOperatorDesc {
   /**
    * connect adds an edge between a and b. Both nodes have
    * to be added prior to calling connect.
-   * @param
    */
   public void connect(BaseWork a, BaseWork b,
       TezEdgeProperty edgeProp) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
index adcf707..bf5bb24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
 /**
- * All member variables should have a setters and getters of the form get<member
- * name> and set<member name> or else they won't be recreated properly at run
+ * All member variables should have setters and getters of the form get&lt;member
+ * name&gt; and set&lt;member name&gt; or else they won't be recreated properly at run
  * time.
  *
  */
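
Concretely, the convention means every serialized member needs a matching accessor pair. A hypothetical conforming member (the field name is made up for illustration, not necessarily one of UDTFDesc's actual fields):

    private String udtfName;                                        // member
    public String getUdtfName() { return udtfName; }                // get<member name>
    public void setUdtfName(String name) { this.udtfName = name; }  // set<member name>
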
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
index caf0c67..a69f762 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
@@ -35,17 +35,17 @@ public class VectorGroupByDesc extends AbstractVectorDesc  {
   private static final long serialVersionUID = 1L;
 
   /**
-   *     GLOBAL         No key.  All rows --> 1 full aggregation on end of input
+   *     GLOBAL         No key.  All rows --&gt; 1 full aggregation on end of input
    *
-   *     HASH           Rows aggregated in to hash table on group key -->
+   *     HASH           Rows aggregated in to hash table on group key --&gt;
    *                        1 partial aggregation per key (normally, unless there is spilling)
    *
    *     MERGE_PARTIAL  As first operator in a REDUCER, partial aggregations come grouped from
-   *                    reduce-shuffle -->
+   *                    reduce-shuffle --&gt;
    *                        aggregate the partial aggregations and emit full aggregation on
    *                        endGroup / closeOp
    *
-   *     STREAMING      Rows come from PARENT operator already grouped -->
+   *     STREAMING      Rows come from PARENT operator already grouped --&gt;
    *                        aggregate the rows and emit full aggregation on key change / closeOp
    *
    *     NOTE: Hash can spill partial result rows prematurely if it runs low on memory.
@@ -123,16 +123,16 @@ public class VectorGroupByDesc extends AbstractVectorDesc  {
    *
    *     Decides using GroupByDesc.Mode and whether there are keys.
    *
-   *         Mode.COMPLETE      --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
+   *         Mode.COMPLETE      --&gt; (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
    *
-   *         Mode.HASH          --> ProcessingMode.HASH
+   *         Mode.HASH          --&gt; ProcessingMode.HASH
    *
-   *         Mode.MERGEPARTIAL  --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
+   *         Mode.MERGEPARTIAL  --&gt; (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
    *
    *         Mode.PARTIAL1,
    *         Mode.PARTIAL2,
    *         Mode.PARTIALS,
-   *         Mode.FINAL        --> ProcessingMode.STREAMING
+   *         Mode.FINAL        --&gt; ProcessingMode.STREAMING
    *
    */
   public static ProcessingMode groupByDescModeToVectorProcessingMode(GroupByDesc.Mode mode,
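
The mapping documented above translates directly into a switch. A simplified sketch of the decision (the real method's signature differs; key presence is modeled here as a numKeys count):

    static ProcessingMode toProcessingMode(GroupByDesc.Mode mode, int numKeys) {
      switch (mode) {
      case COMPLETE:
        return numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING;
      case HASH:
        return ProcessingMode.HASH;
      case MERGEPARTIAL:
        return numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL;
      case PARTIAL1:
      case PARTIAL2:
      case PARTIALS:
      case FINAL:
        return ProcessingMode.STREAMING;
      default:
        throw new IllegalArgumentException("Unexpected group-by mode " + mode);
      }
    }
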
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
index f1e3267..b3d59e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
@@ -53,9 +53,9 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
  * plan generation adds filters where they are seen but in some instances some
  * of the filter expressions can be pushed nearer to the operator that sees this
  * particular data for the first time. e.g. select a.*, b.* from a join b on
- * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
+ * (a.col1 = b.col1) where a.col1 &gt; 20 and b.col2 &gt; 40
  *
- * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
+ * For the above query, the predicates (a.col1 &gt; 20) and (b.col2 &gt; 40), without
  * predicate pushdown, would be evaluated after the join processing has been
  * done. Suppose the two predicates filter out most of the rows from a and b,
  * the join is unnecessarily processing these rows. With predicate pushdown,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
index bc473ee..94cfa51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
  * <code>CommandProcessor</code> interface. Typically <code>errorMessage</code>
  * and <code>SQLState</code> will only be set if the <code>responseCode</code>
  * is not 0.  Note that often {@code responseCode} ends up as the exit value of
 - * command shell process so should keep it to < 127.
 + * the command shell process, so it should be kept to &lt; 127.
  */
 public class CommandProcessorResponse extends Exception {
   private final int responseCode;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
index 77421b5..a8a97a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
@@ -36,7 +36,7 @@ import java.util.Arrays;
 
 /**
  * This class processes HADOOP commands used for HDFS encryption. It is meant to be run
- * only by Hive unit & queries tests.
 + * only by Hive unit &amp; query tests.
  */
 public class CryptoProcessor implements CommandProcessor {
   public static final Logger LOG = LoggerFactory.getLogger(CryptoProcessor.class.getName());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
index e19c053..f690422 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
@@ -169,7 +169,6 @@ public class AuthorizationUtils {
    * Convert authorization plugin principal type to thrift principal type
    * @param type
    * @return
-   * @throws HiveException
    */
   public static PrincipalType getThriftPrincipalType(HivePrincipalType type) {
     if(type == null){
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
index 7678e8f..7037f2c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
@@ -62,7 +62,7 @@ public enum PrivilegeType {
 
   /**
    * Do case lookup of PrivilegeType associated with this antlr token
-   * @param privilegeName
+   * @param token
    * @return corresponding PrivilegeType
    */
   public static PrivilegeType getPrivTypeByToken(int token) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
index a4079b8..9352aa2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
@@ -87,7 +87,7 @@ public interface HiveAuthorizer {
   /**
    * Create role
    * @param roleName
-   * @param adminGrantor - The user in "[ WITH ADMIN <user> ]" clause of "create role"
+   * @param adminGrantor - The user in "[ WITH ADMIN &lt;user&gt; ]" clause of "create role"
    * @throws HiveAuthzPluginException
    * @throws HiveAccessControlException
    */
@@ -232,7 +232,7 @@ public interface HiveAuthorizer {
    * returned, the Object has to be of type HiveAuthorizationTranslator
    *
    * @return
-   * @throws HiveException
+   * @throws HiveAuthzPluginException
    */
   Object getHiveAuthorizationTranslator() throws HiveAuthzPluginException;
 
@@ -246,19 +246,19 @@ public interface HiveAuthorizer {
    * (part 1) It expects a valid filter condition to be returned. Null indicates no filtering is
    * required.
    *
-   * Example: table foo(c int) -> "c > 0 && c % 2 = 0"
+   * Example: table foo(c int) -&gt; "c &gt; 0 &amp;&amp; c % 2 = 0"
    *
    * (part 2) It expects a valid expression as used in a select clause. Null
    * is NOT a valid option. If no transformation is needed simply return the
    * column name.
    *
-   * Example: column a -> "a" (no transform)
+   * Example: column a -&gt; "a" (no transform)
    *
-   * Example: column a -> "reverse(a)" (call the reverse function on a)
+   * Example: column a -&gt; "reverse(a)" (call the reverse function on a)
    *
-   * Example: column a -> "5" (replace column a with the constant 5)
+   * Example: column a -&gt; "5" (replace column a with the constant 5)
    *
-   * @return List<HivePrivilegeObject>
+   * @return List&lt;HivePrivilegeObject&gt;
    * please return the list of HivePrivilegeObjects that need to be rewritten.
    *
    * @throws SemanticException
@@ -271,7 +271,6 @@ public interface HiveAuthorizer {
    * Returning false short-circuits the generation of row/column transforms.
    *
    * @return
-   * @throws SemanticException
    */
   boolean needTransform();
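
As a hedged sketch of the row-filter / column-masking contract described above, an
implementation might annotate each object it receives roughly as follows (the two
setter names on HivePrivilegeObject are assumptions based on how plugins typically
return the two parts, not verified against this exact revision):

    import java.util.Arrays;
    import java.util.List;

    void filterAndMask(List<HivePrivilegeObject> privObjs) {
      for (HivePrivilegeObject privObj : privObjs) {
        // part 1: rows are visible only where this condition holds
        privObj.setRowFilterExpression("c > 0 AND c % 2 = 0");
        // part 2: one expression per column; a bare column name means "no transform"
        privObj.setCellValueTransformers(Arrays.asList("a", "reverse(b)", "5"));
      }
    }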
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 0b3b19b..87d2e68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -146,7 +146,7 @@ public class HivePrivilegeObject implements Comparable<HivePrivilegeObject> {
   }
 
   /**
-   * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType.COMMAND_PARAMS}
+   * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType#COMMAND_PARAMS}
    * @param cmdParams
    * @return
    */
@@ -215,7 +215,7 @@ public class HivePrivilegeObject implements Comparable<HivePrivilegeObject> {
   }
 
   /**
-   * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType.TABLE}
+   * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType#TABLE_OR_VIEW}
    * In case of DML read operations, this is the set of columns being used.
    * Column information is not set for DDL operations and for tables being written into
    * @return list of applicable columns
@@ -225,7 +225,7 @@ public class HivePrivilegeObject implements Comparable<HivePrivilegeObject> {
   }
 
   /**
-   * The class name when the type is {@link HivePrivilegeObjectType.FUNCTION}
+   * The class name when the type is {@link HivePrivilegeObjectType#FUNCTION}
    * @return the class name
    */
   public String getClassName() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
index 988d235..1d79082 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
@@ -31,9 +31,6 @@ public interface StatsAggregator {
   /**
    * This method connects to the temporary storage.
    *
-   * @param hconf
-   *          HiveConf that contains the connection parameters.
-   * @param sourceTask
    * @return true if connection is successful, false otherwise.
    */
   public boolean connect(StatsCollectionContext scc);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
index bae732c..1230663 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
@@ -35,15 +35,12 @@ public interface StatsPublisher {
    * database (if not exist).
    * This method is usually called in the Hive client side rather than by the mappers/reducers
    * so that it is initialized only once.
-   * @param hconf HiveConf that contains the configurations parameters used to connect to
-   * intermediate stats database.
    * @return true if initialization is successful, false otherwise.
    */
   public boolean init(StatsCollectionContext context);
 
   /**
    * This method connects to the intermediate statistics database.
-   * @param hconf HiveConf that contains the connection parameters.
    * @return true if connection is successful, false otherwise.
    */
   public boolean connect(StatsCollectionContext context);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 2a7cf8c..f00c720 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -787,7 +787,7 @@ public class StatsUtils {
   }
 
   /**
-   * Get sum of all values in the list that are >0
 +   * Get the sum of all values in the list that are &gt; 0
    * @param vals
    *          - list of values
    * @return sum
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
index 675853d..21bde4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
@@ -30,7 +30,6 @@ public interface SettableUDF {
   /**
    * Add data to UDF prior to initialization.
    * An exception may be thrown if the UDF doesn't know what to do with this data.
-   * @param params UDF-specific data to add to the UDF
    */
   void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException;
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
index ed5882b..7a590b8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
@@ -139,7 +139,7 @@ public class UDFConv extends UDF {
   }
 
   /**
-   * Convert numbers between different number bases. If toBase>0 the result is
+   * Convert numbers between different number bases. If toBase&gt;0 the result is
    * unsigned, otherwise it is signed.
    *
    */
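
For example, assuming the usual evaluate(Text, IntWritable, IntWritable) entry point
of this UDF, converting binary "100" to base 10 looks like this (equivalent to
HiveQL conv('100', 2, 10), which returns '4'):

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    UDFConv conv = new UDFConv();
    Text result = conv.evaluate(new Text("100"), new IntWritable(2), new IntWritable(10));
    // result holds "4"; a negative toBase would produce a signed result instead
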
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
index cd20783..63b18fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hive.ql.exec.UDF;
  * 'Ref' parse_url('http://facebook.com/path/p1.php?query=1#Ref', 'PROTOCOL')
  * will return 'http' Possible values are
  * HOST,PATH,QUERY,REF,PROTOCOL,AUTHORITY,FILE,USERINFO Also you can get a value
- * of particular key in QUERY, using syntax QUERY:<KEY_NAME> eg: QUERY:k1.
 + * of a particular key in QUERY, using the syntax QUERY:&lt;KEY_NAME&gt;, e.g. QUERY:k1.
  */
 @Description(name = "parse_url",
     value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL",
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
index 738fd95..c657a60 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
@@ -75,7 +75,7 @@ public class UDFSign extends UDF {
   /**
    * Get the sign of the decimal input
    *
-   * @param dec decimal input
+   * @param decWritable decimal input
    *
    * @return -1, 0, or 1 representing the sign of the input decimal
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
index d1517ab..360ae46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
@@ -47,12 +47,12 @@ import org.apache.hadoop.io.LongWritable;
  * Donald Knuth.
  *
  *  Incremental:
- *   n : <count>
- *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
- *   vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
- *   vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
+ *   n : &lt;count&gt;
+ *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+ *   vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+ *   vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
  *
  *  Merge:
  *   c_(A,B) = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/(n_A+n_B)
@@ -136,12 +136,12 @@ public class GenericUDAFCorrelation extends AbstractGenericUDAFResolver {
    * algorithm, based on work by Philippe Pébay and Donald Knuth.
    *
    *  Incremental:
-   *   n : <count>
-   *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
-   *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
-   *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
-   *   vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
-   *   vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
+   *   n : &lt;count&gt;
+   *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+   *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+   *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+   *   vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+   *   vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
    *
    *  Merge:
    *   c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
index 8b088f8..b1de957 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
@@ -44,10 +44,10 @@ import org.apache.hadoop.io.LongWritable;
  *  Arbitrary-Order Statistical Moments", Philippe Pebay, Sandia Labs):
  *
  *  Incremental:
- *   n : <count>
- *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+ *   n : &lt;count&gt;
+ *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
  *
  *  Merge:
  *   c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
@@ -128,10 +128,10 @@ public class GenericUDAFCovariance extends AbstractGenericUDAFResolver {
    * http://infoserve.sandia.gov/sand_doc/2008/086212.pdf
    *
    *  Incremental:
-   *   n : <count>
-   *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
-   *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
-   *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+   *   n : &lt;count&gt;
+   *   mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+   *   my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+   *   c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
    *
    *  Merge:
    *   c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
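
The incremental and merge rules above translate into a small streaming accumulator;
the following is a self-contained sketch with illustrative names, not Hive's actual
aggregation buffer:

    class CovarAccumulator {
      long n;        // <count>
      double mx, my; // <xavg>, <yavg>
      double c;      // <covariance * n>

      void update(double x, double y) {
        n++;
        double dx = x - mx;   // x_n - mx_(n-1)
        mx += dx / n;         // mx_n
        my += (y - my) / n;   // my_n
        c += dx * (y - my);   // c_(n-1) + (x_n - mx_(n-1)) * (y_n - my_n)
      }

      void merge(CovarAccumulator b) {
        if (b.n == 0) { return; }
        long nX = n + b.n;
        c += b.c + (mx - b.mx) * (my - b.my) * ((double) n * b.n) / nX;
        mx = (n * mx + b.n * b.mx) / nX;  // weighted mean of the two partitions
        my = (n * my + b.n * b.my) / nX;
        n = nX;
      }
      // the covariance itself is c / n once all rows have been seen
    }
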
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
index 960d8fd..6125977 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
@@ -46,7 +46,7 @@ import org.apache.hive.common.util.AnnotationUtils;
  * accept arguments of complex types, and return complex types. 2. It can accept a
  * variable number of arguments. 3. It can accept an infinite number of function
  * signatures - for example, it's easy to write a GenericUDAF that accepts
- * array<int>, array<array<int>> and so on (arbitrary levels of nesting).
+ * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting).
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
index 568a7ec..53c657b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.io.IntWritable;
 
 /**
- * abstract class for Lead & lag UDAFs GenericUDAFLeadLag.
 + * Abstract class for the Lead &amp; Lag UDAFs, GenericUDAFLeadLag.
  *
  */
 public abstract class GenericUDAFLeadLag extends AbstractGenericUDAFResolver {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
index 0d8d659..6597f4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.io.LongWritable;
  * accept arguments of complex types, and return complex types. 2. It can accept a
  * variable number of arguments. 3. It can accept an infinite number of function
  * signatures - for example, it's easy to write a GenericUDF that accepts
 - * array<int>, array<array<int>> and so on (arbitrary levels of nesting). 4. It
 + * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting). 4. It
  * can do short-circuit evaluations using DeferredObject.
  */
 @InterfaceAudience.Public
@@ -222,7 +222,7 @@ public abstract class GenericUDF implements Closeable {
 
   /**
    * Some functions, like comparisons, may be affected by the order in which the arguments appear.
 -   * This is to convert a function, such as 3 > x to x < 3. The flip function of
 +   * This is to convert an expression such as 3 &gt; x to x &lt; 3. The flip function of
    * GenericUDFOPGreaterThan is GenericUDFOPLessThan.
    */
   public GenericUDF flip() {
@@ -233,7 +233,6 @@ public abstract class GenericUDF implements Closeable {
    * Gets the negative function of the current one. E.g., GenericUDFOPNotEqual for
    * GenericUDFOPEqual, or GenericUDFOPNull for GenericUDFOPNotNull.
    * @return Negative function
-   * @throws UDFArgumentException
    */
   public GenericUDF negative() {
     throw new UnsupportedOperationException("Negative function doesn't exist for " + getFuncName());
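
The flip above lets the planner normalize a predicate such as 3 > x into x < 3;
for instance:

    GenericUDF gt = new GenericUDFOPGreaterThan();
    GenericUDF lt = gt.flip(); // a GenericUDFOPLessThan, so "3 > x" becomes "x < 3"
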
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
index ea9a59e..5d3f171 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.Text;
 
 /**
  * Generic UDF for string function
- * <code>CONCAT_WS(sep, [string | array(string)]+)<code>.
+ * <code>CONCAT_WS(sep, [string | array(string)]+)</code>.
  * This mimics the function from
  * MySQL http://dev.mysql.com/doc/refman/5.0/en/string-functions.html#
  * function_concat-ws
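
For instance, CONCAT_WS('.', 'www', 'facebook', 'com') returns 'www.facebook.com';
an array(string) argument contributes each of its elements in the same way.
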
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
index 25c54e9..23708dc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
@@ -76,7 +76,7 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.IfExprVarCharScalarStri
 
 /**
  * IF(expr1,expr2,expr3) <br>
- * If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2;
+ * If expr1 is TRUE (expr1 &lt;&gt; 0 and expr1 &lt;&gt; NULL) then IF() returns expr2;
  * otherwise it returns expr3. IF() returns a numeric or string value, depending
  * on the context in which it is used.
  */
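
For instance, IF(1 = 1, 'yes', 'no') returns 'yes', while IF(NULL, 'yes', 'no')
returns 'no', since a NULL condition is not TRUE.
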
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
index ee869db..70f57b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
  * GenericUDFTimestamp
  *
  * Example usage:
- * ... CAST(<Timestamp string> as TIMESTAMP) ...
+ * ... CAST(&lt;Timestamp string&gt; as TIMESTAMP) ...
  *
  * Creates a TimestampWritableV2 object using PrimitiveObjectInspectorConverter
  *
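
For instance, CAST('2019-03-08 09:05:38' AS TIMESTAMP) parses the string into a
timestamp value backed by TimestampWritableV2.
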
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
index e5a25c3..530794e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 * GenericUDFIntervalDayTime
 *
 * Example usage:
-* ... CAST(<Interval string> as INTERVAL DAY TO SECOND) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL DAY TO SECOND) ...
 *
 * Creates a HiveIntervalDayTimeWritable object using PrimitiveObjectInspectorConverter
 *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
index 804b8e7..8baf26c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 * GenericUDFIntervalYearMonth
 *
 * Example usage:
-* ... CAST(<Interval string> as INTERVAL YEAR TO MONTH) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL YEAR TO MONTH) ...
 *
 * Creates a HiveIntervalYearMonthWritable object using PrimitiveObjectInspectorConverter
 *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
index e187355..ac23e50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
@@ -44,7 +44,7 @@ public abstract class GenericUDTF {
    * Additionally setup GenericUDTF with MapredContext before initializing.
    * This is only called in runtime of MapRedTask.
    *
-   * @param context context
+   * @param mapredContext context
    */
   public void configure(MapredContext mapredContext) {
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
index 8f3dfdb..1fbfa4f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
@@ -70,7 +70,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
  * where the first occurrence was LATE, followed by zero or more EARLY flights,
  * followed by a ONTIME or EARLY flight.
  * <li><b>symbols</b> specify a list of name, expression pairs. For e.g.
- * 'LATE', arrival_delay > 0, 'EARLY', arrival_delay < 0 , 'ONTIME', arrival_delay == 0.
+ * 'LATE', arrival_delay &gt; 0, 'EARLY', arrival_delay &lt; 0 , 'ONTIME', arrival_delay == 0.
  * These symbols can be used in the Pattern defined above.
  * <li><b>resultSelectList</b> specified as a select list.
  * The expressions in the selectList are evaluated in the context where all the
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
index e2b7035..f1c4b73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
@@ -62,7 +62,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
  * Based on Hive {@link GenericUDAFEvaluator}. Break up the responsibility of the old AbstractTableFunction
  * class into a Resolver and Evaluator.
  * <p>
- * The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
  * about the arguments to the function, the shape of the Input partition and the Partitioning details.
  * The Evaluator is responsible for providing the 2 execute methods:
  * <ol>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
index dbc7693..bf012dd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
@@ -37,15 +37,15 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
  * old AbstractTableFunction class into a Resolver and Evaluator.
  * The Resolver is responsible for:
  * <ol>
- * <li> setting up the {@link tableFunctionEvaluator}
+ * <li> setting up the {@link TableFunctionEvaluator}
  * <li> Setting up the The raw and output ObjectInspectors of the Evaluator.
- * <li> The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * <li> The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
  * about the arguments to the function, the shape of the Input partition and the Partitioning details.
  * </ol>
  * The Resolver for a function is obtained from the {@link FunctionRegistry}. The Resolver is initialized
  * by the following 4 step process:
  * <ol>
- * <li> The initialize method is called; which is passed the {@link PTFDesc} and the {@link TableFunctionDef}.
+ * <li> The initialize method is called; which is passed the {@link PTFDesc} and the {@link PartitionedTableFunctionDef}.
  * <li> The resolver is then asked to setup the Raw ObjectInspector. This is only required if the Function reshapes
  * the raw input.
  * <li> Once the Resolver has had a chance to compute the shape of the Raw Input that is fed to the partitioning
@@ -113,8 +113,6 @@ public abstract class TableFunctionResolver {
    * exist for all the Def (ArgDef, ColumnDef, WindowDef..). It is the responsibility of
    * the TableFunction to construct the {@link ExprNodeEvaluator evaluators} and setup the OI.
    *
-   * @param tblFuncDef
-   * @param ptfDesc
    * @throws HiveException
    */
   public abstract void initializeOutputOI() throws HiveException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
index cb966a7..58e6289 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
@@ -83,7 +83,7 @@ import java.util.function.Function;
  * but can be made insert-only transactional tables and generate corresponding Alter Table commands.
  *
  * Note that converting a table to a full CRUD table requires that all files follow a naming
- * convention, namely 0000N_0 or 0000N_0_copy_M, N >= 0, M > 0.  This utility can perform this
+ * convention, namely 0000N_0 or 0000N_0_copy_M, N &gt;= 0, M &gt; 0.  This utility can perform this
  * rename with the "execute" option.  It will also produce a script (with and w/o "execute")
  * to perform the renames.
  *
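
A hedged illustration of the naming convention above; the pattern is an
approximation of the stated rule, not the tool's actual validation code:

    import java.util.regex.Pattern;

    // accepts e.g. "000001_0" and "000001_0_copy_2" (N >= 0, M > 0)
    Pattern BUCKET_FILE = Pattern.compile("\\d+_0(_copy_[1-9]\\d*)?");
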
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java b/ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
index 76753488..92651cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
@@ -17,7 +17,7 @@ package org.apache.hadoop.hive.ql.wm;
 
 /**
  * Expression that is defined in triggers.
- * Most expressions will get triggered only after exceeding a limit. As a result, only greater than (>) expression
 + * Most expressions will get triggered only after exceeding a limit. As a result, only the greater than (&gt;) expression
  * is supported.
  */
 public interface Expression {
@@ -43,7 +43,7 @@ public interface Expression {
   }
 
   /**
-   * Evaluate current value against this expression. Return true if expression evaluates to true (current > limit)
 +   * Evaluate current value against this expression. Return true if the expression evaluates to true (current &gt; limit),
    *  false otherwise
    *
    * @param current - current value against which expression will be evaluated