Posted to commits@hive.apache.org by ab...@apache.org on 2023/02/24 07:33:16 UTC
[hive] branch master updated: HIVE-27055: hive-exec typos part 3 (#4035) (Michal Lorek reviewed by Laszlo Bodor)
This is an automated email from the ASF dual-hosted git repository.
abstractdog pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 7469510c206 HIVE-27055: hive-exec typos part 3 (#4035) (Michal Lorek reviewed by Laszlo Bodor)
7469510c206 is described below
commit 7469510c206249f3fffaf5a8ae5113eba44b7d6f
Author: M Lorek <ml...@users.noreply.github.com>
AuthorDate: Fri Feb 24 07:33:05 2023 +0000
HIVE-27055: hive-exec typos part 3 (#4035) (Michal Lorek reviewed by Laszlo Bodor)
---
.../apache/hadoop/hive/llap/LlapCacheAwareFs.java | 2 +-
.../java/org/apache/hadoop/hive/ql/Compiler.java | 2 +-
.../hadoop/hive/ql/HiveQueryLifeTimeHook.java | 10 +--
ql/src/java/org/apache/hadoop/hive/ql/IDriver.java | 2 +-
.../org/apache/hadoop/hive/ql/QueryDisplay.java | 16 ++--
.../ql/ddl/table/info/desc/DescTableOperation.java | 1 -
.../apache/hadoop/hive/ql/exec/ArchiveUtils.java | 6 +-
.../apache/hadoop/hive/ql/exec/BoundaryCache.java | 4 +-
.../hadoop/hive/ql/exec/FileSinkOperator.java | 6 +-
.../hadoop/hive/ql/exec/GroupByOperator.java | 2 +-
.../org/apache/hadoop/hive/ql/exec/JoinUtil.java | 2 +-
.../hadoop/hive/ql/exec/SkewJoinHandler.java | 5 +-
.../exec/persistence/HybridHashTableContainer.java | 4 +-
.../hive/ql/exec/persistence/RowContainer.java | 4 +-
.../hadoop/hive/ql/exec/repl/ReplDumpTask.java | 9 +--
.../hadoop/hive/ql/exec/repl/ReplDumpWork.java | 4 +-
.../hive/ql/exec/repl/ReplExternalTables.java | 2 +-
.../hadoop/hive/ql/exec/repl/ReplLoadTask.java | 9 +--
.../hadoop/hive/ql/exec/repl/ReplLoadWork.java | 4 +-
.../hive/ql/exec/vector/VectorGroupKeyHelper.java | 6 +-
.../vector/expressions/ListIndexColColumn.java | 2 +-
.../expressions/StringGroupConcatColCol.java | 2 +-
.../hadoop/hive/ql/hooks/HiveProtoLoggingHook.java | 10 +--
.../apache/hadoop/hive/ql/hooks/HookContext.java | 2 +-
.../apache/hadoop/hive/ql/hooks/WriteEntity.java | 34 ++++-----
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 6 +-
.../org/apache/hadoop/hive/ql/io/BucketCodec.java | 12 +--
.../hadoop/hive/ql/io/FlatFileInputFormat.java | 2 +-
.../ql/io/SchemaAwareCompressionOutputStream.java | 2 +-
.../esriJson/UnenclosedBaseJsonRecordReader.java | 2 +-
.../hive/ql/io/orc/encoded/EncodedReaderImpl.java | 2 +-
.../hadoop/hive/ql/io/orc/encoded/IoTrace.java | 8 +-
.../vector/ParquetDataColumnReaderFactory.java | 8 +-
.../apache/hadoop/hive/ql/lib/RuleExactMatch.java | 2 +-
.../hive/ql/log/syslog/SyslogInputFormat.java | 2 +-
.../hadoop/hive/ql/log/syslog/SyslogParser.java | 16 ++--
.../hadoop/hive/ql/metadata/PartitionIterable.java | 8 +-
.../org/apache/hadoop/hive/ql/metadata/Table.java | 4 +-
.../ql/optimizer/BigTableSelectorForAutoSMJ.java | 4 +-
.../BucketingSortingReduceSinkOptimizer.java | 6 +-
.../ql/optimizer/PartitionColumnsSeparator.java | 10 +--
.../HiveRelOptMaterializationValidator.java | 2 +-
.../hive/ql/optimizer/calcite/RelOptHiveTable.java | 8 +-
.../hive/ql/optimizer/calcite/cost/HiveCost.java | 2 +-
.../calcite/rules/HiveProjectMergeRule.java | 2 +-
.../calcite/rules/HiveRelFieldTrimmer.java | 2 +-
.../rules/HiveSemiJoinProjectTransposeRule.java | 24 +++---
.../calcite/rules/HiveSortUnionReduceRule.java | 4 +-
.../optimizer/calcite/rules/RelFieldTrimmer.java | 2 +-
.../calcite/stats/HiveRelMdPredicates.java | 6 +-
.../calcite/stats/HiveRelMdSelectivity.java | 2 +-
.../optimizer/calcite/translator/ASTBuilder.java | 2 +-
.../translator/opconventer/HiveGBOpConvUtil.java | 2 +-
.../opconventer/HiveTableScanVisitor.java | 4 +-
.../ql/optimizer/physical/BucketingSortingCtx.java | 4 +-
.../BucketingSortingInferenceOptimizer.java | 2 +-
.../stats/annotation/StatsRulesProcFactory.java | 2 +-
.../hadoop/hive/ql/parse/BaseSemanticAnalyzer.java | 6 +-
.../hadoop/hive/ql/parse/CalcitePlanner.java | 2 +-
.../apache/hadoop/hive/ql/parse/ParseUtils.java | 4 +-
.../apache/hadoop/hive/ql/parse/QBSubQuery.java | 20 ++---
.../hive/ql/parse/ReplicationSemanticAnalyzer.java | 2 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
.../hadoop/hive/ql/parse/TableAccessAnalyzer.java | 2 +-
.../apache/hadoop/hive/ql/parse/TableSample.java | 24 +++---
.../apache/hadoop/hive/ql/parse/TaskCompiler.java | 6 +-
.../apache/hadoop/hive/ql/parse/WindowingSpec.java | 2 +-
.../parse/repl/dump/log/BootstrapDumpLogger.java | 2 +-
.../parse/repl/dump/log/IncrementalDumpLogger.java | 2 +-
.../parse/repl/load/log/IncrementalLoadLogger.java | 2 +-
.../org/apache/hadoop/hive/ql/plan/BaseWork.java | 2 +-
.../apache/hadoop/hive/ql/plan/BasicStatsWork.java | 4 +-
.../apache/hadoop/hive/ql/plan/ColStatistics.java | 18 ++---
.../apache/hadoop/hive/ql/plan/FileSinkDesc.java | 4 +-
.../hadoop/hive/ql/processors/ResetProcessor.java | 48 ++++++------
.../hadoop/hive/ql/processors/SetProcessor.java | 88 +++++++++++-----------
.../scheduled/ScheduledQueryExecutionService.java | 2 +-
.../hadoop/hive/ql/secrets/SecretSource.java | 2 +-
.../hadoop/hive/ql/session/SessionState.java | 2 +-
.../hadoop/hive/ql/stats/BasicStatsNoJobTask.java | 4 +-
.../hadoop/hive/ql/stats/BasicStatsTask.java | 4 +-
.../org/apache/hadoop/hive/ql/stats/Partish.java | 4 +-
.../hive/ql/stats/estimator/StatEstimator.java | 2 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 2 +-
.../org/apache/hadoop/hive/ql/udf/UDFConv.java | 2 +-
.../hive/ql/udf/ptf/TableFunctionResolver.java | 4 +-
86 files changed, 290 insertions(+), 292 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
index 6e9b6beacd6..28fa415b43e 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java
@@ -310,7 +310,7 @@ public class LlapCacheAwareFs extends FileSystem {
arrayOffset + offsetFromReadStart + extraDiskDataOffset,
smallSize, bb, cacheRanges, largeBufCount, chunkFrom + extraOffsetInChunk);
extraDiskDataOffset += smallSize;
- extraOffsetInChunk += smallSize; // Not strictly necessary, noone will look at it.
+ extraOffsetInChunk += smallSize; // Not strictly necessary, no one will look at it.
if (newCacheData == null) {
newCacheData = smallBuffer;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
index b6c955485fe..26377ed9374 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
@@ -89,7 +89,7 @@ public class Compiler {
/**
* @param deferClose indicates if the close/destroy should be deferred when the process has been interrupted
- * it should be set to true if the compile is called within another method like runInternal,
+ * it should be set to true if the compile method is called within another method like runInternal,
* which defers the close to the called in that method.
*/
public QueryPlan compile(String rawCommand, boolean deferClose) throws CommandProcessorException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java
index 3e125da0845..9e21b586e0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java
@@ -87,11 +87,11 @@ public class HiveQueryLifeTimeHook implements QueryLifeTimeHook {
if (table != null) {
LOG.info("Performing cleanup as part of rollback: {}", table.getFullTableName().toString());
try {
- CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
- rqst.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
- rqst.putToProperties(META_TABLE_LOCATION, tblPath.toString());
- rqst.putToProperties(IF_PURGE, Boolean.toString(true));
- boolean success = Hive.get(conf).getMSC().submitForCleanup(rqst, writeId,
+ CompactionRequest request = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
+ request.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
+ request.putToProperties(META_TABLE_LOCATION, tblPath.toString());
+ request.putToProperties(IF_PURGE, Boolean.toString(true));
+ boolean success = Hive.get(conf).getMSC().submitForCleanup(request, writeId,
pCtx.getQueryState().getTxnManager().getCurrentTxnId());
if (success) {
LOG.info("The cleanup request has been submitted");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java
index 73bca0bbebd..efa1223db5a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
/**
- * Hive query executer driver.
+ * Hive query executor driver.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
index cdba54c5b65..35eccd2909a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
@@ -47,7 +47,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
public class QueryDisplay {
/**
- * Preffered objectmapper for this class.
+ * Preferred objectMapper for this class.
*
* It must be used to have things work in shaded environment (and its also more performant).
*/
@@ -307,7 +307,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @return map of HMS Client method-calls and duration in miliseconds, during given phase.
+ * @return map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getHmsTimings(Phase phase) {
return hmsTimingMap.get(phase);
@@ -315,7 +315,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @param hmsTimings map of HMS Client method-calls and duration in miliseconds, during given phase.
+ * @param hmsTimings map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized void setHmsTimings(Phase phase, Map<String, Long> hmsTimings) {
hmsTimingMap.put(phase, hmsTimings);
@@ -323,7 +323,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @return map of PerfLogger call-trace name and start time in miliseconds, during given phase.
+ * @return map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogStarts(Phase phase) {
return perfLogStartMap.get(phase);
@@ -331,7 +331,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @param perfLogStarts map of PerfLogger call-trace name and start time in miliseconds, during given phase.
+ * @param perfLogStarts map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized void setPerfLogStarts(Phase phase, Map<String, Long> perfLogStarts) {
perfLogStartMap.put(phase, perfLogStarts);
@@ -339,7 +339,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @return map of PerfLogger call-trace name and end time in miliseconds, during given phase.
+ * @return map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogEnds(Phase phase) {
return perfLogEndMap.get(phase);
@@ -347,7 +347,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @param perfLogEnds map of PerfLogger call-trace name and end time in miliseconds, during given phase.
+ * @param perfLogEnds map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized void setPerfLogEnds(Phase phase, Map<String, Long> perfLogEnds) {
perfLogEndMap.put(phase, perfLogEnds);
@@ -355,7 +355,7 @@ public class QueryDisplay {
/**
* @param phase phase of query
- * @return map of PerfLogger call-trace name and duration in miliseconds, during given phase.
+ * @return map of PerfLogger call-trace name and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogTimes(Phase phase) {
Map<String, Long> times = new HashMap<>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
index 5e4f9b23425..53db08b8a08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
import org.apache.hadoop.hive.ql.ddl.ShowUtils;
import org.apache.hadoop.hive.ql.ddl.table.info.desc.formatter.DescTableFormatter;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLOperation;
import org.apache.hadoop.hive.ql.metadata.Hive;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
index 6ad0556b552..ebe8f2f5277 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -234,10 +234,10 @@ public final class ArchiveUtils {
/**
* Determines if one can insert into partition(s), or there's a conflict with
- * archive. It can be because partition is itself archived or it is to be
+ * archive. It can be because partition is itself archived, or it is to be
* created inside existing archive. The second case is when partition doesn't
- * exist yet, but it would be inside of an archive if it existed. This one is
- * quite tricky to check, we need to find at least one partition inside of
+ * exist yet, but it would be inside an archive if it existed. This one is
+ * quite tricky to check, we need to find at least one partition inside
* the parent directory. If it is archived and archiving level tells that
* the archival was done of directory partition is in it means we cannot
* insert; otherwise we can.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java
index 7cf278ca05e..3c8b7c567cf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java
@@ -97,7 +97,7 @@ public class BoundaryCache extends TreeMap<Integer, Object> {
}
/**
- * Returns entry corresponding to highest row index.
+ * Returns entry corresponding to the highest row index.
* @return max entry.
*/
public Map.Entry<Integer, Object> getMaxEntry() {
@@ -105,7 +105,7 @@ public class BoundaryCache extends TreeMap<Integer, Object> {
}
/**
- * Removes eldest entry from the boundary cache.
+ * Removes the eldest entry from the boundary cache.
*/
public void evictOne() {
if (queue.isEmpty()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 9d30093c239..03b0c76b40f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -1136,8 +1136,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
}
rowOutWriters = fpaths.outWriters;
- // check if all record writers implement statistics. if atleast one RW
- // doesn't implement stats interface we will fallback to conventional way
+ // check if all record writers implement statistics. if at least one RW
+ // doesn't implement stats interface we will fall back to conventional way
// of gathering stats
isCollectRWStats = areAllTrue(statsFromRecordWriter);
if (conf.isGatherStats() && !isCollectRWStats) {
@@ -1637,7 +1637,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
}
}
if (conf.getTableInfo().isNonNative()) {
- //check the ouput specs only if it is a storage handler (native tables's outputformats does
+ //check the output specs only if it is a storage handler (native tables's outputformats does
//not set the job's output properties correctly)
try {
hiveOutputFormat.checkOutputSpecs(ignored, job);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index f118aa19bb3..f548afd5240 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -1129,7 +1129,7 @@ public class GroupByOperator extends Operator<GroupByDesc> implements IConfigure
int groupingSetPosition = desc.getGroupingSetPosition();
List<Long> listGroupingSets = desc.getListGroupingSets();
// groupingSets are known at map/reducer side; but have to do real processing
- // hence grouppingSetsPresent is true only at map side
+ // hence groupingSetsPresent is true only at map side
if (groupingSetPosition >= 0 && listGroupingSets != null) {
Long emptyGrouping = (1L << groupingSetPosition) - 1;
if (listGroupingSets.contains(emptyGrouping)) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
index e9920d65665..cda449be283 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
@@ -426,7 +426,7 @@ public class JoinUtil {
private static ObjectInspector unflattenObjInspector(ObjectInspector oi) {
if (oi instanceof StructObjectInspector) {
// Check if all fields start with "key." or "value."
- // If so, then unflatten by adding an additional level of nested key and value structs
+ // If so, then unflatten by adding a level of nested key and value structs
// Example: { "key.reducesinkkey0":int, "key.reducesinkkey1": int, "value._col6":int }
// Becomes
// { "key": { "reducesinkkey0":int, "reducesinkkey1":int }, "value": { "_col6":int } }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
index 94b63f2e263..d18e53ef3d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.exec;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -181,7 +180,7 @@ public class SkewJoinHandler {
RowContainer<ArrayList<Object>> bigKey = (RowContainer)joinOp.storage[currBigKeyTag];
Path outputPath = getOperatorOutputPath(specPath);
FileSystem destFs = outputPath.getFileSystem(hconf);
- bigKey.copyToDFSDirecory(destFs, outputPath);
+ bigKey.copyToDFSDirectory(destFs, outputPath);
for (int i = 0; i < numAliases; i++) {
if (((byte) i) == currBigKeyTag) {
@@ -191,7 +190,7 @@ public class SkewJoinHandler {
if (values != null) {
specPath = conf.getSmallKeysDirMap().get((byte) currBigKeyTag).get(
(byte) i);
- values.copyToDFSDirecory(destFs, getOperatorOutputPath(specPath));
+ values.copyToDFSDirectory(destFs, getOperatorOutputPath(specPath));
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 545a7296526..e66977f758a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -468,7 +468,7 @@ public class HybridHashTableContainer
if (hp.hashMap != null) {
memUsed += hp.hashMap.memorySize();
} else {
- // also include the still-in-memory sidefile, before it has been truely spilled
+ // also include the still-in-memory sidefile, before it has been truly spilled
if (hp.sidefileKVContainer != null) {
memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize;
}
@@ -627,7 +627,7 @@ public class HybridHashTableContainer
}
}
- // It can happen that although there're some partitions in memory, but their sizes are all 0.
+ // It can happen that although there are some partitions in memory, but their sizes are all 0.
// In that case we just pick one and spill.
if (res == -1) {
for (int i = 0; i < hashPartitions.length; i++) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
index 4a47ca0278b..67ab1e97945 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
@@ -295,7 +295,7 @@ public class RowContainer<ROW extends List<Object>>
}
private final ArrayList<Object> row = new ArrayList<Object>(2);
-
+
private void spillBlock(ROW[] block, int length) throws HiveException {
try {
if (tmpFile == null) {
@@ -405,7 +405,7 @@ public class RowContainer<ROW extends List<Object>>
}
}
- public void copyToDFSDirecory(FileSystem destFs, Path destPath) throws IOException, HiveException {
+ public void copyToDFSDirectory(FileSystem destFs, Path destPath) throws IOException, HiveException {
if (addCursor > 0) {
this.spillBlock(this.currentWriteBlock, addCursor);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 0c0022c1f49..564d99ee7cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -109,7 +109,6 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
-import java.util.LinkedHashMap;
import java.util.Set;
import java.util.HashSet;
import java.util.List;
@@ -808,7 +807,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
return !ReplUtils.tableIncludedInReplScope(work.oldReplScope, table.getTableName());
}
- private boolean isTableSatifiesConfig(Table table) {
+ private boolean doesTableSatisfyConfig(Table table) {
if (table == null) {
return false;
}
@@ -1105,7 +1104,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
dumpTable(exportService, matchedDbName, tableName, validTxnList, dbRootMetadata, dbRootData, bootDumpBeginReplId,
hiveDb, tableTuple, managedTblList, dataCopyAtLoad);
}
- if (tableList != null && isTableSatifiesConfig(table)) {
+ if (tableList != null && doesTableSatisfyConfig(table)) {
tableList.add(tableName);
}
} catch (InvalidTableException te) {
@@ -1428,7 +1427,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
LOG.debug(te.getMessage());
}
dumpConstraintMetadata(dbName, tblName, dbRoot, hiveDb, table != null ? table.getTTable().getId() : -1);
- if (tableList != null && isTableSatifiesConfig(table)) {
+ if (tableList != null && doesTableSatisfyConfig(table)) {
tableList.add(tblName);
}
}
@@ -1660,7 +1659,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
// phase won't be able to replicate those txns. So, the logic is to wait for the given amount
// of time to see if all open txns < current txn is getting aborted/committed. If not, then
// we forcefully abort those txns just like AcidHouseKeeperService.
- //Exclude readonly and repl created tranasactions
+ //Exclude readonly and repl created transactions
HiveTxnManager hiveTxnManager = getTxnMgr();
ValidTxnList validTxnList = hiveTxnManager.getValidTxns(excludedTxns);
while (System.currentTimeMillis() < waitUntilTime) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
index 65d9c17a675..2e626be7ebc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
@@ -146,8 +146,8 @@ public class ReplDumpWork implements Serializable {
void overrideLastEventToDump(Hive fromDb, long bootstrapLastId, long failoverEventId) throws Exception {
// If we are bootstrapping ACID tables, we need to dump all the events upto the event id at
// the beginning of the bootstrap dump and also not dump any event after that. So we override
- // both, the last event as well as any user specified limit on the number of events. See
- // bootstrampDump() for more details.
+ // both, the last event and any user specified limit on the number of events. See
+ // bootstrapDump() for more details.
if (failoverEventId > 0) {
LOG.info("eventTo : {} marked as failover eventId.", eventTo);
eventTo = failoverEventId;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
index 14e3b59cff9..8f48a6ddda1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java
@@ -84,7 +84,7 @@ public class ReplExternalTables {
}
if (!TableType.EXTERNAL_TABLE.equals(table.getTableType())) {
throw new IllegalArgumentException(
- "only External tables can be writen via this writer, provided table is " + table
+ "only External tables can be written via this writer, provided table is " + table
.getTableType());
}
Path fullyQualifiedDataLocation = PathBuilder.fullyQualifiedHDFSUri(table.getDataLocation(), FileSystem.get(hiveConf));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
index b9fcfbcb426..6ce83ee3e70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger;
-import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
import org.apache.thrift.TException;
import com.google.common.collect.Collections2;
@@ -164,7 +163,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
addAtlasLoadTask();
}
if (conf.getBoolVar(HiveConf.ConfVars.REPL_RANGER_HANDLE_DENY_POLICY_TARGET)) {
- initiateRangerDenytask();
+ initiateRangerDenyTask();
}
if (shouldLoadAuthorizationMetadata()) {
initiateAuthorizationLoadTask();
@@ -203,7 +202,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
return conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA);
}
- private void initiateRangerDenytask() throws SemanticException {
+ private void initiateRangerDenyTask() throws SemanticException {
if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) {
LOG.info("Adding Ranger Deny Policy Task for {} ", work.dbNameToLoadIn);
RangerDenyWork rangerDenyWork = new RangerDenyWork(new Path(work.getDumpDirectory()), work.getSourceDbName(),
@@ -669,7 +668,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
db.setParameters(params);
hiveDb.alterDatabase(work.getTargetDatabase(), db);
- LOG.debug("Database {} poperties after removal {}", work.getTargetDatabase(), params);
+ LOG.debug("Database {} properties after removal {}", work.getTargetDatabase(), params);
} catch (HiveException e) {
throw new SemanticException(e);
}
@@ -851,7 +850,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
Hive db = getHive();
for (String table : work.tablesToDrop) {
- LOG.info("Dropping table {} for optimised bootstarap", work.dbNameToLoadIn + "." + table);
+ LOG.info("Dropping table {} for optimised bootstrap", work.dbNameToLoadIn + "." + table);
db.dropTable(work.dbNameToLoadIn + "." + table, true);
}
Database sourceDb = getSourceDbMetadata(); //This sourceDb was the actual target prior to failover.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
index ea5c0fd1503..6ef95ea55ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
@@ -218,7 +218,7 @@ public class ReplLoadWork implements Serializable, ReplLoadWorkMBean {
return null;
}
- // Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registeration.
+ // Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registration.
private void unRegisterMBeanIfRegistered(String serviceName, String nameName,
Map<String, String> additionalParameters) {
@@ -284,7 +284,7 @@ public class ReplLoadWork implements Serializable, ReplLoadWorkMBean {
@Override
public String getDumpDirectory() {return dumpDirectory;}
-
+
public void setRootTask(Task<?> rootTask) {
this.rootTask = rootTask;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
index 82dc4a704e1..4a9758808e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
@@ -40,7 +40,7 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
void init(VectorExpression[] keyExpressions) throws HiveException {
- // NOTE: To support pruning the grouping set id dummy key by VectorGroupbyOpeator MERGE_PARTIAL
+ // NOTE: To support pruning the grouping set id dummy key by VectorGroupByOperator MERGE_PARTIAL
// case, we use the keyCount passed to the constructor and not keyExpressions.length.
// Inspect the output type of each key expression. And, remember the output columns.
@@ -62,7 +62,7 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
/*
* This helper method copies the group keys from one vectorized row batch to another,
* but does not increment the outputBatch.size (i.e. the next output position).
- *
+ *
* It was designed for VectorGroupByOperator's sorted reduce group batch processing mode
* to copy the group keys at startGroup.
*/
@@ -75,7 +75,7 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[inputColumnNum];
LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[outputColumnNum];
- // This vectorized code pattern says:
+ // This vectorized code pattern says:
// If the input batch has no nulls at all (noNulls is true) OR
// the input row is NOT NULL, copy the value.
//
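
The null-handling convention referenced in that comment recurs throughout Hive's vectorized operators. Below is a minimal sketch of the same pattern written against plain arrays rather than Hive's LongColumnVector API, so the method and field names are illustrative assumptions only.

public class NullCopyPatternSketch {
  // Copy one value from an input "column" to an output "column", honoring the
  // noNulls / isNull convention described in the comment above.
  static void copyValue(long[] in, boolean[] inIsNull, boolean inNoNulls,
                        long[] out, boolean[] outIsNull, int inRow, int outRow) {
    if (inNoNulls || !inIsNull[inRow]) {
      out[outRow] = in[inRow];      // value present: copy it across
      outIsNull[outRow] = false;
    } else {
      outIsNull[outRow] = true;     // value is NULL: propagate only the null flag
    }
  }

  public static void main(String[] args) {
    long[] in = {7L, 0L};
    boolean[] inIsNull = {false, true};
    long[] out = new long[2];
    boolean[] outIsNull = new boolean[2];
    copyValue(in, inIsNull, false, out, outIsNull, 0, 0);  // copies 7
    copyValue(in, inIsNull, false, out, outIsNull, 1, 1);  // marks row 1 as NULL
    System.out.println(out[0] + " " + outIsNull[1]);       // 7 true
  }
}
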
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java
index 9d2cdeffd17..6f5b176b77f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java
@@ -210,7 +210,7 @@ public class ListIndexColColumn extends VectorExpression {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
- * Same LIST for entire batch. Still need to validate the LIST upper limit against varing
+ * Same LIST for entire batch. Still need to validate the LIST upper limit against varying
* INDEX.
*
* (Repeated INDEX case handled above).
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
index 35937deeeba..19be05691db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
@@ -402,7 +402,7 @@ public class StringGroupConcatColCol extends VectorExpression {
* @param sel selected value position array
* @param n number of qualifying rows
* @param inV input vector
- * @param outV ouput vector
+ * @param outV output vector
*/
private static void propagateNulls(boolean selectedInUse, int n, int[] sel, ColumnVector inV,
ColumnVector outV) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
index 904dd4bebde..618c7108f67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
@@ -465,11 +465,11 @@ public class HiveProtoLoggingHook implements ExecuteWithHookContext {
}
private String getRequestUser(HookContext hookContext) {
- String requestuser = hookContext.getUserName();
- if (requestuser == null) {
- requestuser = hookContext.getUgi().getUserName();
+ String requestUser = hookContext.getUserName();
+ if (requestUser == null) {
+ requestUser = hookContext.getUgi().getUserName();
}
- return requestuser;
+ return requestUser;
}
private String getQueueName(ExecutionMode mode, HiveConf conf) {
@@ -555,7 +555,7 @@ public class HiveProtoLoggingHook implements ExecuteWithHookContext {
EventLogger logger = EventLogger.getInstance(hookContext.getConf());
logger.handle(hookContext);
} catch (Exception e) {
- LOG.error("Got exceptoin while processing event: ", e);
+ LOG.error("Got exception while processing event: ", e);
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
index 3a86d24180e..cd23b247063 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
@@ -62,7 +62,7 @@ public class HookContext {
DRIVER_RUN_HOOKS(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class,
"Hooks that Will be run at the beginning and end of Driver.run"),
QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERYREDACTORHOOKS, Redactor.class,
- "Hooks to be invoked for each query which can tranform the query before it's placed in the job.xml file"),
+ "Hooks to be invoked for each query which can transform the query before it's placed in the job.xml file"),
// The HiveSessionHook.class cannot access, use Hook.class instead
HIVE_SERVER2_SESSION_HOOK(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK, Hook.class,
"Hooks to be executed when session manager starts a new session");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index f4e285c8b75..be64185f7a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -125,11 +125,11 @@ public class WriteEntity extends Entity implements Serializable {
*
* @param d
* The name of the directory that is being written to.
- * @param islocal
+ * @param isLocal
* Flag to decide whether this directory is local or in dfs.
*/
- public WriteEntity(Path d, boolean islocal) {
- this(d, islocal, false);
+ public WriteEntity(Path d, boolean isLocal) {
+ this(d, isLocal, false);
}
/**
@@ -137,13 +137,13 @@ public class WriteEntity extends Entity implements Serializable {
*
* @param d
* The name of the directory that is being written to.
- * @param islocal
+ * @param isLocal
* Flag to decide whether this directory is local or in dfs.
* @param isTemp
* True if this is a temporary location such as scratch dir
*/
- public WriteEntity(Path d, boolean islocal, boolean isTemp) {
- super(d, islocal, true);
+ public WriteEntity(Path d, boolean isLocal, boolean isTemp) {
+ super(d, isLocal, true);
this.isTempURI = isTemp;
this.writeType = WriteType.PATH_WRITE;
}
@@ -222,7 +222,7 @@ public class WriteEntity extends Entity implements Serializable {
// Not used, @see org.apache.hadoop.hive.ql.ddl.table.storage.skewed.AlterTableSkewedByAnalyzer
// alter table {table_name} skewed by (col_name1, col_name2, ...)
// on ([(col_name1_value, col_name2_value, ...) [, (col_name1_value, col_name2_value), ...] [stored as directories]
- case SET_SKEWED_LOCATION:
+ case SET_SKEWED_LOCATION:
// alter table {table_name} set skewed location (col_name1="location1" [, col_name2="location2", ...] )
case INTO_BUCKETS:
// Not used, @see org.apache.hadoop.hive.ql.ddl.table.storage.cluster.AlterTableIntoBucketsAnalyzer
@@ -241,16 +241,16 @@ public class WriteEntity extends Entity implements Serializable {
} else {
return WriteType.DDL_EXCLUSIVE;
}
-
+
case CLUSTERED_BY:
- // alter table {table_name} clustered by (col_name, col_name, ...) [sorted by (col_name, ...)]
+ // alter table {table_name} clustered by (col_name, col_name, ...) [sorted by (col_name, ...)]
// into {num_buckets} buckets;
case NOT_SORTED:
case NOT_CLUSTERED:
case SET_FILE_FORMAT:
// alter table {table_name} [partition ({partition_spec})] set fileformat {file_format}
case SET_SERDE:
- // alter table {table_name} [PARTITION ({partition_spec})] set serde '{serde_class_name}'
+ // alter table {table_name} [PARTITION ({partition_spec})] set serde '{serde_class_name}'
case ADDCOLS:
case REPLACE_COLUMNS:
// alter table {table_name} [partition ({partition_spec})] add/replace columns ({col_name} {data_type})
@@ -261,10 +261,10 @@ public class WriteEntity extends Entity implements Serializable {
case OWNER:
case RENAME:
// alter table {table_name} rename to {new_table_name}
- case DROPPROPS:
- return AcidUtils.isLocklessReadsEnabled(table, conf) ?
+ case DROPPROPS:
+ return AcidUtils.isLocklessReadsEnabled(table, conf) ?
WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE;
-
+
case ADDPARTITION:
// Not used: @see org.apache.hadoop.hive.ql.ddl.table.partition.add.AbstractAddPartitionAnalyzer
// alter table {table_name} add [if not exists] partition ({partition_spec}) [location '{location}']
@@ -273,19 +273,19 @@ public class WriteEntity extends Entity implements Serializable {
case ADDPROPS:
case UPDATESTATS:
return WriteType.DDL_SHARED;
-
+
case COMPACT:
- // alter table {table_name} [partition (partition_key = 'partition_value' [, ...])]
+ // alter table {table_name} [partition (partition_key = 'partition_value' [, ...])]
// compact 'compaction_type'[and wait] [with overwrite tblproperties ("property"="value" [, ...])];
case TOUCH:
// alter table {table_name} touch [partition ({partition_spec})]
return WriteType.DDL_NO_LOCK;
-
+
default:
throw new RuntimeException("Unknown operation " + op.toString());
}
}
-
+
public boolean isDynamicPartitionWrite() {
return isDynamicPartitionWrite;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 9853818f09b..50f642841f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1742,8 +1742,8 @@ public class AcidUtils {
sb.append("Path: " + dirPath);
sb.append("; ");
sb.append("Files: { ");
- for (FileStatus fstatus : files) {
- sb.append(fstatus);
+ for (FileStatus fStatus : files) {
+ sb.append(fStatus);
sb.append(", ");
}
sb.append(" }");
@@ -2608,7 +2608,7 @@ public class AcidUtils {
*/
public static final int ORC_ACID_VERSION = 2;
/**
- * Inlucde current acid version in file footer.
+ * Include current acid version in file footer.
* @param writer - file written
*/
public static void setAcidVersionInDataFile(Writer writer) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
index c6f39a157a9..9299699608d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
@@ -53,26 +53,26 @@ public enum BucketCodec {
* next 4 bits reserved for future
* remaining 12 bits - the statement ID - 0-based numbering of all statements within a
* transaction. Each leg of a multi-insert statement gets a separate statement ID.
- * The reserved bits align it so that it easier to interpret it in Hex.
- *
+ * The reserved bits align it so that it's easier to interpret it in Hex.
+ *
* Constructs like Merge and Multi-Insert may have multiple tasks writing data that belongs to
* the same physical bucket file. For example, a Merge stmt with update and insert clauses,
* (and split update enabled - should be the default in 3.0). A task on behalf of insert may
* be writing a row into bucket 0 and another task in the update branch may be writing an insert
- * event into bucket 0. Each of these task are writing to different delta directory - distinguished
+ * event into bucket 0. Each of these tasks are writing to different delta directory - distinguished
* by statement ID. By including both bucket ID and statement ID in {@link RecordIdentifier}
* we ensure that {@link RecordIdentifier} is unique.
- *
+ *
* The intent is that sorting rows by {@link RecordIdentifier} groups rows in the same physical
* bucket next to each other.
* For any row created by a given version of Hive, top 3 bits are constant. The next
* most significant bits are the bucket ID, then the statement ID. This ensures that
* {@link org.apache.hadoop.hive.ql.optimizer.SortedDynPartitionOptimizer} works which is
* designed so that each task only needs to keep 1 writer opened at a time. It could be
- * configured such that a single writer sees data for multiple buckets so it must "group" data
+ * configured such that a single writer sees data for multiple buckets, so it must "group" data
* by bucket ID (and then sort within each bucket as required) which is achieved via sorting
* by {@link RecordIdentifier} which includes the {@link RecordIdentifier#getBucketProperty()}
- * which has the actual bucket ID in the high order bits. This scheme also ensures that
+ * which has the actual bucket ID in the high order bits. This scheme also ensures that
* {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator#process(Object, int)} works in case
* there numBuckets > numReducers. (The later could be fixed by changing how writers are
* initialized in "if (fpaths.acidLastBucket != bucketNum) {")
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
index 746bd32b937..c6bbe1ab06a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
@@ -212,7 +212,7 @@ public class FlatFileInputFormat<T> extends
* (potentially decompressed) and creates the deserializer.
*
* @param conf
- * the jobconf
+ * the JobConf
* @param split
* the split for this file
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java b/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java
index f91b0702b12..7e3b6704566 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.io.compress.*;
/**
*
- * SchemaAwareCompressionOutputStream adds the ability to inform the comression stream
+ * SchemaAwareCompressionOutputStream adds the ability to inform the compression stream
* the current column being compressed.
*
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java
index 797cd82ba78..6e43dab7102 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java
@@ -155,7 +155,7 @@ public abstract class UnenclosedBaseJsonRecordReader extends RecordReader<LongWr
if (chr < 0) {
if (first_brace_found) {
// last record was invalid
- LOG.error("Parsing error : EOF occured before record ended");
+ LOG.error("Parsing error : EOF occurred before record ended");
}
return false;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 1ebad2d7f6e..775005ac0f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -1454,7 +1454,7 @@ class EncodedReaderImpl implements EncodedReader {
BufferChunk chunk = (i == 0) ? candidateCached.getChunk() : (BufferChunk)next;
dest.put(chunk.getData());
if (isValid) {
- trace.logValidUncompresseedChunk(startLim - startPos, chunk);
+ trace.logValidUncompressedChunk(startLim - startPos, chunk);
}
next = chunk.next;
if (i == 0) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java
index 63af0647689..141df96de30 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java
@@ -60,7 +60,7 @@ public final class IoTrace {
SARG_RESULT = 4, RANGES = 5, COLUMN_READ = 6, SKIP_STREAM = 7,
ADD_STREAM = 8, START_RG = 9, START_COL = 10, START_STRIPE_STREAM = 11,
START_STREAM = 12, START_READ = 13, UNCOMPRESSED_DATA = 14,
- PARTIAL_UNCOMPRESSED_DATA = 15, VALID_UNCOMPRESSEED_CHUNK = 16, CACHE_COLLISION = 17,
+ PARTIAL_UNCOMPRESSED_DATA = 15, VALID_UNCOMPRESSED_CHUNK = 16, CACHE_COLLISION = 17,
ORC_CB = 18, INVALID_ORC_CB = 19, PARTIAL_CB = 20, COMPOSITE_ORC_CB = 21, SARG_RESULT2 = 22;
public void reset() {
@@ -183,7 +183,7 @@ public final class IoTrace {
+ offset + ", " + (offset + getSecondInt(log[ix])) + ")");
return ix + 2;
}
- case VALID_UNCOMPRESSEED_CHUNK: {
+ case VALID_UNCOMPRESSED_CHUNK: {
logger.info(ix + ": Combining uncompressed data for cache buffer of length "
+ getSecondInt(log[ix]) + " from 0x" + Integer.toHexString((int)log[ix + 1]));
return ix + 2;
@@ -396,11 +396,11 @@ public final class IoTrace {
this.offset += 2;
}
- public void logValidUncompresseedChunk(int totalLength, DiskRange chunk) {
+ public void logValidUncompressedChunk(int totalLength, DiskRange chunk) {
if (log == null) return;
int offset = this.offset;
if (offset + 2 > log.length) return;
- log[offset] = makeIntPair(VALID_UNCOMPRESSEED_CHUNK, totalLength);
+ log[offset] = makeIntPair(VALID_UNCOMPRESSED_CHUNK, totalLength);
log[offset + 1] = chunk.hasData() ? System.identityHashCode(chunk.getData()) : 0;
this.offset += 2;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java
index 5e7a1ddfcfb..6b44459b806 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java
@@ -83,7 +83,7 @@ public final class ParquetDataColumnReaderFactory {
protected Dictionary dict;
// After the data is read in the parquet type, isValid will be set to true if the data can be
- // returned in the type defined in HMS. Otherwise isValid is set to false.
+ // returned in the type defined in HMS. Otherwise, isValid is set to false.
boolean isValid = true;
protected int hivePrecision = 0;
@@ -272,7 +272,7 @@ public final class ParquetDataColumnReaderFactory {
}
/**
- * Enforce the max legnth of varchar or char.
+ * Enforce the max length of varchar or char.
*/
protected String enforceMaxLength(String value) {
return HiveBaseChar.enforceMaxLength(value, length);
@@ -1909,8 +1909,8 @@ public final class ParquetDataColumnReaderFactory {
case INT96:
ZoneId targetZone =
skipTimestampConversion ? ZoneOffset.UTC : firstNonNull(writerTimezone, TimeZone.getDefault().toZoneId());
- return isDictionary ?
- new TypesFromInt96PageReader(dictionary, length, targetZone, legacyConversionEnabled) :
+ return isDictionary ?
+ new TypesFromInt96PageReader(dictionary, length, targetZone, legacyConversionEnabled) :
new TypesFromInt96PageReader(valuesReader, length, targetZone, legacyConversionEnabled);
case BOOLEAN:
return isDictionary ? new TypesFromBooleanPageReader(dictionary, length) : new
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
index fe407c13a2a..262427d2c86 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
@@ -23,7 +23,7 @@ import java.util.Stack;
import org.apache.hadoop.hive.ql.parse.SemanticException;
/**
- * Implentation of the Rule interface for Nodes Used in Node dispatching to dispatch
+ * Implementation of the Rule interface for Nodes Used in Node dispatching to dispatch
* process/visitor functions for Nodes. The cost method returns 1 if there is an exact
* match between the expression and the stack, otherwise -1.
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
index 810788104a1..c7edb768aad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
@@ -273,7 +273,7 @@ public class SyslogInputFormat extends TextInputFormat {
jobConf = projectionPusher.pushProjectionsAndFilters(job, finalPath.getParent());
}
// textIF considers '\r' or '\n' as line ending but syslog uses '\r' for escaping new lines. So to read multi-line
- // exceptions correctly we explictly use only '\n'
+ // exceptions correctly we explicitly use only '\n'
jobConf.set("textinputformat.record.delimiter", "\n");
return super.getRecordReader(genericSplit, jobConf, reporter);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
index 66ed2e53f01..d840f3f31d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
@@ -295,13 +295,13 @@ public class SyslogParser implements Closeable {
expect(' ');
- byte[] appname = null;
+ byte[] appName = null;
byte[] procId = null;
byte[] msgId = null;
Map<String, String> structuredData = null;
if (version >= 1) {
- appname = readWordOrNil(48);
+ appName = readWordOrNil(48);
expect(' ');
procId = readWordOrNil(12);
expect(' ');
@@ -310,7 +310,7 @@ public class SyslogParser implements Closeable {
structuredData = readAndParseStructuredData();
} else if (version == 0 && parseTag) {
// Try to find a colon terminated tag.
- appname = readTag();
+ appName = readTag();
if (peek() == '[') {
procId = readPid();
}
@@ -323,7 +323,7 @@ public class SyslogParser implements Closeable {
if (c != -1) {
msg = readLine();
}
- createEvent(version, priority, cal, hostname, appname, procId, msgId, structuredData, msg, row);
+ createEvent(version, priority, cal, hostname, appName, procId, msgId, structuredData, msg, row);
return row;
}
@@ -339,25 +339,27 @@ public class SyslogParser implements Closeable {
/**
* Create a log event from the given parameters.
+ * https://www.rfc-editor.org/rfc/rfc3164
+ * https://www.rfc-editor.org/rfc/rfc5424
*
* @param version the syslog version, 0 for RFC 3164
* @param priority the syslog priority, according to RFC 5424
* @param cal the timestamp of the message. Note that timezone matters
* @param hostname the hostname
- * @param appname the RFC 5424 appname
+ * @param appName the RFC 5424 app-name
* @param procId the RFC 5424 proc-id
* @param msgId the RFC 5424 msg-id
* @param structuredData the RFC 5424 structured-data
* @param body the message body
*/
private void createEvent(int version, int priority, Calendar cal, String hostname,
- byte[] appname, byte[] procId, byte[] msgId, Map<String, String> structuredData, byte[] body, List<Object> row) {
+ byte[] appName, byte[] procId, byte[] msgId, Map<String, String> structuredData, byte[] body, List<Object> row) {
row.add(FACILITIES[priority / 8]);
row.add(getEventPriorityBySyslog(priority));
row.add(version == 0 ? "RFC3164" : "RFC5424");
row.add(Timestamp.ofEpochMilli(cal.getTimeInMillis()));
row.add(hostname != null ? hostname : "");
- row.add(appname != null ? new String(appname) : "");
+ row.add(appName != null ? new String(appName) : "");
row.add(procId != null ? new String(procId) : "");
row.add(msgId != null ? new String(msgId) : "");
row.add(structuredData);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
index 79c329d19b1..e31f9ef2d15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
@@ -51,7 +51,7 @@ public class PartitionIterable implements Iterable<Partition> {
return new Iterator<Partition>(){
private boolean initialized = false;
- private Iterator<Partition> ptnsIterator = null;
+ private Iterator<Partition> partitionIterator = null;
private Iterator<String> partitionNamesIter = null;
private Iterator<Partition> batchIter = null;
@@ -59,7 +59,7 @@ public class PartitionIterable implements Iterable<Partition> {
private void initialize(){
if(!initialized){
if (currType == Type.LIST_PROVIDED){
- ptnsIterator = ptnsProvided.iterator();
+ partitionIterator = ptnsProvided.iterator();
} else {
partitionNamesIter = partitionNames.iterator();
}
@@ -71,7 +71,7 @@ public class PartitionIterable implements Iterable<Partition> {
public boolean hasNext() {
initialize();
if (currType == Type.LIST_PROVIDED){
- return ptnsIterator.hasNext();
+ return partitionIterator.hasNext();
} else {
return ((batchIter != null) && batchIter.hasNext()) || partitionNamesIter.hasNext();
}
@@ -81,7 +81,7 @@ public class PartitionIterable implements Iterable<Partition> {
public Partition next() {
initialize();
if (currType == Type.LIST_PROVIDED){
- return ptnsIterator.next();
+ return partitionIterator.next();
}
if ((batchIter == null) || !batchIter.hasNext()){
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 57e02cf3699..a9ecad39ace 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -246,7 +246,7 @@ public class Table implements Serializable {
// set create time
t.setCreateTime((int) (System.currentTimeMillis() / 1000));
}
- // Explictly set the bucketing version
+ // Explicitly set the bucketing version
t.getParameters().put(hive_metastoreConstants.TABLE_BUCKETING_VERSION,
"2");
return t;
@@ -493,7 +493,7 @@ public class Table implements Serializable {
}
// Please note : Be very careful in using this function. If not used carefully,
- // you may end up overwriting all the existing properties. If the usecase is to
+ // you may end up overwriting all the existing properties. If the use case is to
// add or update certain properties use setProperty() instead.
public void setParameters(Map<String, String> params) {
tTable.setParameters(params);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java
index 75719548909..27518928a4c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
/*
- * This is a plug-able policy to chose the candidate map-join table for converting a join to a
- * sort merge join. The policy can decide the big table position. Some of the existing policies
+ * This is a plug-able policy to choose the candidate map-join table for converting a join to a
+ * sort merge join. The policy can decide the big table position. Some existing policies
* decide the big table based on size or position of the tables.
*/
public interface BigTableSelectorForAutoSMJ {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index f74ac2feeef..84d29ee7ca0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -477,7 +477,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
SMBJoinDesc smbJoinDesc = smbOp.getConf();
int posBigTable = smbJoinDesc.getPosBigTable();
- // join keys dont match the bucketing keys
+ // join keys don't match the bucketing keys
List<ExprNodeDesc> keysBigTable = smbJoinDesc.getKeys().get((byte) posBigTable);
if (keysBigTable.size() != bucketPositions.size()) {
return null;
@@ -621,8 +621,8 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
if (selectDesc.getColList().size() < bucketPositions.size()
|| selectDesc.getColList().size() != fsOp.getSchema().getSignature().size()) {
// Some columns in select are pruned. This may happen if those are constants.
- // TODO: the best solution is to hook the operator before fs with the select operator.
- // See smb_mapjoin_20.q for more details.
+ // TODO: the best solution is to hook the operator before fs with the select operator.
+ // See smb_mapjoin_20.q for more details.
return null;
}
// Only columns can be selected for both sorted and bucketed positions
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
index 3ed22721739..f788ff1a912 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
@@ -209,10 +209,10 @@ public class PartitionColumnsSeparator extends Transform {
* Has at least one subexpression containing a partition/virtual column and refers
* exactly to a single table alias.
* @param en Expression Node Descriptor
- * @return true if there is atleast one subexpression with partition/virtual column
+ * @return true if there is at least one subexpression with partition/virtual column
* and refers exactly to a single table alias. If not, return false.
*/
- private boolean hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
+ private boolean hasAtLeastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
if (en == null || en.getChildren() == null) {
return false;
}
@@ -362,19 +362,19 @@ public class PartitionColumnsSeparator extends Transform {
return null;
}
- // 3. See if the IN (STRUCT(EXP1, EXP2,..) has atleast one expression with partition
+ // 3. See if the IN (STRUCT(EXP1, EXP2,..) has at least one expression with partition
// column with single table alias. If not bail out.
// We might have expressions containing only partitioning columns, say, T1.A + T2.B
// where T1.A and T2.B are both partitioning columns.
// However, these expressions should not be considered as valid expressions for separation.
- if (!hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) {
+ if (!hasAtLeastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) {
LOG.debug(
"Partition columns not separated for {}, there are no expression containing partition columns in struct fields",
fd);
return null;
}
- // 4. See if all the field expressions of the left hand side of IN are expressions
+ // 4. See if all the field expressions of the left hand side of IN are expressions
// containing constants or only partition columns coming from same table.
// If so, we need not perform this optimization and we should bail out.
if (hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(children.get(0))) {
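For orientation, the separation this optimizer performs can be pictured as deriving an extra single-column predicate from a multi-column IN over STRUCTs, so that partition pruning can use it. A hedged, string-level illustration (not the real ExprNodeDesc rewrite) follows:

import java.util.ArrayList;
import java.util.List;

// From a predicate like (struct(ds, col)) IN (struct('2023-01-01','a'), struct('2023-01-02','b')),
// derive the partition-only predicate ds IN ('2023-01-01', '2023-01-02'). Illustration only.
final class PartitionInSeparationSketch {
  static String separate(String partitionColumn, int partitionIndex, List<List<String>> inTuples) {
    List<String> values = new ArrayList<>();
    for (List<String> tuple : inTuples) {
      values.add("'" + tuple.get(partitionIndex) + "'");
    }
    return partitionColumn + " IN (" + String.join(", ", values) + ")";
  }
}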
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java
index d8f0a3041cd..cf419b170c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
* Checks the query plan for conditions that would make the plan unsuitable for
* materialized views or query caching:
* - References to temporary or external tables
- * - References to non-determinisitc functions.
+ * - References to non-deterministic functions.
*/
public class HiveRelOptMaterializationValidator extends HiveRelShuttleImpl {
static final Logger LOG = LoggerFactory.getLogger(HiveRelOptMaterializationValidator.class);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index 385fe9afecc..b1dd697b86f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -275,7 +275,7 @@ public class RelOptHiveTable implements RelOptTable {
final PrimaryKeyInfo primaryKeyInfo = hiveTblMetadata.getPrimaryKeyInfo();
final UniqueConstraint uniqueKeyInfo = hiveTblMetadata.getUniqueKeyInfo();
ImmutableList.Builder<ImmutableBitSet> builder = ImmutableList.builder();
- ImmutableList.Builder<ImmutableBitSet> nonNullbuilder = ImmutableList.builder();
+ ImmutableList.Builder<ImmutableBitSet> nonNullBuilder = ImmutableList.builder();
// First PK
if (primaryKeyInfo != null && !primaryKeyInfo.getColNames().isEmpty()) {
ImmutableBitSet.Builder keys = ImmutableBitSet.builder();
@@ -294,7 +294,7 @@ public class RelOptHiveTable implements RelOptTable {
}
ImmutableBitSet key = keys.build();
builder.add(key);
- nonNullbuilder.add(key);
+ nonNullBuilder.add(key);
}
// Then UKs
if (uniqueKeyInfo != null && !uniqueKeyInfo.getUniqueConstraints().isEmpty()) {
@@ -321,11 +321,11 @@ public class RelOptHiveTable implements RelOptTable {
ImmutableBitSet key = keys.build();
builder.add(key);
if (isNonNullable) {
- nonNullbuilder.add(key);
+ nonNullBuilder.add(key);
}
}
}
- return new Pair<>(builder.build(), nonNullbuilder.build());
+ return new Pair<>(builder.build(), nonNullBuilder.build());
}
private List<RelReferentialConstraint> generateReferentialConstraints() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
index a52c6978ada..6b075307025 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
@@ -23,7 +23,7 @@ import org.apache.calcite.plan.RelOptUtil;
/***
* NOTE:<br>
- * 1. Hivecost normalizes cpu and io in to time.<br>
+ * 1. HiveCost normalizes cpu and io into time.<br>
* 2. CPU, IO cost is added together to find the query latency.<br>
* 3. If query latency is equal then row count is compared.
*/
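A minimal standalone sketch of the ordering that comment describes (CPU and IO normalized into one latency number, ties broken on row count); the class and field names here are illustrative, not the actual HiveCost API:

// Latency = cpu + io (both already normalized to time); if two plans tie on latency,
// the one producing fewer rows wins.
final class CostSketch implements Comparable<CostSketch> {
  final double rows;
  final double cpu;
  final double io;

  CostSketch(double rows, double cpu, double io) {
    this.rows = rows;
    this.cpu = cpu;
    this.io = io;
  }

  double latency() {
    return cpu + io;
  }

  @Override
  public int compareTo(CostSketch other) {
    int byLatency = Double.compare(latency(), other.latency());
    return byLatency != 0 ? byLatency : Double.compare(rows, other.rows);
  }
}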
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
index 49303b29ae6..880a052bea5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
@@ -51,7 +51,7 @@ public class HiveProjectMergeRule extends ProjectMergeRule {
@Override
public boolean matches(RelOptRuleCall call) {
- // Currently we do not support merging windowing functions with other
+ // Currently, we do not support merging windowing functions with other
// windowing functions i.e. embedding windowing functions within each
// other
final Project topProject = call.rel(0);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
index 8fced8fd708..e7dd8bf2b1d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
@@ -386,7 +386,7 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
// Given a groupset this tries to find out if the cardinality of the grouping columns could have changed
- // because if not and it consist of keys (unique + not null OR pk), we can safely remove rest of the columns
+ // because if not, and it consists of keys (unique + not null OR pk), we can safely remove rest of the columns
// if those columns are not being used further up
private ImmutableBitSet generateGroupSetIfCardinalitySame(final Aggregate aggregate,
final ImmutableBitSet originalGroupSet, final ImmutableBitSet fieldsUsed) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java
index 22c0e0ff6a5..079530fa442 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
* This rule is similar to {@link org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule}.
* However, it works on Hive nodes rather than logical nodes.
*
- * <p>The rule pushes a Semijoin down in a tree past a Project if the
+ * <p>The rule pushes a Semi-join down in a tree past a Project if the
* Project is followed by a Join. The intention is to remove Projects
* between Joins.
*
@@ -66,7 +66,7 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
}
//~ Methods ----------------------------------------------------------------
-
+ @Override
public void onMatch(RelOptRuleCall call) {
Join semiJoin = call.rel(0);
Project project = call.rel(1);
@@ -75,7 +75,7 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
// expression; all projection expressions must be RexInputRefs,
// otherwise, we wouldn't have created this semi-join.
- // convert the semijoin condition to reflect the LHS with the project
+ // convert the semi-join condition to reflect the LHS with the project
// pulled up
RexNode newCondition = adjustCondition(project, semiJoin);
@@ -85,7 +85,7 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
// Create the new projection. Note that the projection expressions
// are the same as the original because they only reference the LHS
- // of the semijoin and the semijoin only projects out the LHS
+ // of the semi-join and the semi-join only projects out the LHS
final RelBuilder relBuilder = call.builder();
relBuilder.push(newSemiJoin);
relBuilder.project(project.getProjects(), project.getRowType().getFieldNames());
@@ -105,15 +105,15 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
*/
private RexNode adjustCondition(Project project, Join semiJoin) {
// create two RexPrograms -- the bottom one representing a
- // concatenation of the project and the RHS of the semijoin and the
- // top one representing the semijoin condition
+ // concatenation of the project and the RHS of the semi-join and the
+ // top one representing the semi-join condition
RexBuilder rexBuilder = project.getCluster().getRexBuilder();
RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory();
RelNode rightChild = semiJoin.getRight();
// for the bottom RexProgram, the input is a concatenation of the
- // child of the project and the RHS of the semijoin
+ // child of the project and the RHS of the semi-join
RelDataType bottomInputRowType =
SqlValidatorUtil.deriveJoinRowType(
project.getInput().getRowType(),
@@ -126,7 +126,7 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
new RexProgramBuilder(bottomInputRowType, rexBuilder);
// add the project expressions, then add input references for the RHS
- // of the semijoin
+ // of the semi-join
for (Pair<RexNode, String> pair : project.getNamedProjects()) {
bottomProgramBuilder.addProject(pair.left, pair.right);
}
@@ -143,8 +143,8 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
}
RexProgram bottomProgram = bottomProgramBuilder.getProgram();
- // input rowtype into the top program is the concatenation of the
- // project and the RHS of the semijoin
+ // input rowType into the top program is the concatenation of the
+ // project and the RHS of the semi-join
RelDataType topInputRowType =
SqlValidatorUtil.deriveJoinRowType(
project.getRowType(),
@@ -162,8 +162,8 @@ public class HiveSemiJoinProjectTransposeRule extends RelOptRule {
RexProgram topProgram = topProgramBuilder.getProgram();
// merge the programs and expand out the local references to form
- // the new semijoin condition; it now references a concatenation of
- // the project's child and the RHS of the semijoin
+ // the new semi-join condition; it now references a concatenation of
+ // the project's child and the RHS of the semi-join
RexProgram mergedProgram =
RexProgramBuilder.mergePrograms(
topProgram,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
index 70020cf5785..9ce0c07a1fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
@@ -66,7 +66,7 @@ public class HiveSortUnionReduceRule extends RelOptRule {
// We only apply this rule if Union.all is true.
// And Sort.fetch is not null and it is more than 0.
return union.all && sort.fetch != null
- // Calite bug CALCITE-987
+ // Calcite bug CALCITE-987
&& RexLiteral.intValue(sort.fetch) > 0;
}
@@ -74,7 +74,7 @@ public class HiveSortUnionReduceRule extends RelOptRule {
final HiveSortLimit sort = call.rel(0);
final HiveUnion union = call.rel(1);
List<RelNode> inputs = new ArrayList<>();
- // Thus we use 'finishPushSortPastUnion' as a flag to identify if we have finished pushing the
+ // Thus, we use 'finishPushSortPastUnion' as a flag to identify if we have finished pushing the
// sort past a union.
boolean finishPushSortPastUnion = true;
final int offset = sort.offset == null ? 0 : RexLiteral.intValue(sort.offset);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java
index adc090c19fe..5cefd025db2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java
@@ -1094,7 +1094,7 @@ public class RelFieldTrimmer implements ReflectiveVisitor {
* <p>The mapping is a
* {@link org.apache.calcite.util.mapping.Mappings.SourceMapping}, which means
* that no column can be used more than once, and some columns are not used.
- * {@code columnsUsed.getSource(i)} returns the source of the i'th output
+ * {@code columnsUsed.getSource(i)} returns the source of the i-th output
* field.
*
* <p>For example, consider the mapping for a relational expression that
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 818fcf45a84..0b778decf24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -184,7 +184,7 @@ public class HiveRelMdPredicates extends RelMdPredicates {
return jI.inferPredicates(false);
}
-
+
/**
* Utility to infer predicates from one side of the join that apply on the
* other side.
@@ -414,7 +414,7 @@ public class HiveRelMdPredicates extends RelMdPredicates {
}
private void infer(List<RexNode> predicates, Set<String> allExprsDigests,
- List<RexNode> inferedPredicates, List<RexNode> nonFieldsPredicates,
+ List<RexNode> inferredPredicates, List<RexNode> nonFieldsPredicates,
boolean includeEqualityInference, ImmutableBitSet inferringFields) {
for (RexNode r : predicates) {
if (!includeEqualityInference
@@ -430,7 +430,7 @@ public class HiveRelMdPredicates extends RelMdPredicates {
if (inferringFields.contains(RelOptUtil.InputFinder.bits(tr))
&& !allExprsDigests.contains(tr.toString())
&& !isAlwaysTrue(tr)) {
- inferedPredicates.add(tr);
+ inferredPredicates.add(tr);
allExprsDigests.add(tr.toString());
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
index 19bd13de9a1..d30ba9cbdc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
@@ -215,7 +215,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
}
/*
- * a) Order predciates based on ndv in reverse order. b) ndvCrossProduct =
+ * a) Order predicates based on ndv in reverse order. b) ndvCrossProduct =
* ndv(pe0) * ndv(pe1) ^(1/2) * ndv(pe2) ^(1/4) * ndv(pe3) ^(1/8) ...
*/
protected double exponentialBackoff(List<JoinLeafPredicateInfo> peLst,
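The formula in that comment can be made concrete with a few lines of arithmetic: order the per-predicate NDVs from largest to smallest and halve the exponent at each step. A standalone sketch using plain doubles rather than JoinLeafPredicateInfo:

import java.util.Arrays;

// ndvCrossProduct = ndv(pe0) * ndv(pe1)^(1/2) * ndv(pe2)^(1/4) * ndv(pe3)^(1/8) * ...
// with the NDVs taken in descending order, as the comment above describes.
final class NdvBackoff {
  static double ndvCrossProduct(double[] ndvs) {
    double[] sorted = ndvs.clone();
    Arrays.sort(sorted);                             // ascending
    double product = 1.0;
    double exponent = 1.0;
    for (int i = sorted.length - 1; i >= 0; i--) {   // walk from largest to smallest NDV
      product *= Math.pow(sorted[i], exponent);
      exponent /= 2.0;
    }
    return product;
  }

  public static void main(String[] args) {
    // e.g. NDVs 1000, 100, 10 -> 1000 * 100^(1/2) * 10^(1/4) ≈ 17,783
    System.out.println(ndvCrossProduct(new double[] {1000, 100, 10}));
  }
}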
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index 6702f8ae9d6..39ebf61d1a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -173,7 +173,7 @@ public class ASTBuilder {
// NOTE: Calcite considers tbls to be equal if their names are the same. Hence
// we need to provide Calcite the fully qualified table name (dbname.tblname)
// and not the user provided aliases.
- // However in HIVE DB name can not appear in select list; in case of join
+ // However, in HIVE DB name can not appear in select list; in case of join
// where table names differ only in DB name, Hive would require user
// introducing explicit aliases for tbl.
b.add(HiveParser.Identifier, hts.getTableAlias());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java
index 34d4ee380c6..7a363aafd1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java
@@ -158,7 +158,7 @@ final class HiveGBOpConvUtil {
return gbPhysicalPipelineMode;
}
- // For each of the GB op in the logical GB this should be called seperately;
+ // For each of the GB op in the logical GB this should be called separately;
// otherwise GBevaluator and expr nodes may get shared among multiple GB ops
private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf hc) throws SemanticException {
GBInfo gbInfo = new GBInfo();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java
index 14958aa674d..1fb5368594d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java
@@ -109,8 +109,8 @@ class HiveTableScanVisitor extends HiveRelNodeVisitor<HiveTableScan> {
TableScanOperator ts = (TableScanOperator) OperatorFactory.get(
hiveOpConverter.getSemanticAnalyzer().getOpContext(), tsd, new RowSchema(colInfos));
- //now that we let Calcite process subqueries we might have more than one
- // tablescan with same alias.
+ // now that we let Calcite process sub-queries we might have more than one
+ // tableScan with same alias.
if (hiveOpConverter.getTopOps().get(tableAlias) != null) {
tableAlias = tableAlias + hiveOpConverter.getUniqueCounter();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
index ba34470dd18..5af598d363c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
@@ -107,8 +107,8 @@ public class BucketingSortingCtx implements NodeProcessorCtx {
* Classes that implement this interface provide a way to store information about equivalent
* columns as their names and indexes in the schema change going into and out of operators. The
* definition of equivalent columns is up to the class which uses these classes, e.g.
- * BucketingSortingOpProcFactory. For example, two columns are equivalent if they
- * contain exactly the same data. Though, it's possible that two columns contain exactly the
+ * BucketingSortingOpProcFactory. For example, two columns are equivalent if they
+ * contain exactly the same data. Though, it's possible that two columns contain exactly the
* same data and are not known to be equivalent.
*
* E.g. SELECT key a, key b FROM (SELECT key, count(*) c FROM src GROUP BY key) s;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java
index 27c38cea813..b0cf872b3e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
*
* For each map reduce task, attempts to infer bucketing and sorting metadata for the outputs.
*
- * Currently only map reduce tasks which produce final output have there output metadata inferred,
+ * Currently, only map reduce tasks which produce final output have their output metadata inferred,
* but it can be extended to intermediate tasks as well.
*
* This should be run as the last physical optimizer, as other physical optimizers may invalidate
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 66f0891fecf..a6e4ffaced0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -1471,7 +1471,7 @@ public class StatsRulesProcFactory {
/**
* GROUPBY operator changes the number of rows. The number of rows emitted by GBY operator will be
- * atleast 1 or utmost T(R) (number of rows in relation T) based on the aggregation. A better
+ * at least 1 or at most T(R) (number of rows in relation T) based on the aggregation. A better
* estimate can be found if we have column statistics on the columns that we are grouping on.
* <p>
* Suppose if we are grouping by attributes A,B,C and if statistics for columns A,B,C are
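One common way to turn those bounds into a number, when NDVs are available for the grouping columns, is to cap the row count by the product of the NDVs. The sketch below is a hedged illustration of that idea, not necessarily the exact dampened formula StatsRulesProcFactory applies:

// GROUP BY output cardinality bound: at least 1, at most T(R), and no more than the
// product of the grouping columns' NDVs. Illustration only.
final class GroupByEstimate {
  static long estimate(long numRows, long[] groupColumnNdvs) {
    double ndvProduct = 1.0;
    for (long ndv : groupColumnNdvs) {
      ndvProduct *= Math.max(ndv, 1);
    }
    long estimate = (long) Math.min(ndvProduct, (double) numRows);
    return Math.max(estimate, 1);          // a non-empty input produces at least one group
  }

  public static void main(String[] args) {
    // 1M rows grouped by columns with NDVs 100 and 50 -> capped at 5,000 groups
    System.out.println(estimate(1_000_000L, new long[] {100, 50}));
  }
}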
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index d35baa18683..f5d9a8d3cdd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -738,7 +738,7 @@ public abstract class BaseSemanticAnalyzer {
* Escapes the string for AST; doesn't enclose it in quotes, however.
*/
public static String escapeSQLString(String b) {
- // There's usually nothing to escape so we will be optimistic.
+ // There's usually nothing to escape, so we will be optimistic.
String result = b;
for (int i = 0; i < result.length(); ++i) {
char currentChar = result.charAt(i);
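The "optimistic" strategy mentioned in that comment, copying nothing until the first character that actually needs escaping, looks roughly like the sketch below; the set of escaped characters here is an assumption for illustration, not Hive's full table:

// Scan for the first character that needs escaping; only then allocate and build a new string.
final class OptimisticEscape {
  static String escape(String s) {
    for (int i = 0; i < s.length(); i++) {
      if (needsEscape(s.charAt(i))) {
        StringBuilder sb = new StringBuilder(s.length() + 8);
        sb.append(s, 0, i);                          // copy the clean prefix once
        for (int j = i; j < s.length(); j++) {
          char c = s.charAt(j);
          if (needsEscape(c)) {
            sb.append('\\');
          }
          sb.append(c);
        }
        return sb.toString();
      }
    }
    return s;                                        // common case: nothing to escape, no copy
  }

  private static boolean needsEscape(char c) {
    return c == '\'' || c == '\\';                   // assumed escape set, for illustration
  }
}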
@@ -1302,7 +1302,7 @@ public abstract class BaseSemanticAnalyzer {
specType = SpecType.STATIC_PARTITION;
}
} else if(createDynPartSpec(ast) && allowDynamicPartitionsSpec) {
- // if user hasn't specify partition spec generate it from table's partition spec
+ // if user hasn't specified partition spec generate it from table's partition spec
// do this only if it is INSERT/INSERT INTO/INSERT OVERWRITE/ANALYZE
List<FieldSchema> parts = tableHandle.getPartitionKeys();
partSpec = new LinkedHashMap<String, String>(parts.size());
@@ -1714,7 +1714,7 @@ public abstract class BaseSemanticAnalyzer {
TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
// Since partVal is a constant, it is safe to cast ExprNodeDesc to ExprNodeConstantDesc.
// Its value should be in normalized format (e.g. no leading zero in integer, date is in
- // format of YYYY-MM-DD etc)
+ // format of YYYY-MM-DD etc.)
Object value = ((ExprNodeConstantDesc)astExprNodePair.getValue()).getValue();
Object convertedValue = value;
if (!inputOI.getTypeName().equals(outputOI.getTypeName())) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index a500d2be957..ac69a5e7595 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1920,7 +1920,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
// 8. Rerun PPD through Project as column pruning would have introduced
// DT above scans; By pushing filter just above TS, Hive can push it into
- // storage (incase there are filters on non partition cols). This only
+ // storage (in case there are filters on non partition cols). This only
// matches FIL-PROJ-TS
// Also merge, remove and reduce Project if possible
generatePartialProgram(program, true, HepMatchOrder.TOP_DOWN,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
index 8b4fa0f4619..61b6f67c968 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
@@ -378,7 +378,7 @@ public final class ParseUtils {
return;
}
- // Then, find the leftmost logical sibling select, because that's what Hive uses for aliases.
+ // Then, find the leftmost logical sibling select, because that's what Hive uses for aliases.
while (true) {
CommonTree queryOfSelect = select.parent;
while (queryOfSelect != null && queryOfSelect.getType() != HiveParser.TOK_QUERY) {
@@ -480,7 +480,7 @@ public final class ParseUtils {
return false;
}
if (!aliases.add(colAlias)) {
- // TODO: if a side of the union has 2 columns with the same name, noone on the higher
+ // TODO: if a side of the union has 2 columns with the same name, none on the higher
// level can refer to them. We could change the alias in the original node.
LOG.debug("Replacing SETCOLREF with ALLCOLREF because of duplicate alias " + colAlias);
return false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
index affd608c38c..2f16abdc2b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
@@ -469,7 +469,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
*/
private int numOfCorrelationExprsAddedToSQSelect;
- private boolean groupbyAddedToSQ;
+ private boolean groupByAddedToSQ;
private int numOuterCorrExprsForHaving;
@@ -497,7 +497,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
originalSQAST.getTokenStartIndex(), originalSQAST.getTokenStopIndex());
originalSQASTOrigin = new ASTNodeOrigin("SubQuery", alias, s, alias, originalSQAST);
numOfCorrelationExprsAddedToSQSelect = 0;
- groupbyAddedToSQ = false;
+ groupByAddedToSQ = false;
if ( operator.getType() == SubQueryType.NOT_IN ) {
notInCheck = new NotInCheck();
@@ -670,7 +670,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
*/
if ( operator.getType() == SubQueryType.EXISTS &&
containsAggregationExprs &&
- groupbyAddedToSQ ) {
+ groupByAddedToSQ) {
throw new SemanticException(ASTErrorUtils.getMsg(
ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(),
subQueryAST,
@@ -679,7 +679,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
}
if ( operator.getType() == SubQueryType.NOT_EXISTS &&
containsAggregationExprs &&
- groupbyAddedToSQ ) {
+ groupByAddedToSQ) {
throw new SemanticException(ASTErrorUtils.getMsg(
ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(),
subQueryAST,
@@ -761,7 +761,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
"Correlating expression contains ambiguous column references."));
}
}
-
+
parentQueryJoinCond = SubQueryUtils.buildOuterQryToSQJoinCond(
parentExpr,
alias,
@@ -903,9 +903,9 @@ public class QBSubQuery implements ISubQueryJoinInfo {
rewriteCorrConjunctForHaving(conjunctAST, false, outerQueryAlias,
parentQueryRR, conjunct.getRightOuterColInfo());
}
- ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate(
+ ASTNode joinPredicate = SubQueryUtils.alterCorrelatedPredicate(
conjunctAST, sqExprForCorr, true);
- joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredciate);
+ joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredicate);
subQueryJoinAliasExprs.add(sqExprForCorr);
ASTNode selExpr = SubQueryUtils.createSelectItem(conjunct.getLeftExpr(), sqExprAlias);
selectClause.addChild(selExpr);
@@ -927,9 +927,9 @@ public class QBSubQuery implements ISubQueryJoinInfo {
rewriteCorrConjunctForHaving(conjunctAST, true, outerQueryAlias,
parentQueryRR, conjunct.getLeftOuterColInfo());
}
- ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate(
+ ASTNode joinPredicate = SubQueryUtils.alterCorrelatedPredicate(
conjunctAST, sqExprForCorr, false);
- joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredciate);
+ joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredicate);
subQueryJoinAliasExprs.add(sqExprForCorr);
ASTNode selExpr = SubQueryUtils.createSelectItem(conjunct.getRightExpr(), sqExprAlias);
selectClause.addChild(selExpr);
@@ -982,7 +982,7 @@ public class QBSubQuery implements ISubQueryJoinInfo {
}
groupBy = SubQueryUtils.buildGroupBy();
- groupbyAddedToSQ = true;
+ groupByAddedToSQ = true;
List<ASTNode> newChildren = new ArrayList<ASTNode>();
newChildren.add(groupBy);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 0e810f13d6b..1a586646109 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -88,7 +88,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
- LOG.debug("ReplicationSemanticAanalyzer: analyzeInternal");
+ LOG.debug("ReplicationSemanticAnalyzer: analyzeInternal");
LOG.debug(ast.getName() + ":" + ast.getToken().getText() + "=" + ast.getText());
// Some of the txn related configs were not set when ReplicationSemanticAnalyzer.conf was initialized.
// It should be set first.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index ad30ef5e1a8..067c35dac40 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -13508,7 +13508,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
/**
* This api is used to determine whether to create acid tables or not.
- * if the default table type is set to external, then create transcational table should result in acid tables,
+ * if the default table type is set to external, then create transactional table should result in acid tables,
* else create table should result in external table.
* */
private boolean isExternalTableChanged (Map<String, String> tblProp, boolean isTransactional, boolean isExt, boolean isTableTypeChanged) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java
index 5ddcd31d566..3848cd9a550 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java
@@ -84,7 +84,7 @@ public class TableAccessAnalyzer {
SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, tableAccessCtx);
SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
- // Create a list of topop nodes and walk!
+ // Create a list of topOp nodes and walk!
List<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pGraphContext.getTopOps().values());
ogw.startWalking(topNodes, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index d2b73d924e5..f514f536f32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.hive.ql.parse;
import java.util.List;
/**
- *
+ *
* This class stores all the information specified in the TABLESAMPLE clause.
* e.g. for the clause "FROM t TABLESAMPLE(1 OUT OF 2 ON c1)" it will store the
* numerator 1, the denominator 2 and the list of expressions(in this case c1)
- * in the appropriate fields. The afore-mentioned sampling clause causes the 1st
+ * in the appropriate fields. The aforementioned sampling clause causes the 1st
* bucket to be picked out of the 2 buckets created by hashing on c1.
- *
+ *
*/
public class TableSample {
@@ -59,7 +59,7 @@ public class TableSample {
/**
* Constructs the TableSample given the numerator, denominator and the list of
* ON clause expressions.
- *
+ *
* @param num
* The numerator
* @param den
@@ -81,7 +81,7 @@ public class TableSample {
/**
* Gets the numerator.
- *
+ *
* @return int
*/
public int getNumerator() {
@@ -90,7 +90,7 @@ public class TableSample {
/**
* Sets the numerator.
- *
+ *
* @param num
* The numerator
*/
@@ -100,7 +100,7 @@ public class TableSample {
/**
* Gets the denominator.
- *
+ *
* @return int
*/
public int getDenominator() {
@@ -109,7 +109,7 @@ public class TableSample {
/**
* Sets the denominator.
- *
+ *
* @param den
* The denominator
*/
@@ -119,7 +119,7 @@ public class TableSample {
/**
* Gets the ON part's expression list.
- *
+ *
* @return ArrayList<ASTNode>
*/
public List<ASTNode> getExprs() {
@@ -128,7 +128,7 @@ public class TableSample {
/**
* Sets the expression list.
- *
+ *
* @param exprs
* The expression list
*/
@@ -138,7 +138,7 @@ public class TableSample {
/**
* Gets the flag that indicates whether input pruning is possible.
- *
+ *
* @return boolean
*/
public boolean getInputPruning() {
@@ -147,7 +147,7 @@ public class TableSample {
/**
* Sets the flag that indicates whether input pruning is possible or not.
- *
+ *
* @param inputPruning
* true if input pruning is possible
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 3c37e9958fd..3b489535c33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -101,7 +101,7 @@ import java.util.Map;
import java.util.Set;
/**
- * TaskCompiler is a the base class for classes that compile
+ * TaskCompiler is the base class for classes that compile
* operator pipelines into tasks.
*/
public abstract class TaskCompiler {
@@ -270,7 +270,7 @@ public abstract class TaskCompiler {
}
if (outerQueryLimit == 0) {
// Believe it or not, some tools do generate queries with limit 0 and then expect
- // query to run quickly. Lets meet their requirement.
+ // query to run quickly. Let's meet their requirement.
LOG.info("Limit 0. No query execution needed.");
return;
}
@@ -751,7 +751,7 @@ public abstract class TaskCompiler {
protected abstract void setInputFormat(Task<?> rootTask);
/*
- * Called to generate the taks tree from the parse context/operator tree
+ * Called to generate the tasks tree from the parse context/operator tree
*/
protected abstract void generateTaskTree(List<Task<?>> rootTasks, ParseContext pCtx,
List<Task<MoveWork>> mvTask, Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
index 48e8a960c91..4f074879b8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
@@ -110,7 +110,7 @@ public class WindowingSpec {
* Apply the rules in the Spec. to fill in any missing pieces of every Window Specification,
* also validate that the effective Specification is valid. The rules applied are:
* - For Wdw Specs that refer to Window Defns, inherit missing components.
- * - A Window Spec with no Parition Spec, is Partitioned on a Constant(number 0)
+ * - A Window Spec with no Partition Spec, is Partitioned on a Constant(number 0)
* - For missing Wdw Frames or for Frames with only a Start Boundary, completely specify them
* by the rules in {@link effectiveWindowFrame}
* - Validate the effective Window Frames with the rules in {@link validateWindowFrame}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java
index 08721346d59..7ba2c279460 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplState.LogTag;
/**
* BootstrapDumpLogger.
*
- * Repllogger for bootstrap dump.
+ * ReplLogger for bootstrap dump.
**/
public class BootstrapDumpLogger extends ReplLogger<String> {
private String dbName;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java
index 4f24c0c3d88..4e979e7c455 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplState.LogTag;
/**
* IncrementalDumpLogger.
*
- * Repllogger for incremental dump.
+ * ReplLogger for incremental dump.
**/
public class IncrementalDumpLogger extends ReplLogger<String> {
private String dbName;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java
index 7c8ba62e724..e29569b1106 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplState.LogTag;
/**
* IncrementalLoadLogger.
*
- * Repllogger for Incremental Load.
+ * ReplLogger for Incremental Load.
**/
public class IncrementalLoadLogger extends ReplLogger<String> {
private final ReplStatsTracker replStatsTracker;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
index 35e02e54b7f..87e78444f96 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
@@ -56,7 +56,7 @@ public abstract class BaseWork extends AbstractOperatorDesc {
protected static final Logger LOG = LoggerFactory.getLogger(BaseWork.class);
// dummyOps is a reference to all the HashTableDummy operators in the
- // plan. These have to be separately initialized when we setup a task.
+ // plan. These have to be separately initialized when we set up a task.
// Their function is mainly as root ops to give the mapjoin the correct
// schema info.
List<HashTableDummyOperator> dummyOps;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
index 20f7d2e0e40..56399e434ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
@@ -41,11 +41,11 @@ public class BasicStatsWork implements Serializable {
private boolean statsReliable; // are stats completely reliable
// If stats aggregator is not present, clear the current aggregator stats.
- // For eg. if a merge is being performed, stats already collected by aggregator (numrows etc.)
+ // For example, if a merge is being performed, stats already collected by aggregator (numrows etc.)
// are still valid. However, if a load file is being performed, the old stats collected by
// aggregator are not valid. It might be a good idea to clear them instead of leaving wrong
// and old stats.
- // Since HIVE-12661, we maintain the old stats (although may be wrong) for CBO
+ // Since HIVE-12661, we maintain the old stats (although it may be wrong) for CBO
// purpose. We use a flag COLUMN_STATS_ACCURATE to
// show the accuracy of the stats.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java
index 75cdba5abd8..717d1f8b6a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java
@@ -22,7 +22,7 @@ public class ColStatistics {
private String colName;
private String colType;
- private long countDistint;
+ private long countDistinct;
private long numNulls;
private double avgColLen;
private long numTrues;
@@ -62,11 +62,11 @@ public class ColStatistics {
}
public long getCountDistint() {
- return countDistint;
+ return countDistinct;
}
- public void setCountDistint(long countDistint) {
- this.countDistint = countDistint;
+ public void setCountDistint(long countDistinct) {
+ this.countDistinct = countDistinct;
}
public long getNumNulls() {
@@ -137,7 +137,7 @@ public class ColStatistics {
sb.append(" colType: ");
sb.append(colType);
sb.append(" countDistincts: ");
- sb.append(countDistint);
+ sb.append(countDistinct);
sb.append(" numNulls: ");
sb.append(numNulls);
sb.append(" avgColLen: ");
@@ -162,7 +162,7 @@ public class ColStatistics {
public ColStatistics clone() {
ColStatistics clone = new ColStatistics(colName, colType);
clone.setAvgColLen(avgColLen);
- clone.setCountDistint(countDistint);
+ clone.setCountDistint(countDistinct);
clone.setNumNulls(numNulls);
clone.setNumTrues(numTrues);
clone.setNumFalses(numFalses);
@@ -225,11 +225,11 @@ public class ColStatistics {
private void setIsFilteredColumn(boolean isFilteredColumn2) {
isFilteredColumn=isFilteredColumn2;
-
+
}
-
+
public boolean isFilteredColumn() {
return isFilteredColumn;
}
-
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 4e7c86807e0..66e386c4e5f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -513,7 +513,7 @@ public class FileSinkDesc extends AbstractOperatorDesc implements IStatsGatherDe
*/
@Override
@Explain(displayName = "Stats Publishing Key Prefix", explainLevels = { Level.EXTENDED })
- // FIXME: including this in the signature will almost certenly differ even if the operator is doing the same
+ // FIXME: including this in the signature will almost certainly differ even if the operator is doing the same
// there might be conflicting usages of logicalCompare?
@Signature
public String getStatsAggPrefix() {
@@ -685,7 +685,7 @@ public class FileSinkDesc extends AbstractOperatorDesc implements IStatsGatherDe
return getBucketingVersion();
}
/**
- * Whether this is CREATE TABLE SELECT or CREATE MATERIALIZED VIEW statemet
+ * Whether this is CREATE TABLE SELECT or CREATE MATERIALIZED VIEW statement
* Set by semantic analyzer this is required because CTAS/CM requires some special logic
* in mvFileToFinalPath
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java
index 150362417cb..33d0148bca4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java
@@ -59,7 +59,7 @@ public class ResetProcessor implements CommandProcessor {
}
String[] parts = command.split("\\s+");
boolean isDefault = false;
- List<String> varnames = new ArrayList<>(parts.length);
+ List<String> varNames = new ArrayList<>(parts.length);
for (String part : parts) {
if (part.isEmpty()) {
continue;
@@ -67,22 +67,22 @@ public class ResetProcessor implements CommandProcessor {
if (DEFAULT_ARG.equals(part)) {
isDefault = true;
} else {
- varnames.add(part);
+ varNames.add(part);
}
}
- if (varnames.isEmpty()) {
+ if (varNames.isEmpty()) {
throw new CommandProcessorException(1, -1, "No variable names specified", "42000", null);
}
String variableNames = "";
- for (String varname : varnames) {
+ for (String varName : varNames) {
if (isDefault) {
if (!variableNames.isEmpty()) {
variableNames += ", ";
}
- variableNames += varname;
- resetToDefault(ss, varname);
+ variableNames += varName;
+ resetToDefault(ss, varName);
} else {
- resetOverrideOnly(ss, varname);
+ resetOverrideOnly(ss, varName);
}
}
String message = isDefault ? "Resetting " + variableNames + " to default values" : null;
@@ -100,32 +100,32 @@ public class ResetProcessor implements CommandProcessor {
ss.getOverriddenConfigurations().clear();
}
- private static void resetOverrideOnly(SessionState ss, String varname) {
- if (!ss.getOverriddenConfigurations().containsKey(varname)) {
+ private static void resetOverrideOnly(SessionState ss, String varName) {
+ if (!ss.getOverriddenConfigurations().containsKey(varName)) {
return;
}
- setSessionVariableFromConf(ss, varname, new HiveConf());
- ss.getOverriddenConfigurations().remove(varname);
+ setSessionVariableFromConf(ss, varName, new HiveConf());
+ ss.getOverriddenConfigurations().remove(varName);
}
- private static void setSessionVariableFromConf(SessionState ss, String varname, HiveConf conf) {
- String value = conf.get(varname);
+ private static void setSessionVariableFromConf(SessionState ss, String varName, HiveConf conf) {
+ String value = conf.get(varName);
if (value != null) {
- SetProcessor.setConf(ss, varname, varname, value, false);
+ SetProcessor.setConf(ss, varName, varName, value, false);
}
}
- private static CommandProcessorResponse resetToDefault(SessionState ss, String varname)
+ private static CommandProcessorResponse resetToDefault(SessionState ss, String varName)
throws CommandProcessorException {
- varname = varname.trim();
+ varName = varName.trim();
try {
String nonErrorMessage = null;
- if (varname.startsWith(SystemVariables.HIVECONF_PREFIX)){
- String propName = varname.substring(SystemVariables.HIVECONF_PREFIX.length());
+ if (varName.startsWith(SystemVariables.HIVECONF_PREFIX)){
+ String propName = varName.substring(SystemVariables.HIVECONF_PREFIX.length());
nonErrorMessage = SetProcessor.setConf(
- varname, propName, getConfVar(propName).getDefaultValue(), false);
- } else if (varname.startsWith(SystemVariables.METACONF_PREFIX)) {
- String propName = varname.substring(SystemVariables.METACONF_PREFIX.length());
+ varName, propName, getConfVar(propName).getDefaultValue(), false);
+ } else if (varName.startsWith(SystemVariables.METACONF_PREFIX)) {
+ String propName = varName.substring(SystemVariables.METACONF_PREFIX.length());
HiveConf.ConfVars confVars = getConfVar(propName);
Hive.get(ss.getConf()).setMetaConf(propName, new VariableSubstitution(new HiveVariableSource() {
@Override
@@ -134,9 +134,9 @@ public class ResetProcessor implements CommandProcessor {
}
}).substitute(ss.getConf(), confVars.getDefaultValue()));
} else {
- String defaultVal = getConfVar(varname).getDefaultValue();
- nonErrorMessage = SetProcessor.setConf(varname, varname, defaultVal, true);
- if (varname.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) {
+ String defaultVal = getConfVar(varName).getDefaultValue();
+ nonErrorMessage = SetProcessor.setConf(varName, varName, defaultVal, true);
+ if (varName.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) {
SessionState.get().updateHistory(Boolean.parseBoolean(defaultVal), ss);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
index f102d61fd09..9c89c3bd3ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
@@ -158,72 +158,72 @@ public class SetProcessor implements CommandProcessor {
}
}
- public CommandProcessorResponse executeSetVariable(String varname, String varvalue) throws CommandProcessorException {
+ public CommandProcessorResponse executeSetVariable(String varName, String varValue) throws CommandProcessorException {
try {
- return setVariable(varname, varvalue);
+ return setVariable(varName, varValue);
} catch (Exception e) {
Throwable exception = e instanceof IllegalArgumentException ? null : e;
throw new CommandProcessorException(1, -1, e.getMessage(), "42000", exception);
}
}
- public static CommandProcessorResponse setVariable(String varname, String varvalue) throws Exception {
+ public static CommandProcessorResponse setVariable(String varName, String varValue) throws Exception {
SessionState ss = SessionState.get();
- if (varvalue.contains("\n")){
+ if (varValue.contains("\n")){
ss.err.println("Warning: Value had a \\n character in it.");
}
- varname = varname.trim();
+ varName = varName.trim();
String nonErrorMessage = null;
- if (varname.startsWith(ENV_PREFIX)){
+ if (varName.startsWith(ENV_PREFIX)){
ss.err.println("env:* variables can not be set.");
throw new CommandProcessorException(1); // Should we propagate the error message properly?
- } else if (varname.startsWith(SYSTEM_PREFIX)){
- String propName = varname.substring(SYSTEM_PREFIX.length());
+ } else if (varName.startsWith(SYSTEM_PREFIX)){
+ String propName = varName.substring(SYSTEM_PREFIX.length());
System.getProperties()
.setProperty(propName, new VariableSubstitution(new HiveVariableSource() {
@Override
public Map<String, String> getHiveVariable() {
return SessionState.get().getHiveVariables();
}
- }).substitute(ss.getConf(), varvalue));
- } else if (varname.startsWith(HIVECONF_PREFIX)){
- String propName = varname.substring(HIVECONF_PREFIX.length());
- nonErrorMessage = setConf(varname, propName, varvalue, false);
- } else if (varname.startsWith(HIVEVAR_PREFIX)) {
- String propName = varname.substring(HIVEVAR_PREFIX.length());
+ }).substitute(ss.getConf(), varValue));
+ } else if (varName.startsWith(HIVECONF_PREFIX)){
+ String propName = varName.substring(HIVECONF_PREFIX.length());
+ nonErrorMessage = setConf(varName, propName, varValue, false);
+ } else if (varName.startsWith(HIVEVAR_PREFIX)) {
+ String propName = varName.substring(HIVEVAR_PREFIX.length());
ss.getHiveVariables().put(propName, new VariableSubstitution(new HiveVariableSource() {
@Override
public Map<String, String> getHiveVariable() {
return SessionState.get().getHiveVariables();
}
- }).substitute(ss.getConf(), varvalue));
- } else if (varname.startsWith(METACONF_PREFIX)) {
- String propName = varname.substring(METACONF_PREFIX.length());
+ }).substitute(ss.getConf(), varValue));
+ } else if (varName.startsWith(METACONF_PREFIX)) {
+ String propName = varName.substring(METACONF_PREFIX.length());
Hive hive = Hive.get(ss.getConf());
hive.setMetaConf(propName, new VariableSubstitution(new HiveVariableSource() {
@Override
public Map<String, String> getHiveVariable() {
return SessionState.get().getHiveVariables();
}
- }).substitute(ss.getConf(), varvalue));
+ }).substitute(ss.getConf(), varValue));
} else {
- nonErrorMessage = setConf(varname, varname, varvalue, true);
- if (varname.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) {
- SessionState.get().updateHistory(Boolean.parseBoolean(varvalue), ss);
+ nonErrorMessage = setConf(varName, varName, varValue, true);
+ if (varName.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) {
+ SessionState.get().updateHistory(Boolean.parseBoolean(varValue), ss);
}
}
return new CommandProcessorResponse(null, nonErrorMessage);
}
- static String setConf(String varname, String key, String varvalue, boolean register)
+ static String setConf(String varName, String key, String varValue, boolean register)
throws IllegalArgumentException {
- return setConf(SessionState.get(), varname, key, varvalue, register);
+ return setConf(SessionState.get(), varName, key, varValue, register);
}
/**
* @return A console message that is not strong enough to fail the command (e.g. deprecation).
*/
- static String setConf(SessionState ss, String varname, String key, String varvalue, boolean register)
+ static String setConf(SessionState ss, String varName, String key, String varValue, boolean register)
throws IllegalArgumentException {
String result = null;
HiveConf conf = ss.getConf();
@@ -232,13 +232,13 @@ public class SetProcessor implements CommandProcessor {
public Map<String, String> getHiveVariable() {
return ss.getHiveVariables();
}
- }).substitute(conf, varvalue);
+ }).substitute(conf, varValue);
if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
HiveConf.ConfVars confVars = HiveConf.getConfVars(key);
if (confVars != null) {
if (!confVars.isType(value)) {
StringBuilder message = new StringBuilder();
- message.append("'SET ").append(varname).append('=').append(varvalue);
+ message.append("'SET ").append(varName).append('=').append(varValue);
message.append("' FAILED because ").append(key).append(" expects ");
message.append(confVars.typeString()).append(" type value.");
throw new IllegalArgumentException(message.toString());
@@ -246,7 +246,7 @@ public class SetProcessor implements CommandProcessor {
String fail = confVars.validate(value);
if (fail != null) {
StringBuilder message = new StringBuilder();
- message.append("'SET ").append(varname).append('=').append(varvalue);
+ message.append("'SET ").append(varName).append('=').append(varValue);
message.append("' FAILED in validation : ").append(fail).append('.');
throw new IllegalArgumentException(message.toString());
}
@@ -285,14 +285,14 @@ public class SetProcessor implements CommandProcessor {
return sortedEnvMap;
}
- private CommandProcessorResponse getVariable(String varname) throws Exception {
+ private CommandProcessorResponse getVariable(String varName) throws Exception {
SessionState ss = SessionState.get();
- if (varname.equals("silent")){
+ if (varName.equals("silent")){
ss.out.println("silent" + "=" + ss.getIsSilent());
return new CommandProcessorResponse(getSchema(), null);
}
- if (varname.startsWith(SYSTEM_PREFIX)) {
- String propName = varname.substring(SYSTEM_PREFIX.length());
+ if (varName.startsWith(SYSTEM_PREFIX)) {
+ String propName = varName.substring(SYSTEM_PREFIX.length());
String result = System.getProperty(propName);
if (result != null) {
if(isHidden(propName)) {
@@ -305,8 +305,8 @@ public class SetProcessor implements CommandProcessor {
ss.out.println(propName + " is undefined as a system property");
throw new CommandProcessorException(1);
}
- } else if (varname.indexOf(ENV_PREFIX) == 0) {
- String var = varname.substring(ENV_PREFIX.length());
+ } else if (varName.indexOf(ENV_PREFIX) == 0) {
+ String var = varName.substring(ENV_PREFIX.length());
if (System.getenv(var) != null) {
if(isHidden(var)) {
ss.out.println(ENV_PREFIX + var + " is a hidden config");
@@ -315,11 +315,11 @@ public class SetProcessor implements CommandProcessor {
}
return new CommandProcessorResponse(getSchema(), null);
} else {
- ss.out.println(varname + " is undefined as an environmental variable");
+ ss.out.println(varName + " is undefined as an environmental variable");
throw new CommandProcessorException(1);
}
- } else if (varname.indexOf(HIVECONF_PREFIX) == 0) {
- String var = varname.substring(HIVECONF_PREFIX.length());
+ } else if (varName.indexOf(HIVECONF_PREFIX) == 0) {
+ String var = varName.substring(HIVECONF_PREFIX.length());
if (ss.getConf().isHiddenConfig(var)) {
ss.out.println(HIVECONF_PREFIX + var + " is a hidden config");
return new CommandProcessorResponse(getSchema(), null);
@@ -327,31 +327,31 @@ public class SetProcessor implements CommandProcessor {
ss.out.println(HIVECONF_PREFIX + var + "=" + ss.getConf().get(var));
return new CommandProcessorResponse(getSchema(), null);
} else {
- ss.out.println(varname + " is undefined as a hive configuration variable");
+ ss.out.println(varName + " is undefined as a hive configuration variable");
throw new CommandProcessorException(1);
}
- } else if (varname.indexOf(HIVEVAR_PREFIX) == 0) {
- String var = varname.substring(HIVEVAR_PREFIX.length());
+ } else if (varName.indexOf(HIVEVAR_PREFIX) == 0) {
+ String var = varName.substring(HIVEVAR_PREFIX.length());
if (ss.getHiveVariables().get(var) != null) {
ss.out.println(HIVEVAR_PREFIX + var + "=" + ss.getHiveVariables().get(var));
return new CommandProcessorResponse(getSchema(), null);
} else {
- ss.out.println(varname + " is undefined as a hive variable");
+ ss.out.println(varName + " is undefined as a hive variable");
throw new CommandProcessorException(1);
}
- } else if (varname.indexOf(METACONF_PREFIX) == 0) {
- String var = varname.substring(METACONF_PREFIX.length());
+ } else if (varName.indexOf(METACONF_PREFIX) == 0) {
+ String var = varName.substring(METACONF_PREFIX.length());
Hive hive = Hive.get(ss.getConf());
String value = hive.getMetaConf(var);
if (value != null) {
ss.out.println(METACONF_PREFIX + var + "=" + value);
return new CommandProcessorResponse(getSchema(), null);
} else {
- ss.out.println(varname + " is undefined as a hive meta variable");
+ ss.out.println(varName + " is undefined as a hive meta variable");
throw new CommandProcessorException(1);
}
} else {
- dumpOption(varname);
+ dumpOption(varName);
return new CommandProcessorResponse(getSchema(), null);
}
}
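The getVariable() logic above dispatches purely on the variable-name prefix. Below is a minimal, self-contained sketch of that dispatch; the literal prefixes mirror the constants used above, but the plain maps stand in for SessionState and are assumptions of this illustration, not Hive API.

import java.util.Map;

final class VariableNamespaces {
  // Hedged sketch of the prefix dispatch in SetProcessor.getVariable(); hiveConf and hiveVars
  // are plain maps standing in for SessionState state (an assumption of this sketch).
  static String resolve(String varName, Map<String, String> hiveConf, Map<String, String> hiveVars) {
    if (varName.startsWith("system:")) {
      return System.getProperty(varName.substring("system:".length()));
    }
    if (varName.startsWith("env:")) {
      return System.getenv(varName.substring("env:".length()));
    }
    if (varName.startsWith("hiveconf:")) {
      return hiveConf.get(varName.substring("hiveconf:".length()));
    }
    if (varName.startsWith("hivevar:")) {
      return hiveVars.get(varName.substring("hivevar:".length()));
    }
    return null; // the real method also handles "metaconf:" and falls back to dumpOption(varName)
  }
}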
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
index 3cbaa60bdf1..d3e6a4722c2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
@@ -80,7 +80,7 @@ public class ScheduledQueryExecutionService implements Closeable {
synchronized (ScheduledQueryExecutionService.class) {
if (INSTANCE != null) {
throw new IllegalStateException(
- "There is already a ScheduledQueryExecutionService in service; check it and close it explicitly if neccessary");
+ "There is already a ScheduledQueryExecutionService in service; check it and close it explicitly if necessary");
}
INSTANCE = new ScheduledQueryExecutionService(ctx);
return INSTANCE;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java b/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java
index ed2d887f1e9..357e9ddce80 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java
@@ -24,7 +24,7 @@ import java.net.URI;
/**
* Interface representing the source of a secret using a URI.
* The URI scheme is used to match a URI to an implementation scheme. The implementations are discovered and loaded
- * using java service loader. Currenty there isn't a way to initialize or reset a SecretSource after construction.
+ * using java service loader. Currently, there isn't a way to initialize or reset a SecretSource after construction.
*
* The secret source is expected to be thread-safe.
*/
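A rough sketch of the java.util.ServiceLoader discovery the javadoc refers to; the interface below is a hypothetical stand-in with assumed method names, not the real SecretSource signature.

import java.net.URI;
import java.util.ServiceLoader;

// Hypothetical stand-in interface; the method names are assumptions made for this sketch only.
interface SchemeSecretSource {
  String scheme();
  String resolveSecret(URI uri);
}

final class SecretLookup {
  // Pick the first implementation (registered under META-INF/services) whose scheme matches the URI.
  static String resolve(URI uri) {
    for (SchemeSecretSource source : ServiceLoader.load(SchemeSecretSource.class)) {
      if (uri.getScheme().equals(source.scheme())) {
        return source.resolveSecret(uri);
      }
    }
    throw new IllegalArgumentException("no secret source registered for scheme: " + uri.getScheme());
  }
}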
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 32c0891d3f9..620022eda12 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -1092,7 +1092,7 @@ public class SessionState implements ISessionAuthState{
public static Registry getRegistryForWrite() {
Registry registry = getRegistry();
if (registry == null) {
- throw new RuntimeException("Function registery for session is not initialized");
+ throw new RuntimeException("Function registry for session is not initialized");
}
return registry;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
index 145bdd613d1..723f64f594c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
@@ -76,11 +76,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* StatsNoJobTask is used in cases where stats collection is the only task for the given query (no
* parent MR or Tez job). It is used in the following cases 1) ANALYZE with noscan for
* file formats that implement StatsProvidingRecordReader interface: ORC format (implements
- * StatsProvidingRecordReader) stores column statistics for all columns in the file footer. Its much
+ * StatsProvidingRecordReader) stores column statistics for all columns in the file footer. It's much
* faster to compute the table/partition statistics by reading the footer than scanning all the
* rows. This task can be used for computing basic stats like numFiles, numRows, fileSize,
* rawDataSize from ORC footer.
- * However, this cannot be used for full ACID tables, since some of the files may contain updates
+ * However, this cannot be used for full ACID tables, since some files may contain updates
* and deletes to existing rows, so summing up the per-file row counts is invalid.
**/
public class BasicStatsNoJobTask implements IStatsProcessor {
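A hedged sketch of the footer-based aggregation described in the javadoc above; FileFooterStats is a simplified stand-in for what a stats-providing reader exposes per file, not Hive API.

import java.util.List;

final class FooterStatsAggregator {
  // Simplified stand-in for per-file footer statistics (an assumption of this sketch).
  static final class FileFooterStats {
    final long rowCount, rawDataSize, fileSize;
    FileFooterStats(long rowCount, long rawDataSize, long fileSize) {
      this.rowCount = rowCount; this.rawDataSize = rawDataSize; this.fileSize = fileSize;
    }
  }

  // Sum footer stats instead of scanning rows. Summing per-file row counts is only valid for
  // non-ACID data: delete/update deltas in full ACID tables would make the totals wrong.
  static long[] aggregate(List<FileFooterStats> perFile) {
    long numRows = 0, rawDataSize = 0, totalSize = 0;
    for (FileFooterStats f : perFile) {
      numRows += f.rowCount;
      rawDataSize += f.rawDataSize;
      totalSize += f.fileSize;
    }
    return new long[] { perFile.size(), numRows, rawDataSize, totalSize };
  }
}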
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index c04d5c80322..965d107fdde 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -167,7 +167,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
}
// The collectable stats for the aggregator needs to be cleared.
- // For eg. if a file is being loaded, the old number of rows are not valid
+ // For example, if a file is being loaded, the old number of rows are not valid
// XXX: makes no sense for me... possibly not needed anymore
if (work.isClearAggregatorStats()) {
// we choose to keep the invalid stats and only change the setting.
@@ -492,7 +492,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
if (!table.isPartitioned()) {
return null;
}
- // get all partitions that matches with the partition spec
+ // get all partitions that match with the partition spec
return tblSpec.partitions != null ? unmodifiableList(tblSpec.partitions) : emptyList();
} else if (work.getLoadTableDesc() != null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
index 54916bd3192..152a2f2437e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
@@ -141,9 +141,9 @@ public abstract class Partish {
private Partition partition;
// FIXME: possibly the distinction between table/partition is not need; however it was like this before....will change it later
- public PPart(Table table, Partition partiton) {
+ public PPart(Table table, Partition partition) {
this.table = table;
- partition = partiton;
+ this.partition = partition;
}
@Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java
index d1fc3f27105..94aaa32ecfc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java
@@ -31,7 +31,7 @@ public interface StatEstimator {
/**
* Computes the output statistics of the actual UDF.
*
- * The estimator should return with a prefereably overestimated {@link ColStatistics} object if possible.
+ * The estimator should return with a preferably overestimated {@link ColStatistics} object if possible.
* The actual estimation logic may decide to not give an estimation; it should return with {@link Optional#empty()}.
*
* Note: at the time of the call there will be {@link ColStatistics} for all the arguments; if that is not available - the estimation is skipped.
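A hedged sketch of that contract for a single-argument UDF; the method shape below is assumed for illustration and is not the real StatEstimator signature.

import java.util.List;
import java.util.Optional;

final class UpperBoundEstimator {
  // For a single-argument UDF the argument's distinct-value count is a safe upper bound on the
  // output's, so returning it errs on the side of overestimation; with no input stats, decline.
  static Optional<Long> estimateDistinctCount(List<Long> argDistinctCounts) {
    if (argDistinctCounts == null || argDistinctCounts.size() != 1) {
      return Optional.empty(); // no usable statistics -> the estimation is skipped
    }
    return Optional.of(argDistinctCounts.get(0));
  }
}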
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index b1e690f16d8..247164cc8a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -294,7 +294,7 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
LOG.warn("A timed out copmaction pool entry ({}) is picked up by one of the default compaction pool workers.", ci);
}
if (StringUtils.isNotBlank(getPoolName()) && StringUtils.isNotBlank(ci.poolName) && !getPoolName().equals(ci.poolName)) {
- LOG.warn("The returned compaction request ({}) belong to a different pool. Altough the worker is assigned to the {} pool," +
+ LOG.warn("The returned compaction request ({}) belong to a different pool. Although the worker is assigned to the {} pool," +
" it will process the request.", ci, getPoolName());
}
checkInterrupt();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
index 856556b8a29..df8275debe5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
@@ -88,7 +88,7 @@ public class UDFConv extends UDF {
* @param radix
* must be between MIN_RADIX and MAX_RADIX
* @param fromPos
- * is the first element that should be conisdered
+ * is the first element that should be considered
* @return the result should be treated as an unsigned 64-bit integer.
*/
private long encode(int radix, int fromPos) {
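An illustrative sketch, not the UDF's actual loop, of accumulating digits in the given radix and reading the result as unsigned.

final class UnsignedRadixEncode {
  // Accumulate digits[fromPos..] in base 'radix'; the long may wrap past Long.MAX_VALUE, which
  // is why the contract says the result must be treated as an unsigned 64-bit integer.
  static long encodeUnsigned(int[] digits, int radix, int fromPos) {
    long result = 0;
    for (int i = fromPos; i < digits.length; i++) {
      result = result * radix + digits[i];
    }
    return result;
  }

  public static void main(String[] args) {
    long v = encodeUnsigned(new int[] {15, 15}, 16, 0); // 0xFF
    System.out.println(Long.toUnsignedString(v, 10));   // prints 255
  }
}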
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
index bf012ddd037..ecd466f646d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
@@ -61,7 +61,7 @@ public abstract class TableFunctionResolver {
/*
* - called during translation.
* - invokes createEvaluator which must be implemented by a subclass
- * - sets up the evaluator with references to the TableDef, PartitionClass, PartitionMemsize and
+ * - sets up the evaluator with references to the TableDef, PartitionClass, PartitionMemSize and
* the transformsRawInput boolean.
*/
public void initialize(HiveConf cfg, PTFDesc ptfDesc, PartitionedTableFunctionDef tDef)
@@ -198,7 +198,7 @@ public abstract class TableFunctionResolver {
/**
* Provide referenced columns names to be used in partition function
*
- * @return null for unknown (will get all columns from table including virtual columns)
+ * @return null for unknown (will get all columns from table including virtual columns)
* @throws SemanticException
*/
public List<String> getReferencedColumns() throws SemanticException {