Posted to commits@hive.apache.org by ab...@apache.org on 2019/09/30 11:23:03 UTC
[hive] branch master updated: HIVE-22191: Simplify SemanticAnalyzer by removing unused code (Akos Dombi via László Bodor)
This is an automated email from the ASF dual-hosted git repository.
abstractdog pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 7429430 HIVE-22191: Simplify SemanticAnalyzer by removing unused code (Akos Dombi via László Bodor)
7429430 is described below
commit 74294307c90b8f0b0016a9d16381dd30345de31d
Author: Akos Dombi <do...@gmail.com>
AuthorDate: Mon Sep 30 12:58:01 2019 +0200
HIVE-22191: Simplify SemanticAnalyzer by removing unused code (Akos Dombi via László Bodor)
Signed-off-by: Laszlo Bodor <bo...@gmail.com>
---
.../hadoop/hive/ql/parse/CalcitePlanner.java | 23 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 879 +++++++--------------
2 files changed, 284 insertions(+), 618 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 20ec058..60cd715 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -354,12 +354,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
@SuppressWarnings("nls")
public void analyzeInternal(ASTNode ast) throws SemanticException {
if (runCBO) {
- super.analyzeInternal(ast, new PlannerContextFactory() {
- @Override
- public PlannerContext create() {
- return new PreCboCtx();
- }
- });
+ super.analyzeInternal(ast, PreCboCtx::new);
} else {
super.analyzeInternal(ast);
}
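The anonymous PlannerContextFactory above collapses into a constructor reference. Judging by the java.util.function.Supplier import added to SemanticAnalyzer.java later in this patch, the factory is now treated as a plain no-arg supplier; a minimal self-contained sketch of why the two forms are interchangeable (the wrapper class and main method are illustrative, not from the patch):

    import java.util.function.Supplier;

    class FactorySketch {
        static class PlannerContext {}
        static class PreCboCtx extends PlannerContext {}

        public static void main(String[] args) {
            // A no-arg constructor reference satisfies any functional
            // interface whose single method takes no arguments.
            Supplier<PlannerContext> factory = PreCboCtx::new;
            PlannerContext ctx = factory.get(); // same as new PreCboCtx()
            System.out.println(ctx.getClass().getSimpleName()); // PreCboCtx
        }
    }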
@@ -1786,8 +1781,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
try {
calciteGenPlan = genLogicalPlan(getQB(), true, null, null);
// if it is to create view, we do not use table alias
- resultSchema = SemanticAnalyzer.convertRowSchemaToResultSetSchema(
- relToHiveRR.get(calciteGenPlan),
+ resultSchema = convertRowSchemaToResultSetSchema(relToHiveRR.get(calciteGenPlan),
getQB().isView() || getQB().isMaterializedView() ? false : HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
} catch (SemanticException e) {
@@ -2927,8 +2921,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
fields.get(i).getFieldName(),
TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
tableAlias, false);
- colInfo.setSkewedCol((SemanticAnalyzer.isSkewedCol(tableAlias, qb, colName)) ? true
- : false);
+ colInfo.setSkewedCol(isSkewedCol(tableAlias, qb, colName));
rr.put(tableAlias, colName, colInfo);
cInfoLst.add(colInfo);
}
@@ -3283,7 +3276,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
// Output types. They will be the concatenation of the input refs types and
// the types of the expressions for the lateral view generated rows
// Generate all expressions from lateral view
- ExprNodeDesc valuesExpr = genExprNodeDesc(valuesClause, inputRR, false);
+ ExprNodeDesc valuesExpr = genExprNodeDesc(valuesClause, inputRR, false, false);
RexCall convertedOriginalValuesExpr = (RexCall) new RexNodeConverter(this.cluster, inputRel.getRowType(),
inputPosMap, 0, false).convert(valuesExpr);
RelDataType valuesRowType = ((ArraySqlType) convertedOriginalValuesExpr.getType()).getComponentType();
@@ -3762,7 +3755,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
// As we said before, here we use genSelectLogicalPlan to rewrite AllColRef
srcRel = genSelectLogicalPlan(qb, srcRel, srcRel, null, null, true).getKey();
RowResolver rr = this.relToHiveRR.get(srcRel);
- qbp.setSelExprForClause(detsClauseName, SemanticAnalyzer.genSelectDIAST(rr));
+ qbp.setSelExprForClause(detsClauseName, genSelectDIAST(rr));
}
}
@@ -4610,7 +4603,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
} else {
// 6.3 Get rid of TOK_SELEXPR
expr = (ASTNode) child.getChild(0);
- String[] colRef = SemanticAnalyzer.getColAlias(child, getAutogenColAliasPrfxLbl(),
+ String[] colRef = getColAlias(child, getAutogenColAliasPrfxLbl(),
inputRR, autogenColAliasPrfxIncludeFuncName(), i);
tabAlias = colRef[0];
colAlias = colRef[1];
@@ -4647,7 +4640,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
} else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL
&& !hasAsClause
&& !inputRR.getIsExprResolver()
- && SemanticAnalyzer.isRegex(
+ && isRegex(
unescapeIdentifier(expr.getChild(0).getText()), conf)) {
// In case the expression is a regex COL.
// This can only happen without AS clause
@@ -4660,7 +4653,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
.getChild(0).getText().toLowerCase()))
&& !hasAsClause
&& !inputRR.getIsExprResolver()
- && SemanticAnalyzer.isRegex(
+ && isRegex(
unescapeIdentifier(expr.getChild(1).getText()), conf)) {
// In case the expression is TABLE.COL (col can be regex).
// This can only happen without AS clause
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3313766..4a54dae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.parse;
+import static java.util.Objects.nonNull;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
import com.google.common.collect.ArrayListMultimap;
@@ -46,10 +47,12 @@ import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;
+import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collectors;
+import com.google.common.collect.Lists;
import org.antlr.runtime.ClassicToken;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;
@@ -57,8 +60,6 @@ import org.antlr.runtime.TokenRewriteStream;
import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeVisitor;
import org.antlr.runtime.tree.TreeVisitorAction;
-import org.antlr.runtime.tree.TreeWizard;
-import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.util.ImmutableBitSet;
import org.apache.commons.collections.CollectionUtils;
@@ -340,7 +341,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
protected Map<String, PrunedPartitionList> prunedPartitions;
protected List<FieldSchema> resultSchema;
protected CreateViewDesc createVwDesc;
- private MaterializedViewUpdateDesc materializedViewUpdateDesc;
+ private MaterializedViewUpdateDesc materializedViewUpdateDesc;
private List<String> viewsExpanded;
protected ASTNode viewSelect;
protected final UnparseTranslator unparseTranslator;
@@ -398,7 +399,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Map<String, Table> tabNameToTabObject;
// The tokens we should ignore when we are trying to do table masking.
- private final Set<Integer> ignoredTokens = Sets.newHashSet(HiveParser.TOK_GROUPBY,
+ private static final Set<Integer> IGNORED_TOKENS = Sets.newHashSet(HiveParser.TOK_GROUPBY,
HiveParser.TOK_ORDERBY, HiveParser.TOK_WINDOWSPEC, HiveParser.TOK_CLUSTERBY,
HiveParser.TOK_DISTRIBUTEBY, HiveParser.TOK_SORTBY);
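The ignored-token set is identical for every analyzer instance, so it becomes a shared static final constant, renamed to the conventional UPPER_SNAKE_CASE. A sketch of the pattern, with placeholder ids instead of the real HiveParser token types:

    import java.util.Set;
    import com.google.common.collect.Sets;

    class ConstantSketch {
        // One shared set for all instances instead of one copy per object;
        // the ids below are placeholders, not real HiveParser constants.
        private static final Set<Integer> IGNORED_TOKENS = Sets.newHashSet(1, 2, 3);
    }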
@@ -552,7 +553,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return ctx.getOpContext();
}
- static String genPartValueString(String partColType, String partVal) throws SemanticException {
+ static String genPartValueString(String partColType, String partVal) {
String returnVal = partVal;
if (partColType.equals(serdeConstants.STRING_TYPE_NAME) ||
partColType.contains(serdeConstants.VARCHAR_TYPE_NAME) ||
@@ -689,10 +690,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return false;
}
ASTNode destNode = qbp.getDestForClause(dest);
- if(destNode != null && destNode.getType() == HiveParser.TOK_TAB) {
- return true;
- }
- return false;
+ return destNode != null && destNode.getType() == HiveParser.TOK_TAB;
}
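A recurring simplification in this patch (it reappears in isJoinToken and several predicates below): an if/else whose branches only return true or false is collapsed into returning the condition itself. A trivial sketch with an illustrative method name:

    class BooleanReturnSketch {
        // Before: if (s != null && !s.isEmpty()) { return true; } return false;
        static boolean isNonEmpty(String s) {
            return s != null && !s.isEmpty(); // return the condition directly
        }
    }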
/**
@@ -733,7 +731,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* @return List of default constraints (including NULL if there is no default)
* @throws SemanticException
*/
- private static List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException{
+ private List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException{
Map<String, String> colNameToDefaultVal = getColNameToDefaultValueMap(tbl);
List<String> defaultConstraints = new ArrayList<>();
if(targetSchema != null) {
@@ -749,7 +747,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return defaultConstraints;
}
- protected static Map<String, String> getColNameToDefaultValueMap(Table tbl) throws SemanticException {
+ protected Map<String, String> getColNameToDefaultValueMap(Table tbl) throws SemanticException {
Map<String, String> colNameToDefaultVal = null;
try {
DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(tbl.getDbName(), tbl.getTableName());
@@ -810,7 +808,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// replace the node in place
selectExpr.replaceChildren(0, 0, newNode);
if (LOG.isDebugEnabled()) {
- LOG.debug("DEFAULT keyword replacement - Inserted " + newNode.getText() + " for table: " + targetTable.getTableName());
+ LOG.debug("DEFAULT keyword replacement - Inserted {} for table: {}", newNode.getText(),
+ targetTable.getTableName());
}
}
}
@@ -838,9 +837,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(j-1));
// replace the node in place
valueClause.replaceChildren(j, j, newNode);
- if (LOG.isDebugEnabled()) {
- LOG.debug("DEFAULT keyword replacement - Inserted " + newNode.getText() + " for table: " + targetTable.getTableName());
- }
+ LOG.debug("DEFAULT keyword replacement - Inserted {} for table: {}", newNode.getText(),
+ targetTable.getTableName());
}
}
}
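This hunk, and many like it below, drops the explicit LOG.isDebugEnabled() guard in favor of SLF4J parameterized logging: with {} placeholders the message string is only assembled once SLF4J knows DEBUG is enabled, so the guard is redundant whenever the arguments are cheap to evaluate. A small sketch of the pattern (the logger and method names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingSketch {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

        void onReplacement(String inserted, String tableName) {
            // No isDebugEnabled() guard needed: formatting is deferred
            // until the logger has checked the level.
            LOG.debug("DEFAULT keyword replacement - Inserted {} for table: {}",
                inserted, tableName);
        }
    }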
@@ -945,8 +943,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- private List<ASTNode> doPhase1GetDistinctFuncExprs(
- Map<String, ASTNode> aggregationTrees) throws SemanticException {
+ private List<ASTNode> doPhase1GetDistinctFuncExprs(Map<String, ASTNode> aggregationTrees) {
List<ASTNode> exprs = new ArrayList<ASTNode>();
for (Map.Entry<String, ASTNode> entry : aggregationTrees.entrySet()) {
ASTNode value = entry.getValue();
@@ -1092,7 +1089,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
SplitSample sample;
if (type.getType() == HiveParser.TOK_PERCENT) {
assertCombineInputFormat(numerator, "Percentage");
- Double percent = Double.valueOf(value).doubleValue();
+ double percent = Double.valueOf(value);
if (percent < 0 || percent > 100) {
throw new SemanticException(generateErrorMessage((ASTNode) numerator,
"Sampling percentage should be between 0 and 100"));
@@ -1334,8 +1331,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<Task<?>> cteLeafs = new ArrayList<>();
List<Task<?>> curTopRoots = null;
List<Task<?>> curBottomLeafs = null;
- for (int i = 0; i < execution.size(); i++) {
- CTEClause current = execution.get(i);
+ for (CTEClause current : execution) {
if (current.parents.isEmpty() && curTopRoots != null) {
cteRoots.addAll(curTopRoots);
cteLeafs.addAll(curBottomLeafs);
@@ -1409,7 +1405,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
table.setMaterializedTable(true);
- LOG.info(cteName + " will be materialized into " + location);
+ LOG.info("{} will be materialized into {}", cteName, location);
cte.source = analyzer;
ctx.addMaterializedTable(cteName, table);
@@ -1419,15 +1415,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
static boolean isJoinToken(ASTNode node) {
- if ((node.getToken().getType() == HiveParser.TOK_JOIN)
+ return (node.getToken().getType() == HiveParser.TOK_JOIN)
|| (node.getToken().getType() == HiveParser.TOK_CROSSJOIN)
|| isOuterJoinToken(node)
|| (node.getToken().getType() == HiveParser.TOK_LEFTSEMIJOIN)
- || (node.getToken().getType() == HiveParser.TOK_UNIQUEJOIN)) {
- return true;
- }
-
- return false;
+ || (node.getToken().getType() == HiveParser.TOK_UNIQUEJOIN);
}
static private boolean isOuterJoinToken(ASTNode node) {
@@ -1837,7 +1829,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
phase1Result = false;
skipRecursion = true;
LOG.info("Partition already exists so insert into overwrite " +
- "skipped for partition : " + parMetaData.toString());
+ "skipped for partition : {}", parMetaData);
break;
}
} catch (HiveException e) {
@@ -1869,8 +1861,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
int child_count = ast.getChildCount();
for (int child_pos = 0; child_pos < child_count && phase1Result; ++child_pos) {
// Recurse
- phase1Result = phase1Result && doPhase1(
- (ASTNode)ast.getChild(child_pos), qb, ctx_1, plannerCtx);
+ phase1Result = doPhase1((ASTNode) ast.getChild(child_pos), qb, ctx_1, plannerCtx);
}
}
return phase1Result;
@@ -1879,9 +1870,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private int processQueryHint(ASTNode ast, QBParseInfo qbp, int posn) throws SemanticException{
ParseDriver pd = new ParseDriver();
String queryHintStr = ast.getText();
- if (LOG.isDebugEnabled()) {
- LOG.debug("QUERY HINT: "+queryHintStr);
- }
+ LOG.debug("QUERY HINT: {} ", queryHintStr);
try {
ASTNode hintNode = pd.parseHint(queryHintStr);
qbp.setHints(hintNode);
@@ -1910,8 +1899,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String fullTableName = getUnescapedName((ASTNode) ast.getChild(0).getChild(0),
SessionState.get().getCurrentDatabase());
qbp.setDestSchemaForClause(ctx_1.dest, targetColNames);
- Set<String> targetColumns = new HashSet<String>();
- targetColumns.addAll(targetColNames);
+ Set<String> targetColumns = new HashSet<>(targetColNames);
if(targetColNames.size() != targetColumns.size()) {
throw new SemanticException(generateErrorMessage(tabColName,
"Duplicate column name detected in " + fullTableName + " table schema specification"));
@@ -2575,7 +2563,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
(viewMask.isEnabled() && analyzeRewrite == null)) {
viewTree = rewriteASTWithMaskAndFilter(viewMask, viewTree,
ctx.getViewTokenRewriteStream(viewFullyQualifiedName),
- ctx, db, tabNameToTabObject, ignoredTokens);
+ ctx, db, tabNameToTabObject);
}
Dispatcher nodeOriginDispatcher = new Dispatcher() {
@Override
@@ -2786,7 +2774,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private void populateAliases(List<String> leftAliases,
List<String> rightAliases, ASTNode condn, QBJoinTree joinTree,
- List<String> leftSrc) throws SemanticException {
+ List<String> leftSrc) {
if ((leftAliases.size() != 0) && (rightAliases.size() != 0)) {
joinTree.addPostJoinFilter(condn);
return;
@@ -2821,7 +2809,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<String> leftCondAl1,
List<String> leftCondAl2,
List<String> rightCondAl1,
- List<String> rightCondAl2) throws SemanticException {
+ List<String> rightCondAl2) {
if (leftCondAl1.size() != 0) {
if ((rightCondAl1.size() != 0)
|| ((rightCondAl1.size() == 0) && (rightCondAl2.size() == 0))) {
@@ -3102,14 +3090,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
@SuppressWarnings("rawtypes")
- private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, QB qb, String dest, ASTNode predicate,
- Map<String, Operator> aliasToOpInfo) throws SemanticException {
+ private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, ASTNode predicate,
+ Map<String, Operator> aliasToOpInfo) {
switch (predicate.getType()) {
case HiveParser.KW_AND:
- extractJoinCondsFromWhereClause(joinTree, qb, dest,
+ extractJoinCondsFromWhereClause(joinTree,
(ASTNode) predicate.getChild(0), aliasToOpInfo);
- extractJoinCondsFromWhereClause(joinTree, qb, dest,
+ extractJoinCondsFromWhereClause(joinTree,
(ASTNode) predicate.getChild(1), aliasToOpInfo);
break;
case HiveParser.EQUAL_NS:
@@ -3214,8 +3202,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return output;
}
- protected static ASTNode rewriteGroupingFunctionAST(final List<ASTNode> grpByAstExprs, ASTNode targetNode,
- final boolean noneSet) throws SemanticException {
+ protected ASTNode rewriteGroupingFunctionAST(final List<ASTNode> grpByAstExprs, ASTNode targetNode,
+ final boolean noneSet) {
TreeVisitorAction action = new TreeVisitorAction() {
@@ -3292,8 +3280,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Phase1Ctx ctx_1 = initPhase1Ctx();
doPhase1(subQueryPredicate.getSubQueryAST(), qbSQ, ctx_1, null);
getMetaData(qbSQ);
- Operator op = genPlan(qbSQ);
- return op;
+ return genPlan(qbSQ);
}
@SuppressWarnings("nls")
@@ -3481,10 +3468,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ctx.getPlanMapper().link(condn, output);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Filter Plan for " + qb.getId() + " row schema: "
- + inputRR.toString());
- }
+ LOG.debug("Created Filter Plan for {} row schema: {}", qb.getId(), inputRR.toString());
return output;
}
@@ -3560,17 +3544,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(filterDesc,
new RowSchema(inputRR.getColumnInfos()), input), inputRR);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Filter Plan for " + qb.getId() + " row schema: "
- + inputRR.toString());
- }
+ LOG.debug("Created Filter Plan for {} row schema: {}", qb.getId(), inputRR);
return output;
}
@SuppressWarnings("nls")
// TODO: make aliases unique, otherwise needless rewriting takes place
Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel,
- List<ExprNodeDesc> col_list, Set<ColumnInfo> excludeCols, RowResolver input,
+ List<ExprNodeDesc> colList, Set<ColumnInfo> excludeCols, RowResolver input,
RowResolver colSrcRR, Integer pos, RowResolver output, List<String> aliases,
boolean ensureUniqueCols) throws SemanticException {
@@ -3641,7 +3622,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (oColInfo == null) {
ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), name,
colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isSkewedCol());
- col_list.add(expr);
+ colList.add(expr);
oColInfo = new ColumnInfo(getColumnInternalName(pos), colInfo.getType(),
colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());
inputColsProcessed.put(colInfo, oColInfo);
@@ -3655,7 +3636,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
} else {
output.put(tmp[0], tmp[1], oColInfo);
}
- pos = Integer.valueOf(pos.intValue() + 1);
+ pos++;
matched++;
if (unparseTranslator.isEnabled() || (tableMask.isEnabled() && analyzeRewrite == null)) {
@@ -3714,17 +3695,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (input != colSrcRR) {
colInfo = input.get(tabAlias, tmp[1]);
if (colInfo == null) {
- LOG.error("Cannot find colInfo for " + tabAlias + "." + tmp[1] + ", derived from ["
- + colSrcRR + "], in [" + input + "]");
+ LOG.error("Cannot find colInfo for {}.{}, derived from [{}], in [{}]", tabAlias, tmp[1], colSrcRR, input);
throw new SemanticException(ErrorMsg.NON_KEY_EXPR_IN_GROUPBY, tmp[1]);
}
- String oldCol = null;
- if (LOG.isDebugEnabled()) {
- oldCol = name + " => " + (tmp == null ? "null" : (tmp[0] + "." + tmp[1]));
- }
name = colInfo.getInternalName();
tmp = input.reverseLookup(name);
if (LOG.isDebugEnabled()) {
+ String oldCol = name + " => " + (tmp == null ? "null" : (tmp[0] + "." + tmp[1]));
String newCol = name + " => " + (tmp == null ? "null" : (tmp[0] + "." + tmp[1]));
LOG.debug("Translated [" + oldCol + "] to [" + newCol + "]");
}
@@ -3734,11 +3711,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (oColInfo == null) {
ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), name,
colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isSkewedCol());
- col_list.add(expr);
+ colList.add(expr);
oColInfo = new ColumnInfo(getColumnInternalName(pos), colInfo.getType(),
colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());
inputColsProcessed.put(colInfo, oColInfo);
}
+ assert nonNull(tmp);
if (ensureUniqueCols) {
if (!output.putWithCheck(tmp[0], tmp[1], null, oColInfo)) {
throw new CalciteSemanticException("Cannot add column to RR: " + tmp[0] + "." + tmp[1]
@@ -3748,7 +3726,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
} else {
output.put(tmp[0], tmp[1], oColInfo);
}
- pos = Integer.valueOf(pos.intValue() + 1);
+ pos++;
matched++;
if (unparseTranslator.isEnabled() || tableMask.isEnabled()) {
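The assert a few lines above is where the new static import of java.util.Objects.nonNull comes in: it documents the invariant that tmp is non-null at this point, runs only when assertions are enabled (java -ea), and costs nothing otherwise. A self-contained sketch with illustrative names:

    import static java.util.Objects.nonNull;

    class AssertSketch {
        static String firstField(String[] tmp) {
            // Active only under 'java -ea'; documents the invariant
            // without changing production behavior.
            assert nonNull(tmp);
            return tmp[0];
        }
    }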
@@ -3788,10 +3766,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return (end == -1) ? "" : cmd.substring(end, cmd.length());
}
- private static int getPositionFromInternalName(String internalName) {
- return HiveConf.getPositionFromInternalName(internalName);
- }
-
private String fetchFilesNotInLocalFilesystem(String cmd) {
SessionState ss = SessionState.get();
String progName = getScriptProgName(cmd);
@@ -3801,15 +3775,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Path p = new Path(filePath);
String fileName = p.getName();
String scriptArgs = getScriptArgs(cmd);
- String finalCmd = fileName + scriptArgs;
- return finalCmd;
+ return fileName + scriptArgs;
}
return cmd;
}
private TableDesc getTableDescFromSerDe(ASTNode child, String cols,
- String colTypes, boolean defaultCols) throws SemanticException {
+ String colTypes) throws SemanticException {
if (child.getType() == HiveParser.TOK_SERDENAME) {
String serdeName = unescapeSQLString(child.getChild(0).getText());
Class<? extends Deserializer> serdeClass = null;
@@ -3822,7 +3795,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
TableDesc tblDesc = PlanUtils.getTableDesc(serdeClass, Integer
- .toString(Utilities.tabCode), cols, colTypes, defaultCols);
+ .toString(Utilities.tabCode), cols, colTypes, false);
// copy all the properties
if (child.getChildCount() == 2) {
ASTNode prop = (ASTNode) ((ASTNode) child.getChild(1)).getChild(0);
@@ -3837,7 +3810,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return tblDesc;
} else if (child.getType() == HiveParser.TOK_SERDEPROPS) {
TableDesc tblDesc = PlanUtils.getDefaultTableDesc(Integer
- .toString(Utilities.ctrlaCode), cols, colTypes, defaultCols);
+ .toString(Utilities.ctrlaCode), cols, colTypes, false);
int numChildRowFormat = child.getChildCount();
for (int numC = 0; numC < numChildRowFormat; numC++) {
ASTNode rowChild = (ASTNode) child.getChild(numC);
@@ -4018,7 +3991,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (trfm.getChild(inputSerDeNum).getChildCount() > 0) {
inInfo = getTableDescFromSerDe((ASTNode) (((ASTNode) trfm
.getChild(inputSerDeNum))).getChild(0), inpColumns.toString(),
- inpColumnTypes.toString(), false);
+ inpColumnTypes.toString());
} else {
inInfo = PlanUtils.getTableDesc(serde, Integer
.toString(fieldSeparator), inpColumns.toString(), inpColumnTypes
@@ -4028,7 +4001,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (trfm.getChild(outputSerDeNum).getChildCount() > 0) {
outInfo = getTableDescFromSerDe((ASTNode) (((ASTNode) trfm
.getChild(outputSerDeNum))).getChild(0), columns.toString(),
- columnTypes.toString(), false);
+ columnTypes.toString());
// This is for backward compatibility. If the user did not specify the
// output column list, we assume that there are 2 columns: key and value.
// However, if the script outputs: col1, col2, col3 separated by TAB, the
@@ -4205,7 +4178,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return bitmap | (1L << bitIdx);
}
- private static long unsetBit(long bitmap, int bitIdx) {
+ private long unsetBit(long bitmap, int bitIdx) {
return bitmap & ~(1L << bitIdx);
}
@@ -4353,7 +4326,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* Returns whether the pattern is a regex expression (instead of a normal
* string). Normal string is a string with all alphabets/digits and "_".
*/
- static boolean isRegex(String pattern, HiveConf conf) {
+ boolean isRegex(String pattern, HiveConf conf) {
String qIdSupport = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUOTEDID_SUPPORT);
if ( "column".equals(qIdSupport)) {
return false;
@@ -4373,9 +4346,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ASTNode selExprList = qb.getParseInfo().getSelForClause(dest);
Operator<?> op = genSelectPlan(dest, selExprList, qb, input, inputForSelectStar, false);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Select Plan for clause: " + dest);
- }
+ LOG.debug("Created Select Plan for clause: {}", dest);
return op;
}
@@ -4384,14 +4355,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private Operator<?> genSelectPlan(String dest, ASTNode selExprList, QB qb, Operator<?> input,
Operator<?> inputForSelectStar, boolean outerLV) throws SemanticException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("tree: " + selExprList.toStringTree());
- }
+ LOG.debug("tree: {}", selExprList.toStringTree());
- List<ExprNodeDesc> col_list = new ArrayList<ExprNodeDesc>();
+ List<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
RowResolver out_rwsch = new RowResolver();
ASTNode trfm = null;
- Integer pos = Integer.valueOf(0);
+ Integer pos = 0;
RowResolver inputRR = opParseCtx.get(input).getRowResolver();
RowResolver starRR = null;
if (inputForSelectStar != null && inputForSelectStar != input) {
@@ -4442,7 +4411,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
if (isUDTF && (selectStar = udtfExprType == HiveParser.TOK_FUNCTIONSTAR)) {
genColListRegex(".*", null, (ASTNode) udtfExpr.getChild(0),
- col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
+ colList, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
}
}
@@ -4478,10 +4447,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
assert (false);
}
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("UDTF table alias is " + udtfTableAlias);
- LOG.debug("UDTF col aliases are " + udtfColAliases);
- }
+ LOG.debug("UDTF table alias is {}", udtfTableAlias);
+ LOG.debug("UDTF col aliases are {}", udtfColAliases);
}
// The list of expressions after SELECT or SELECT TRANSFORM.
@@ -4494,9 +4461,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
exprList = selExprList;
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("genSelectPlan: input = " + inputRR + " starRr = " + starRR);
- }
+ LOG.debug("genSelectPlan: input = {} starRr = {}", inputRR, starRR);
// For UDTF's, skip the function name to get the expressions
int startPosn = isUDTF ? posn + 1 : posn;
@@ -4508,9 +4473,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
|| !qb.getParseInfo().getDestGroupingSets().isEmpty()
|| !qb.getParseInfo().getDestCubes().isEmpty());
Set<String> colAliases = new HashSet<String>();
- ASTNode[] exprs = new ASTNode[exprList.getChildCount()];
- String[][] aliases = new String[exprList.getChildCount()][];
- boolean[] hasAsClauses = new boolean[exprList.getChildCount()];
int offset = 0;
// Iterate over all expression (either after SELECT, or in SELECT TRANSFORM)
for (int i = startPosn; i < exprList.getChildCount(); ++i) {
@@ -4551,9 +4513,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
.getChild(1));
}
}
- exprs[i] = expr;
- aliases[i] = new String[] {tabAlias, colAlias};
- hasAsClauses[i] = hasAsClause;
colAliases.add(colAlias);
// The real expression
@@ -4561,7 +4520,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
int initPos = pos;
pos = genColListRegex(".*", expr.getChildCount() == 0 ? null
: getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(),
- expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
+ expr, colList, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
if (unparseTranslator.isEnabled()) {
offset += pos - initPos - 1;
}
@@ -4573,7 +4532,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// This can only happen without AS clause
// We don't allow this for ExprResolver - the Group By case
pos = genColListRegex(unescapeIdentifier(expr.getChild(0).getText()),
- null, expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
+ null, expr, colList, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
} else if (expr.getType() == HiveParser.DOT
&& expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL
&& inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0)
@@ -4585,7 +4544,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// We don't allow this for ExprResolver - the Group By case
pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()),
unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()),
- expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
+ expr, colList, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false);
} else {
// Case when this is an expression
TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR, true, isCBOExecuted());
@@ -4604,12 +4563,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
out_rwsch.get(null, recommended) == null) {
colAlias = recommended;
}
- col_list.add(exp);
+ colList.add(exp);
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos),
exp.getWritableObjectInspector(), tabAlias, false);
- colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp)
- .isSkewedCol() : false);
+ colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) && ((ExprNodeColumnDesc) exp)
+ .isSkewedCol());
out_rwsch.put(tabAlias, colAlias, colInfo);
if ( exp instanceof ExprNodeColumnDesc ) {
@@ -4620,23 +4579,23 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- pos = Integer.valueOf(pos.intValue() + 1);
+ pos++;
}
}
selectStar = selectStar && exprList.getChildCount() == posn + 1;
- out_rwsch = handleInsertStatementSpec(col_list, dest, out_rwsch, qb, selExprList);
+ out_rwsch = handleInsertStatementSpec(colList, dest, out_rwsch, qb, selExprList);
List<String> columnNames = new ArrayList<String>();
Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
- for (int i = 0; i < col_list.size(); i++) {
+ for (int i = 0; i < colList.size(); i++) {
String outputCol = getColumnInternalName(i);
- colExprMap.put(outputCol, col_list.get(i));
+ colExprMap.put(outputCol, colList.get(i));
columnNames.add(outputCol);
}
Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
- new SelectDesc(col_list, columnNames, selectStar), new RowSchema(
+ new SelectDesc(colList, columnNames, selectStar), new RowSchema(
out_rwsch.getColumnInfos()), input), out_rwsch);
output.setColumnExprMap(colExprMap);
@@ -4648,15 +4607,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
output = genUDTFPlan(genericUDTF, udtfTableAlias, udtfColAliases, qb, output, outerLV);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Select Plan row schema: " + out_rwsch.toString());
- }
+ LOG.debug("Created Select Plan row schema: {}", out_rwsch);
return output;
}
private RowResolver getColForInsertStmtSpec(Map<String, ExprNodeDesc> targetCol2Projection, final Table target,
Map<String, ColumnInfo> targetCol2ColumnInfo, int colListPos,
- List<TypeInfo> targetTableColTypes, List<ExprNodeDesc> new_col_list,
+ List<TypeInfo> targetTableColTypes, List<ExprNodeDesc> newColList,
List<String> targetTableColNames)
throws SemanticException {
RowResolver newOutputRR = new RowResolver();
@@ -4670,7 +4627,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String f = targetTableColNames.get(i);
if(targetCol2Projection.containsKey(f)) {
//put existing column in new list to make sure it is in the right position
- new_col_list.add(targetCol2Projection.get(f));
+ newColList.add(targetCol2Projection.get(f));
ColumnInfo ci = targetCol2ColumnInfo.get(f);
ci.setInternalName(getColumnInternalName(colListPos));
newOutputRR.put(ci.getTabAlias(), ci.getInternalName(), ci);
@@ -4691,12 +4648,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException("Error while parsing default value: " + defaultValue
+ ". Error message: " + e.getMessage());
}
- LOG.debug("Added default value from metastore: " + exp);
+ LOG.debug("Added default value from metastore: {}", exp);
}
else {
exp = new ExprNodeConstantDesc(targetTableColTypes.get(i), null);
}
- new_col_list.add(exp);
+ newColList.add(exp);
final String tableAlias = null;//this column doesn't come from any table
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(colListPos),
exp.getWritableObjectInspector(), tableAlias, false);
@@ -4755,7 +4712,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException(generateErrorMessage(selExprList,
"No table/partition found in QB metadata for dest='" + dest + "'"));
}
- List<ExprNodeDesc> new_col_list = new ArrayList<ExprNodeDesc>();
+ List<ExprNodeDesc> newColList = new ArrayList<ExprNodeDesc>();
colListPos = 0;
List<FieldSchema> targetTableCols = target != null ? target.getCols() : partition.getCols();
List<String> targetTableColNames = new ArrayList<String>();
@@ -4780,9 +4737,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
//where missing columns are NULL-filled
Table tbl = target == null? partition.getTable() : target;
RowResolver newOutputRR = getColForInsertStmtSpec(targetCol2Projection, tbl, targetCol2ColumnInfo, colListPos,
- targetTableColTypes, new_col_list, targetTableColNames);
+ targetTableColTypes, newColList, targetTableColNames);
col_list.clear();
- col_list.addAll(new_col_list);
+ col_list.addAll(newColList);
return newOutputRR;
}
@@ -4818,11 +4775,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* Convert exprNodeDesc array to ObjectInspector array.
*/
static List<ObjectInspector> getWritableObjectInspector(List<ExprNodeDesc> exprs) {
- List<ObjectInspector> result = new ArrayList<ObjectInspector>();
- for (ExprNodeDesc expr : exprs) {
- result.add(expr.getWritableObjectInspector());
- }
- return result;
+ return exprs.stream().map(ExprNodeDesc::getWritableObjectInspector).collect(Collectors.toList());
}
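getWritableObjectInspector is rewritten from a manual accumulate-into-a-list loop into a stream pipeline (Collectors is already imported in this file). The shape of the refactor, demonstrated on plain strings so the sketch compiles standalone:

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    class MapCollectSketch {
        public static void main(String[] args) {
            List<String> exprs = Arrays.asList("a", "bb", "ccc");
            // map each element through a method reference and collect,
            // replacing new ArrayList + for-loop + add()
            List<Integer> lengths =
                exprs.stream().map(String::length).collect(Collectors.toList());
            System.out.println(lengths); // [1, 2, 3]
        }
    }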
/**
@@ -4938,7 +4891,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
if (Utilities.ReduceField.VALUE.toString().equals(terms[0])) {
- int pos = getPositionFromInternalName(terms[1]);
+ int pos = HiveConf.getPositionFromInternalName(terms[1]);
if (pos >= 0 && pos < reduceValues.size()) {
ExprNodeDesc reduceValue = reduceValues.get(pos);
if (reduceValue != null) {
@@ -4998,8 +4951,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
colExprMap.put(field, groupByKeys.get(groupByKeys.size() - 1));
}
// For each aggregation
- Map<String, ASTNode> aggregationTrees = parseInfo
- .getAggregationExprsForClause(dest);
+ Map<String, ASTNode> aggregationTrees = parseInfo.getAggregationExprsForClause(dest);
assert (aggregationTrees != null);
// get the last colName for the reduce KEY
// it represents the column name corresponding to distinct aggr, if any
@@ -5243,8 +5195,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- Map<String, ASTNode> aggregationTrees = parseInfo
- .getAggregationExprsForClause(dest);
+ Map<String, ASTNode> aggregationTrees = parseInfo.getAggregationExprsForClause(dest);
// get the last colName for the reduce KEY
// it represents the column name corresponding to distinct aggr, if any
String lastKeyColName = null;
@@ -5289,7 +5240,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String paraExpression = paraExprInfo.getInternalName();
assert (paraExpression != null);
- if (isDistinct && lastKeyColName != null) {
+ if (lastKeyColName != null) {
// if aggr is distinct, the parameter name is constructed as
// KEY.lastKeyColName:<tag>._colx
paraExpression = Utilities.ReduceField.KEY.name() + "." +
@@ -5392,8 +5343,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* (qb.getParseInfo().getXXX(dest)). The new GroupByOperator will be a child
* of the inputOperatorInfo.
*
- * @param mode
- * The mode of the aggregation (HASH)
* @param genericUDAFEvaluators
* If not null, this function will store the mapping from Aggregation
* StringTree to the genericUDAFEvaluator in this parameter, so it
@@ -5405,7 +5354,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String dest,
List<ASTNode> grpByExprs,
Operator inputOperatorInfo,
- GroupByDesc.Mode mode,
Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
List<Long> groupingSetKeys,
boolean groupingSetsPresent) throws SemanticException {
@@ -5499,7 +5447,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI;
containsDistinctAggr = containsDistinctAggr || isDistinct;
boolean isAllColumns = value.getType() == HiveParser.TOK_FUNCTIONSTAR;
- Mode amode = groupByDescModeToUDAFMode(mode, isDistinct);
+ Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.HASH, isDistinct);
GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator(
aggName, aggParameters, value, isDistinct, isAllColumns);
@@ -5528,7 +5476,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
float minReductionHashAggr = HiveConf
.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild(
- new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations,
+ new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations,
false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr,
groupingSetKeys, groupingSetsPresent, groupingSetsPosition, containsDistinctAggr),
new RowSchema(groupByOutputRowResolver.getColumnInfos()),
@@ -5647,8 +5595,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<ExprNodeDesc> reduceKeys = new ArrayList<ExprNodeDesc>();
- for (int i = 0; i < grpByExprs.size(); ++i) {
- ASTNode grpbyExpr = grpByExprs.get(i);
+ for (ASTNode grpbyExpr : grpByExprs) {
ExprNodeDesc inputExpr = genExprNodeDesc(grpbyExpr,
reduceSinkInputRowResolver);
ColumnInfo prev = reduceSinkOutputRowResolver.getExpression(grpbyExpr);
@@ -5658,7 +5605,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
reduceKeys.add(inputExpr);
outputKeyColumnNames.add(getColumnInternalName(reduceKeys.size() - 1));
- String field = Utilities.ReduceField.KEY.toString() + "."
+ String field = ReduceField.KEY.toString() + "."
+ getColumnInternalName(reduceKeys.size() - 1);
ColumnInfo colInfo = new ColumnInfo(field, reduceKeys.get(
reduceKeys.size() - 1).getTypeInfo(), null, false);
@@ -5730,8 +5677,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
private void getReduceValuesForReduceSinkNoMapAgg(QBParseInfo parseInfo, String dest,
- RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver,
- List<String> outputValueColumnNames, List<ExprNodeDesc> reduceValues,
+ RowResolver reduceSinkInputRowResolver,
+ RowResolver reduceSinkOutputRowResolver,
+ List<String> outputValueColumnNames,
+ List<ExprNodeDesc> reduceValues,
Map<String, ExprNodeDesc> colExprMap) throws SemanticException {
Map<String, ASTNode> aggregationTrees = parseInfo.getAggregationExprsForClause(dest);
@@ -5962,8 +5911,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* (parseInfo.getXXX(dest)). The new GroupByOperator will do the second
* aggregation based on the partial aggregation results.
*
- * @param mode
- * the mode of aggregation (FINAL)
* @param genericUDAFEvaluators
* The mapping from Aggregation StringTree to the
* genericUDAFEvaluator.
@@ -5974,7 +5921,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo,
String dest,
Operator reduceSinkOperatorInfo2,
- GroupByDesc.Mode mode,
Map<String, GenericUDAFEvaluator> genericUDAFEvaluators,
boolean groupingSetsPresent) throws SemanticException {
@@ -6037,7 +5983,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI;
containsDistinctAggr = containsDistinctAggr || isDistinct;
- Mode amode = groupByDescModeToUDAFMode(mode, isDistinct);
+ Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.FINAL, isDistinct);
GenericUDAFEvaluator genericUDAFEvaluator = genericUDAFEvaluators
.get(entry.getKey());
assert (genericUDAFEvaluator != null);
@@ -6048,8 +5994,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
aggName.toLowerCase(),
udaf.genericUDAFEvaluator,
udaf.convertedParameters,
- (mode != GroupByDesc.Mode.FINAL && value.getToken().getType() ==
- HiveParser.TOK_FUNCTIONDI),
+ false,
amode));
String field = getColumnInternalName(groupByKeys.size()
+ aggregations.size() - 1);
@@ -6064,7 +6009,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild(
- new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations,
+ new GroupByDesc(GroupByDesc.Mode.FINAL, outputColumnNames, groupByKeys, aggregations,
false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, null, false,
groupingSetsPosition, containsDistinctAggr),
new RowSchema(groupByOutputRowResolver2.getColumnInfos()),
@@ -6187,9 +6132,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<ExprNodeDesc> expressions = new ArrayList<ExprNodeDesc>(2);
expressions.add(current);
expressions.add(previous);
- ExprNodeDesc orExpr =
- new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, or, expressions);
- previous = orExpr;
+ previous = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, or, expressions);
} else {
// If an expression does not have a where clause, there can be no common filter
previous = null;
@@ -6338,7 +6281,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// ////// 4. Generate GroupbyOperator2
Operator groupByOperatorInfo2 = genGroupByPlanGroupByOperator2MR(parseInfo,
- dest, reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL,
+ dest, reduceSinkOperatorInfo2,
genericUDAFEvaluators, false);
return groupByOperatorInfo2;
@@ -6350,15 +6293,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return false;
}
- if (!qb.getParseInfo().getDistinctFuncExprsForClause(dest).isEmpty()) {
- return false;
- }
-
- return true;
+ return qb.getParseInfo().getDistinctFuncExprsForClause(dest).isEmpty();
}
- static private void extractColumns(Set<String> colNamesExprs,
- ExprNodeDesc exprNode) throws SemanticException {
+ private void extractColumns(Set<String> colNamesExprs, ExprNodeDesc exprNode) {
if (exprNode instanceof ExprNodeColumnDesc) {
colNamesExprs.add(((ExprNodeColumnDesc) exprNode).getColumn());
return;
@@ -6372,14 +6310,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- static private boolean hasCommonElement(Set<String> set1, Set<String> set2) {
- for (String elem1 : set1) {
- if (set2.contains(elem1)) {
- return true;
- }
- }
-
- return false;
+ private boolean hasCommonElement(Set<String> set1, Set<String> set2) {
+ return set1.stream().anyMatch(set2::contains);
}
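hasCommonElement becomes a one-liner; anyMatch short-circuits on the first element of set1 found in set2, exactly as the hand-written loop did. A runnable sketch:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    class AnyMatchSketch {
        static boolean hasCommonElement(Set<String> set1, Set<String> set2) {
            // Stops at the first match instead of scanning everything.
            return set1.stream().anyMatch(set2::contains);
        }

        public static void main(String[] args) {
            Set<String> a = new HashSet<>(Arrays.asList("x", "y"));
            Set<String> b = new HashSet<>(Arrays.asList("y", "z"));
            System.out.println(hasCommonElement(a, b)); // true
        }
    }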
void checkExpressionsForGroupingSet(List<ASTNode> grpByExprs,
@@ -6506,9 +6438,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// Is the grouping sets data consumed in the current in MR job, or
// does it need an additional MR job
- boolean groupingSetsNeedAdditionalMRJob =
- groupingSetsPresent && groupingSets.size() > newMRJobGroupingSetsThreshold ?
- true : false;
+ boolean groupingSetsNeedAdditionalMRJob = groupingSetsPresent &&
+ groupingSets.size() > newMRJobGroupingSetsThreshold;
GroupByOperator groupByOperatorInfo =
(GroupByOperator) genGroupByPlanMapGroupByOperator(
@@ -6516,7 +6447,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
dest,
grpByExprs,
inputOperatorInfo,
- GroupByDesc.Mode.HASH,
genericUDAFEvaluators,
groupingSets,
groupingSetsPresent && !groupingSetsNeedAdditionalMRJob);
@@ -6587,7 +6517,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// ////// Generate GroupbyOperator3
return genGroupByPlanGroupByOperator2MR(parseInfo, dest,
- reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL,
+ reduceSinkOperatorInfo2,
genericUDAFEvaluators, groupingSetsPresent);
}
}
@@ -6681,7 +6611,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
new LinkedHashMap<String, GenericUDAFEvaluator>();
GroupByOperator groupByOperatorInfo =
(GroupByOperator) genGroupByPlanMapGroupByOperator(
- qb, dest, grpByExprs, inputOperatorInfo, GroupByDesc.Mode.HASH,
+ qb, dest, grpByExprs, inputOperatorInfo,
genericUDAFEvaluators, groupingSets, groupingSetsPresent);
groupOpToInputTables.put(groupByOperatorInfo, opParseCtx.get(
@@ -6722,7 +6652,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// ////// Generate GroupbyOperator3
return genGroupByPlanGroupByOperator2MR(parseInfo, dest,
- reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL,
+ reduceSinkOperatorInfo2,
genericUDAFEvaluators, groupingSetsPresent);
} else {
// If there are no grouping keys, grouping sets cannot be present
@@ -6741,7 +6671,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
groupingSetsPresent);
return genGroupByPlanGroupByOperator2MR(parseInfo, dest,
- reduceSinkOperatorInfo, GroupByDesc.Mode.FINAL, genericUDAFEvaluators, false);
+ reduceSinkOperatorInfo, genericUDAFEvaluators, false);
}
}
@@ -6863,8 +6793,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if ((dest_tab.getSortCols() != null) &&
(dest_tab.getSortCols().size() > 0)) {
- sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
- sortOrders = getSortOrders(dest, qb, dest_tab, input);
+ sortCols = getSortCols(dest, qb, dest_tab, table_desc, input);
+ sortOrders = getSortOrders(dest_tab);
if (!enforceBucketing) {
throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
}
@@ -6887,7 +6817,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
int numBuckets = dest_tab.getNumBuckets();
if (numBuckets > maxReducers) {
- LOG.debug("numBuckets is {}", numBuckets, " and maxReducers is {}", maxReducers);
+ LOG.debug("numBuckets is {} and maxReducers is {}", numBuckets, maxReducers);
multiFileSpray = true;
totalFiles = numBuckets;
if (totalFiles % maxReducers == 0) {
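The one-line change above fixes a genuinely broken log call: the old form passed four varargs against a single {} placeholder, so SLF4J printed only numBuckets and silently discarded the remaining arguments. Side by side (logger setup is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class VarargsLogSketch {
        private static final Logger LOG = LoggerFactory.getLogger(VarargsLogSketch.class);

        void example(int numBuckets, int maxReducers) {
            // Broken: one placeholder, four arguments; everything after
            // numBuckets is ignored when the message is formatted.
            LOG.debug("numBuckets is {}", numBuckets, " and maxReducers is {}", maxReducers);
            // Fixed: placeholders and arguments line up one-to-one.
            LOG.debug("numBuckets is {} and maxReducers is {}", numBuckets, maxReducers);
        }
    }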
@@ -6940,11 +6870,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (!enforceBucketing) {
throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
}
- else {
- if(!enforceBucketing) {
- partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
- }
- }
enforceBucketing = true;
}
@@ -7172,7 +7097,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
private ImmutableBitSet getEnabledNotNullConstraints(Table tbl) throws HiveException{
- List<Boolean> nullConstraints = new ArrayList<>();
final NotNullConstraint nnc = Hive.get().getEnabledNotNullConstraints(
tbl.getDbName(), tbl.getTableName());
ImmutableBitSet bitSet = null;
@@ -7202,9 +7126,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ExprNodeDesc colExpr = selectOp.getConf().getColList().get(0);
if(colExpr instanceof ExprNodeGenericFuncDesc) {
ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc)colExpr ;
- if(func.getGenericUDF() instanceof GenericUDFCardinalityViolation){
- return true;
- }
+ return func.getGenericUDF() instanceof GenericUDFCardinalityViolation;
}
}
}
@@ -7259,11 +7181,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (combinedConstraintExpr != null) {
ExprNodeDesc constraintUDF = TypeCheckProcFactory.DefaultExprProcessor.
getFuncExprNodeDesc("enforce_constraint", combinedConstraintExpr);
- Operator newConstraintFilter = putOpInsertMap(OperatorFactory.getAndMakeChild(
+ return putOpInsertMap(OperatorFactory.getAndMakeChild(
new FilterDesc(constraintUDF, false), new RowSchema(
inputRR.getColumnInfos()), input), inputRR);
-
- return newConstraintFilter;
}
return input;
}
@@ -7314,8 +7234,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return currUDF;
}
- private Path getDestinationFilePath(final String destinationFile, boolean isMmTable)
- throws SemanticException {
+ private Path getDestinationFilePath(final String destinationFile, boolean isMmTable) {
if (this.isResultsCacheEnabled() && this.queryTypeCanUseCache()) {
assert (!isMmTable);
QueryResultsCache instance = QueryResultsCache.getInstance();
@@ -7449,7 +7368,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (destTableIsFullAcid) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest);
//todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM
- checkAcidConstraints(qb, tableDescriptor, destinationTable);
+ checkAcidConstraints();
}
try {
if (ctx.getExplainConfig() != null) {
@@ -7500,8 +7419,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
destinationTable.getFullyQualifiedName(), false, false, true);
}
- WriteEntity output = generateTableWriteEntity(
- dest, destinationTable, partSpec, ltd, dpCtx, isNonNativeTable);
+ WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
ctx.getLoadTableOutputMap().put(ltd, output);
break;
}
@@ -7514,7 +7432,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
checkExternalTable(destinationTable);
- Path tabPath = destinationTable.getPath();
Path partPath = destinationPartition.getDataLocation();
checkImmutableTable(qb, destinationTable, partPath, true);
@@ -7573,7 +7490,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (destTableIsFullAcid) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest);
//todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM?
- checkAcidConstraints(qb, tableDescriptor, destinationTable);
+ checkAcidConstraints();
}
try {
if (ctx.getExplainConfig() != null) {
@@ -7605,9 +7522,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ltd.setLbCtx(lbCtx);
loadTableWork.add(ltd);
- if (!outputs.add(new WriteEntity(destinationPartition,
- determineWriteType(ltd, destinationTable.isNonNative(), dest)))) {
-
+ if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
}
@@ -7833,7 +7748,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (destTableIsFullAcid) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest);
//todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM
- checkAcidConstraints(qb, tableDescriptor, null);
+ checkAcidConstraints();
}
// isReplace = false in case concurrent operation is executed
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
@@ -7852,8 +7767,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
}
ltd.setMdTable(destinationTable);
- WriteEntity output = generateTableWriteEntity(
- dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx, isNonNativeTable);
+ WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
ctx.getLoadTableOutputMap().put(ltd, output);
} else {
// Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
@@ -7890,9 +7804,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
.getDeserializer(conf).getObjectInspector();
List<? extends StructField> fields = rowObjectInspector
.getAllStructFieldRefs();
- for (int i = 0; i < fields.size(); i++) {
- vecCol.add(new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils
- .getTypeInfoFromObjectInspector(fields.get(i)
+ for (StructField field : fields) {
+ vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils
+ .getTypeInfoFromObjectInspector(field
.getFieldObjectInspector()), "", false));
}
} catch (Exception e) {
@@ -7942,10 +7856,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
handleLineage(ltd, output);
setWriteIdForSurrogateKeys(ltd, input);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: "
- + destinationPath + " row schema: " + inputRR.toString());
- }
+ LOG.debug("Created FileSink Plan for clause: {}dest_path: {} row schema: {}", dest, destinationPath, inputRR);
FileSinkOperator fso = (FileSinkOperator) output;
fso.getConf().setTable(destinationTable);
@@ -7958,16 +7869,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
&& !destTableIsTemporary && !destTableIsMaterialization
&& ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
if (destType == QBMetaData.DEST_TABLE) {
- genAutoColumnStatsGatheringPipeline(qb, destinationTable, partSpec, input,
+ genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input,
qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()),
false);
} else if (destType == QBMetaData.DEST_PARTITION) {
- genAutoColumnStatsGatheringPipeline(qb, destinationTable, destinationPartition.getSpec(), input,
+ genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input,
qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()),
false);
} else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
// CTAS or CMV statement
- genAutoColumnStatsGatheringPipeline(qb, destinationTable, null, input,
+ genAutoColumnStatsGatheringPipeline(destinationTable, null, input,
false, true);
}
}
@@ -8009,8 +7920,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (numNonPartitionedCols <= 0) {
throw new SemanticException("Too many partition columns declared");
}
- for (int i = 0; i < colInfos.size(); i++) {
- ColumnInfo colInfo = colInfos.get(i);
+ for (ColumnInfo colInfo : colInfos) {
String[] nm = inputRR.reverseLookup(colInfo.getInternalName());
if (nm[1] != null) { // non-null column alias
@@ -8233,7 +8143,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- private void setWriteIdForSurrogateKeys(LoadTableDesc ltd, Operator input) throws SemanticException {
+ private void setWriteIdForSurrogateKeys(LoadTableDesc ltd, Operator input) {
if (ltd == null) {
return;
}
@@ -8257,7 +8167,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private WriteEntity generateTableWriteEntity(String dest, Table dest_tab,
Map<String, String> partSpec, LoadTableDesc ltd,
- DynamicPartitionCtx dpCtx, boolean isNonNativeTable)
+ DynamicPartitionCtx dpCtx)
throws SemanticException {
WriteEntity output = null;
@@ -8265,7 +8175,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// in the case of DP, we will register WriteEntity in MoveTask when the
// list of dynamically created partitions are known.
if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
- output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest));
+ output = new WriteEntity(dest_tab, determineWriteType(ltd, dest));
if (!outputs.add(output)) {
if(!((this instanceof MergeSemanticAnalyzer) &&
conf.getBoolVar(ConfVars.MERGE_SPLIT_UPDATE))) {
@@ -8283,7 +8193,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
// No static partition specified
if (dpCtx.getNumSPCols() == 0) {
- output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest), false);
+ output = new WriteEntity(dest_tab, determineWriteType(ltd, dest), false);
outputs.add(output);
output.setDynamicPartitionWrite(true);
}
@@ -8374,12 +8284,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
this.rootTasks
.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc)));
-
}
- private void genAutoColumnStatsGatheringPipeline(QB qb, Table table, Map<String, String> partSpec,
- Operator curr, boolean isInsertInto, boolean useTableValueConstructor)
+ private void genAutoColumnStatsGatheringPipeline(Table table, Map<String, String> partSpec, Operator curr,
+ boolean isInsertInto, boolean useTableValueConstructor)
throws SemanticException {
LOG.info("Generate an operator pipeline to autogather column stats for table " + table.getTableName()
+ " in query " + ctx.getCmd());
@@ -8399,8 +8308,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return colName;
}
- private void checkAcidConstraints(QB qb, TableDesc tableDesc,
- Table table) throws SemanticException {
+ private void checkAcidConstraints() {
/*
LOG.info("Modifying config values for ACID write");
conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true);
@@ -8457,8 +8365,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// does the conversion to String by itself.
boolean isMetaDataSerDe = table_desc.getDeserializerClass().equals(
MetadataTypedColumnsetSerDe.class);
- boolean isLazySimpleSerDe = table_desc.getDeserializerClass().equals(
- LazySimpleSerDe.class);
if (!isMetaDataSerDe && !deleting(dest)) {
// If we're updating, add the ROW__ID expression, then make the following column accesses
@@ -8536,8 +8442,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
@SuppressWarnings("nls")
- private Operator genLimitPlan(String dest, QB qb, Operator input, int offset, int limit)
- throws SemanticException {
+ private Operator genLimitPlan(String dest, Operator input, int offset, int limit) {
// A map-only job can be optimized: instead of converting it to a
// map-reduce job, we can have another map
// job do the same, to avoid the cost of sorting in the map-reduce phase.
@@ -8583,10 +8488,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException(ErrorMsg.UDTF_LATERAL_VIEW.getMsg());
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Table alias: " + outputTableAlias + " Col aliases: "
- + colAliases);
- }
+ LOG.debug("Table alias: {} Col aliases: {}", outputTableAlias, colAliases);
// Use the RowResolver from the input operator to generate a input
// ObjectInspector that can be used to initialize the UDTF. Then, the
@@ -8648,10 +8550,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
// Add the UDTFOperator to the operator DAG
- Operator<?> udtf = putOpInsertMap(OperatorFactory.getAndMakeChild(
+ return putOpInsertMap(OperatorFactory.getAndMakeChild(
new UDTFDesc(genericUDTF, outerLV), new RowSchema(out_rwsch.getColumnInfos()),
input), out_rwsch);
- return udtf;
}
@SuppressWarnings("nls")
@@ -8663,7 +8564,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// A better approach would be to
// write into a local file and then have a map-only job.
// Add the limit operator to get the value fields
- Operator curr = genLimitPlan(dest, qb, input, offset, limit);
+ Operator curr = genLimitPlan(dest, input, offset, limit);
// the client requested that an extra map-reduce step be performed
if (!extraMRStep || !HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_LIMIT_EXTRASTEP)){
@@ -8672,7 +8573,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// Create a reduceSink operator followed by another limit
curr = genReduceSinkPlan(dest, qb, curr, 1, false);
- return genLimitPlan(dest, qb, curr, offset, limit);
+ return genLimitPlan(dest, curr, offset, limit);
}
private List<ExprNodeDesc> getPartitionColsFromBucketCols(String dest, QB qb, Table tab, TableDesc table_desc,
@@ -8695,7 +8596,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
+ return genConvertCol(dest, qb, table_desc, input, posns, convert);
}
// We have to set up the bucketing columns differently for updates and deletes,
@@ -8719,13 +8620,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return rlist;
}
- private List<ExprNodeDesc> genConvertCol(String dest, QB qb, Table tab, TableDesc table_desc, Operator input,
- List<Integer> posns, boolean convert) throws SemanticException {
+ private List<ExprNodeDesc> genConvertCol(String dest, QB qb, TableDesc tableDesc, Operator input,
+ List<Integer> posns, boolean convert)
+ throws SemanticException {
StructObjectInspector oi = null;
try {
- Deserializer deserializer = table_desc.getDeserializerClass()
+ Deserializer deserializer = tableDesc.getDeserializerClass()
.newInstance();
- SerDeUtils.initializeSerDe(deserializer, conf, table_desc.getProperties(), null);
+ SerDeUtils.initializeSerDe(deserializer, conf, tableDesc.getProperties(), null);
oi = (StructObjectInspector) deserializer.getObjectInspector();
} catch (Exception e) {
throw new SemanticException(e);
@@ -8767,8 +8669,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return expressions;
}
- private List<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc,
- Operator input, boolean convert)
+ private List<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc tableDesc, Operator input)
throws SemanticException {
List<Order> tabSortCols = tab.getSortCols();
List<FieldSchema> tabCols = tab.getCols();
@@ -8786,11 +8687,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
+ return genConvertCol(dest, qb, tableDesc, input, posns, true);
}
- private List<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
- throws SemanticException {
+ private List<Integer> getSortOrders(Table tab) {
List<Order> tabSortCols = tab.getSortCols();
List<FieldSchema> tabCols = tab.getCols();
@@ -9066,7 +8966,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<String> outputColumnNames = new ArrayList<String>();
// all children are base classes
Operator<?>[] rightOps = new Operator[right.length];
- int outputPos = 0;
Map<String, Byte> reversedExprs = new HashMap<String, Byte>();
Map<Byte, List<ExprNodeDesc>> exprMap = new HashMap<Byte, List<ExprNodeDesc>>();
@@ -9239,7 +9138,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
@SuppressWarnings("nls")
- private Operator genJoinReduceSinkChild(QB qb, ExprNodeDesc[] joinKeys,
+ private Operator genJoinReduceSinkChild(ExprNodeDesc[] joinKeys,
Operator<?> child, String[] srcs, int tag) throws SemanticException {
Operator dummy = Operator.createDummy(); // dummy for backtracking
@@ -9258,8 +9157,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
// Walk over the input row resolver and copy in the output
- List<ExprNodeDesc> reduceValues = new ArrayList<ExprNodeDesc>();
- List<ExprNodeDesc> reduceValuesBack = new ArrayList<ExprNodeDesc>();
+ ArrayList<ExprNodeDesc> reduceValues = new ArrayList<ExprNodeDesc>();
Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
List<ColumnInfo> columns = inputRR.getColumnInfos();
@@ -9295,7 +9193,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String outputColName = getColumnInternalName(reduceValues.size());
reduceValues.add(expr);
- reduceValuesBack.add(exprBack);
ColumnInfo newColInfo = new ColumnInfo(colInfo);
newColInfo.setInternalName(Utilities.ReduceField.VALUE + "." + outputColName);
@@ -9384,7 +9281,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// generate a groupby operator (HASH mode) for a map-side partial
// aggregation for semijoin
- srcOps[pos++] = genMapGroupByForSemijoin(qb, fields, srcOp, GroupByDesc.Mode.HASH);
+ srcOps[pos++] = genMapGroupByForSemijoin(fields, srcOp);
} else {
srcOps[pos++] = srcOp;
}
@@ -9402,7 +9299,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (!isCBOExecuted()) {
srcOps[i] = genNotNullFilterForJoinSourcePlan(qb, srcOps[i], joinTree, joinKeys[i]);
}
- srcOps[i] = genJoinReduceSinkChild(qb, joinKeys[i], srcOps[i], srcs, joinTree.getNextTag());
+ srcOps[i] = genJoinReduceSinkChild(joinKeys[i], srcOps[i], srcs, joinTree.getNextTag());
}
Operator<?> topOp = genJoinOperatorChildren(joinTree, joinSrcOp, srcOps, omitOpts, joinKeys);
@@ -9498,8 +9395,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return output;
}
- private Operator genMapGroupByForSemijoin(QB qb, List<ASTNode> fields,
- Operator<?> input, GroupByDesc.Mode mode)
+ private Operator genMapGroupByForSemijoin(List<ASTNode> fields, Operator<?> input)
throws SemanticException {
RowResolver groupByInputRowResolver = opParseCtx.get(input).getRowResolver();
@@ -9557,7 +9453,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
float minReductionHashAggr = HiveConf
.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild(
- new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations,
+ new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations,
false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, null, false, -1, false),
new RowSchema(groupByOutputRowResolver.getColumnInfos()),
input), groupByOutputRowResolver);
@@ -9610,8 +9506,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private Operator genJoinPlan(QB qb, Map<String, Operator> map)
throws SemanticException {
QBJoinTree joinTree = qb.getQbJoinTree();
- Operator joinOp = genJoinOperator(qb, joinTree, map, null);
- return joinOp;
+ return genJoinOperator(qb, joinTree, map, null);
}
/**
@@ -9881,7 +9776,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
joinTree.getAliasToOpInfo().put(
getModifiedAlias(qb, rightalias), aliasToOpInfo.get(rightalias));
// remember rhs table for semijoin
- if (joinTree.getNoSemiJoin() == false) {
+ if (!joinTree.getNoSemiJoin()) {
joinTree.addRHSSemijoin(rightalias);
}
@@ -9920,7 +9815,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
QBJoinTree joinTree = new QBJoinTree();
JoinCond[] condn = new JoinCond[1];
- int joinType = joinParseTree.getToken().getType();
switch (joinParseTree.getToken().getType()) {
case HiveParser.TOK_LEFTOUTERJOIN:
joinTree.setNoOuterJoin(false);
@@ -10013,7 +9907,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
joinTree.getAliasToOpInfo().put(
getModifiedAlias(qb, alias), aliasToOpInfo.get(alias));
// remember rhs table for semijoin
- if (joinTree.getNoSemiJoin() == false) {
+ if (!joinTree.getNoSemiJoin()) {
joinTree.addRHSSemijoin(alias);
}
} else {
@@ -10077,7 +9971,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
joinTree.setMapAliases(mapAliases);
- if ((conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) == false) {
+ if (!(conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez"))) {
parseStreamTables(joinTree, qb);
}
}
@@ -10085,7 +9979,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return joinTree;
}
- private static boolean isValidJoinSide(ASTNode right) {
+ private boolean isValidJoinSide(ASTNode right) {
return (right.getToken().getType() == HiveParser.TOK_TABREF)
|| (right.getToken().getType() == HiveParser.TOK_SUBQUERY)
|| (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION);
@@ -10214,9 +10108,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* disableMapJoinWithHint
* @param hints
* @return true if a hint to disable map join is provided, else false
- * @throws SemanticException
*/
- private boolean disableMapJoinWithHint(List<ASTNode> hints) throws SemanticException {
+ private boolean disableMapJoinWithHint(List<ASTNode> hints) {
if (hints == null || hints.size() == 0) {
return false;
}
@@ -10242,11 +10135,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
/**
* Merges node to target
*/
- private void mergeJoins(QB qb, QBJoinTree node, QBJoinTree target, int pos, int[] tgtToNodeExprMap) {
+ private void mergeJoins(QBJoinTree node, QBJoinTree target, int pos, int[] tgtToNodeExprMap) {
String[] nodeRightAliases = node.getRightAliases();
String[] trgtRightAliases = target.getRightAliases();
- String[] rightAliases = new String[nodeRightAliases.length
- + trgtRightAliases.length];
+ String[] rightAliases = new String[nodeRightAliases.length + trgtRightAliases.length];
for (int i = 0; i < trgtRightAliases.length; i++) {
rightAliases[i] = trgtRightAliases[i];
@@ -10346,17 +10238,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- if (node.getNoOuterJoin() && target.getNoOuterJoin()) {
- target.setNoOuterJoin(true);
- } else {
- target.setNoOuterJoin(false);
- }
-
- if (node.getNoSemiJoin() && target.getNoSemiJoin()) {
- target.setNoSemiJoin(true);
- } else {
- target.setNoSemiJoin(false);
- }
+ target.setNoOuterJoin(node.getNoOuterJoin() && target.getNoOuterJoin());
+ target.setNoSemiJoin(node.getNoSemiJoin() && target.getNoSemiJoin());
target.mergeRHSSemijoin(node);
@@ -10396,9 +10279,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// Safety check: if we are merging join operators and there are post-filtering
// conditions, they cannot be outer joins
assert node.getNoOuterJoin() ;
- if( target.getPostJoinFilters().size() != 0) {
- assert target.getNoOuterJoin() ;
- }
+ assert target.getPostJoinFilters().size() == 0 || target.getNoOuterJoin();
for (ASTNode exprPostFilter : node.getPostJoinFilters()) {
target.addPostJoinFilter(exprPostFilter);
}
@@ -10548,7 +10429,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
continue;
}
}
- mergeJoins(qb, node, target, pos, mergeDetails.getRight());
+ mergeJoins(node, target, pos, mergeDetails.getRight());
trees.set(j, null);
mergedQBJTree = true;
continue; // continue merging with next alias
@@ -10586,8 +10467,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// reconstruct join tree
QBJoinTree current = null;
- for (int i = 0; i < trees.size(); i++) {
- QBJoinTree target = trees.get(i);
+ for (QBJoinTree target : trees) {
if (target == null) {
continue;
}
@@ -10603,23 +10483,17 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// Join types must all be the same for merging (otherwise this returns null)
private JoinType getType(JoinCond[] conds) {
JoinType type = conds[0].getJoinType();
- for (int k = 1; k < conds.length; k++) {
- if (type != conds[k].getJoinType()) {
- return null;
- }
- }
- return type;
+ return Arrays.stream(conds).allMatch(cond -> cond.getJoinType() == type) ? type : null;
}
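getType above now reads the first condition's type and validates the rest with allMatch. Since the stream starts at index 0 rather than 1, conds[0] is also compared against itself, a harmless no-op. A self-contained sketch of the pattern, with stand-in types in place of Hive's JoinCond:

import java.util.Arrays;

class AllMatchDemo {
    enum JoinType { INNER, LEFTOUTER }

    // Returns the common join type, or null if the conditions disagree,
    // mirroring the rewritten getType above.
    static JoinType getType(JoinType[] conds) {
        JoinType type = conds[0];
        return Arrays.stream(conds).allMatch(c -> c == type) ? type : null;
    }

    public static void main(String[] args) {
        System.out.println(getType(new JoinType[] { JoinType.INNER, JoinType.INNER }));     // INNER
        System.out.println(getType(new JoinType[] { JoinType.INNER, JoinType.LEFTOUTER })); // null
    }
}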
- private Operator genSelectAllDesc(Operator input) throws SemanticException {
+ private Operator genSelectAllDesc(Operator input) {
OpParseContext inputCtx = opParseCtx.get(input);
RowResolver inputRR = inputCtx.getRowResolver();
List<ColumnInfo> columns = inputRR.getColumnInfos();
List<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
List<String> columnNames = new ArrayList<String>();
Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
- for (int i = 0; i < columns.size(); i++) {
- ColumnInfo col = columns.get(i);
+ for (ColumnInfo col : columns) {
colList.add(new ExprNodeColumnDesc(col, true));
columnNames.add(col.getInternalName());
columnExprMap.put(col.getInternalName(), new ExprNodeColumnDesc(col, true));
@@ -10640,8 +10514,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
QBParseInfo qbp = qb.getParseInfo();
- SortedSet<String> ks = new TreeSet<String>();
- ks.addAll(qbp.getClauseNames());
+ SortedSet<String> ks = new TreeSet<String>(qbp.getClauseNames());
List<List<String>> commonGroupByDestGroups = new ArrayList<List<String>>();
@@ -10686,15 +10559,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (currentDistinctKeys.isEmpty()) {
// current dest has no distinct keys.
- List<ExprNodeDesc> combinedList = new ArrayList<ExprNodeDesc>();
- combineExprNodeLists(targetSprayKeys, targetDistinctKeys, combinedList);
+ List<ExprNodeDesc> combinedList = combineExprNodeLists(targetSprayKeys, targetDistinctKeys);
if (!matchExprLists(combinedList, currentSprayKeys)) {
continue;
} // else do the common code at the end.
} else {
if (targetDistinctKeys.isEmpty()) {
- List<ExprNodeDesc> combinedList = new ArrayList<ExprNodeDesc>();
- combineExprNodeLists(currentSprayKeys, currentDistinctKeys, combinedList);
+ List<ExprNodeDesc> combinedList = combineExprNodeLists(currentSprayKeys, currentDistinctKeys);
if (!matchExprLists(combinedList, targetSprayKeys)) {
continue;
} else {
@@ -10755,14 +10626,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return sprayKeys;
}
- private void combineExprNodeLists(List<ExprNodeDesc> list, List<ExprNodeDesc> list2,
- List<ExprNodeDesc> combinedList) {
- combinedList.addAll(list);
+ private List<ExprNodeDesc> combineExprNodeLists(List<ExprNodeDesc> list, List<ExprNodeDesc> list2) {
+ ArrayList<ExprNodeDesc> result = new ArrayList<>(list);
for (ExprNodeDesc elem : list2) {
- if (!combinedList.contains(elem)) {
- combinedList.add(elem);
+ if (!result.contains(elem)) {
+ result.add(elem);
}
}
+ return result;
}
// Returns whether or not two lists contain the same elements independent of order
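combineExprNodeLists above now builds and returns a fresh list instead of filling a caller-supplied out-parameter, so the data flow is visible at the call sites in the preceding hunk. A minimal sketch of the same refactor, with a generic element type in place of ExprNodeDesc:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class CombineDemo {
    // Combine two lists, skipping duplicates from the second; returning
    // the result replaces the old "append into combinedList" style.
    static <T> List<T> combine(List<T> first, List<T> second) {
        List<T> result = new ArrayList<>(first);
        for (T elem : second) {
            if (!result.contains(elem)) { // linear scan; fine for short key lists
                result.add(elem);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(combine(Arrays.asList(1, 2), Arrays.asList(2, 3))); // [1, 2, 3]
    }
}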
@@ -10889,7 +10760,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
if (node.getToken().getType() == HiveParser.TOK_ALLCOLREF) {
curr = genSelectPlan(dest, qb, curr, curr);
RowResolver rr = opParseCtx.get(curr).getRowResolver();
- qbp.setSelExprForClause(dest, SemanticAnalyzer.genSelectDIAST(rr));
+ qbp.setSelExprForClause(dest, genSelectDIAST(rr));
}
}
if (conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) {
@@ -10917,10 +10788,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Body Plan for Query Block " + qb.getId());
- }
+ LOG.debug("Created Body Plan for Query Block {}", qb.getId());
return curr;
}
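This hunk, like the UDTF and table-plan ones, swaps the isDebugEnabled() guard plus string concatenation for SLF4J's parameterized {} form. The message is only formatted once the DEBUG level check inside debug() has passed, so an explicit guard is only worth keeping when an argument is expensive to compute. A minimal sketch, assuming an SLF4J binding is on the runtime classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LogDemo {
    private static final Logger LOG = LoggerFactory.getLogger(LogDemo.class);

    public static void main(String[] args) {
        String qbId = "qb-1";
        // Old pattern: guard, then eager concatenation.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Created Body Plan for Query Block " + qbId);
        }
        // New pattern: the {} placeholder defers formatting until the
        // level check inside debug() has already passed.
        LOG.debug("Created Body Plan for Query Block {}", qbId);
    }
}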
@@ -11174,20 +11042,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
.getColumnInfos()));
// set union operator as child of each of leftOp and rightOp
- List<Operator<? extends OperatorDesc>> child =
- new ArrayList<Operator<? extends OperatorDesc>>();
- child.add(unionforward);
- rightOp.setChildOperators(child);
-
- child = new ArrayList<Operator<? extends OperatorDesc>>();
- child.add(unionforward);
- leftOp.setChildOperators(child);
+ rightOp.setChildOperators(Lists.newArrayList(unionforward));
+ leftOp.setChildOperators(Lists.newArrayList(unionforward));
- List<Operator<? extends OperatorDesc>> parent =
- new ArrayList<Operator<? extends OperatorDesc>>();
- parent.add(leftOp);
- parent.add(rightOp);
- unionforward.setParentOperators(parent);
+ unionforward.setParentOperators(Lists.newArrayList(leftOp, rightOp));
// create operator info list to return
return putOpInsertMap(unionforward, unionoutRR);
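Lists.newArrayList in the union wiring above is Guava's varargs factory and yields a plain mutable ArrayList; that matters here because operator parent/child lists may be rewired later, which a fixed-size Arrays.asList view would reject. A small illustration, assuming Guava on the classpath (the + lines above already use it):

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

class ListsDemo {
    public static void main(String[] args) {
        // Mutable: safe to rewire later.
        List<String> children = Lists.newArrayList("unionforward");
        children.add("someOtherOp");
        System.out.println(children); // [unionforward, someOtherOp]

        // Fixed-size view: structural modification is rejected.
        List<String> fixed = Arrays.asList("unionforward");
        try {
            fixed.add("someOtherOp");
        } catch (UnsupportedOperationException e) {
            System.out.println("fixed-size list rejected add()");
        }
    }
}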
@@ -11285,9 +11143,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* The alias used for the table in the row resolver
* @param rwsch
* The row resolver used to resolve column references
- * @param qbm
- * The metadata information for the query block which is used to
- * resolve unaliased columns
* @param planExpr
* The plan tree for the expression. If the user specified this, the
* parse expressions are not used
@@ -11296,8 +11151,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
*/
private ExprNodeDesc genSamplePredicate(TableSample ts,
List<String> bucketCols, boolean useBucketCols, String alias,
- RowResolver rwsch, QBMetaData qbm, ExprNodeDesc planExpr,
- int bucketingVersion)
+ RowResolver rwsch, ExprNodeDesc planExpr, int bucketingVersion)
throws SemanticException {
ExprNodeDesc numeratorExpr = new ExprNodeConstantDesc(
@@ -11387,8 +11241,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ColumnInfo colInfo = new ColumnInfo(fields.get(i).getFieldName(),
TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i)
.getFieldObjectInspector()), alias, false);
- colInfo.setSkewedCol((isSkewedCol(alias, qb, fields.get(i)
- .getFieldName())) ? true : false);
+ colInfo.setSkewedCol(isSkewedCol(alias, qb, fields.get(i).getFieldName()));
rwsch.put(alias, fields.get(i).getFieldName(), colInfo);
}
} catch (SerDeException e) {
@@ -11492,26 +11345,25 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
break;
}
- if (((ASTNode) sampleExprs.get(i).getChild(0)).getText()
- .equalsIgnoreCase(tabBucketCols.get(j))) {
+ if ((sampleExprs.get(i).getChild(0)).getText().equalsIgnoreCase(tabBucketCols.get(j))) {
colFound = true;
}
}
- colsEqual = (colsEqual && colFound);
+ colsEqual = colFound;
}
// Check if input can be pruned
- ts.setInputPruning((sampleExprs == null || sampleExprs.size() == 0 || colsEqual));
+ ts.setInputPruning((sampleExprs.size() == 0 || colsEqual));
// check if input pruning is enough
- if ((sampleExprs == null || sampleExprs.size() == 0 || colsEqual)
+ if ((sampleExprs.size() == 0 || colsEqual)
&& (num == den || (den % numBuckets == 0 || numBuckets % den == 0))) {
// input pruning is enough; add the filter for the optimizer to use it
// later
LOG.info("No need for sample filter");
ExprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols,
- colsEqual, alias, rwsch, qb.getMetaData(), null,
+ colsEqual, alias, rwsch, null,
tab.getBucketingVersion());
FilterDesc filterDesc = new FilterDesc(
samplePredicate, true, new SampleDesc(ts.getNumerator(),
@@ -11524,7 +11376,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// create tableOp to be filterDesc and set as child to 'top'
LOG.info("Need sample filter");
ExprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols,
- colsEqual, alias, rwsch, qb.getMetaData(), null,
+ colsEqual, alias, rwsch, null,
tab.getBucketingVersion());
FilterDesc filterDesc = new FilterDesc(samplePredicate, true);
filterDesc.setGenerated(true);
@@ -11556,7 +11408,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
tsSample.setInputPruning(true);
qb.getParseInfo().setTabSample(alias, tsSample);
ExprNodeDesc samplePred = genSamplePredicate(tsSample, tab
- .getBucketCols(), true, alias, rwsch, qb.getMetaData(), null,
+ .getBucketCols(), true, alias, rwsch, null,
tab.getBucketingVersion());
FilterDesc filterDesc = new FilterDesc(samplePred, true,
new SampleDesc(tsSample.getNumerator(), tsSample
@@ -11576,7 +11428,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
.getFuncExprNodeDesc("rand", new ExprNodeConstantDesc(Integer
.valueOf(460476415)));
ExprNodeDesc samplePred = genSamplePredicate(tsSample, null, false,
- alias, rwsch, qb.getMetaData(), randFunc, tab.getBucketingVersion());
+ alias, rwsch, randFunc, tab.getBucketingVersion());
FilterDesc filterDesc = new FilterDesc(samplePred, true);
filterDesc.setGenerated(true);
op = OperatorFactory.getAndMakeChild(filterDesc,
@@ -11588,22 +11440,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Operator output = putOpInsertMap(op, rwsch);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created Table Plan for " + alias + " " + op.toString());
- }
+ LOG.debug("Created Table Plan for {} {}", alias, op);
return output;
}
- static boolean isSkewedCol(String alias, QB qb, String colName) {
- boolean isSkewedCol = false;
- List<String> skewedCols = qb.getSkewedColumnNames(alias);
- for (String skewedCol : skewedCols) {
- if (skewedCol.equalsIgnoreCase(colName)) {
- isSkewedCol = true;
- }
- }
- return isSkewedCol;
+ boolean isSkewedCol(String alias, QB qb, String colName) {
+ return qb.getSkewedColumnNames(alias).stream()
+ .anyMatch(skewedCol -> skewedCol.equalsIgnoreCase(colName));
}
private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String alias,
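isSkewedCol above collapses a flag-accumulating loop into Stream.anyMatch, which also short-circuits at the first case-insensitive hit instead of scanning the remaining skewed columns. A self-contained sketch of the same predicate:

import java.util.Arrays;
import java.util.List;

class AnyMatchDemo {
    static boolean isSkewedCol(List<String> skewedCols, String colName) {
        // Stops at the first match, unlike the old loop that kept iterating.
        return skewedCols.stream().anyMatch(c -> c.equalsIgnoreCase(colName));
    }

    public static void main(String[] args) {
        System.out.println(isSkewedCol(Arrays.asList("key", "Value"), "VALUE")); // true
        System.out.println(isSkewedCol(Arrays.asList("key"), "value"));          // false
    }
}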
@@ -11679,7 +11523,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
List<Partition> partitions = qbp.getTableSpec().partitions;
if (partitions != null) {
for (Partition partn : partitions) {
- // inputs.add(new ReadEntity(partn)); // is this needed at all?
WriteEntity pwe = new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK);
pwe.setTxnAnalyze(true);
outputs.add(pwe);
@@ -11810,7 +11653,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String dest = dests.iterator().next();
ASTNode whereClause = qb.getParseInfo().getWhrForClause(dest);
if ( whereClause != null ) {
- extractJoinCondsFromWhereClause(joinTree, qb, dest,
+ extractJoinCondsFromWhereClause(joinTree,
(ASTNode) whereClause.getChild(0),
aliasToOpInfo );
}
@@ -11913,7 +11756,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* @param qb
* @throws SemanticException
*/
-
private void genLateralViewPlans(Map<String, Operator> aliasToOpInfo, QB qb)
throws SemanticException {
Map<String, List<ASTNode>> aliasToLateralViews = qb.getParseInfo().getAliasToLateralViews();
@@ -11933,8 +11775,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// -> LateralViewJoinOperator
//
- Operator lateralViewJoin = genLateralViewPlan(qb, op, lateralViewTree);
- op = lateralViewJoin;
+ op = genLateralViewPlan(qb, op, lateralViewTree);
}
e.setValue(op);
}
@@ -12057,7 +11898,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
@SuppressWarnings("nls")
Phase1Ctx initPhase1Ctx() {
-
Phase1Ctx ctx_1 = new Phase1Ctx();
ctx_1.nextNum = 0;
ctx_1.dest = "reduce";
@@ -12071,23 +11911,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
reset(clearPartsCache);
// init
- QB qb = new QB(null, null, false);
- this.qb = qb;
+ this.qb = new QB(null, null, false);
}
@Override
@SuppressWarnings("nls")
public void analyzeInternal(ASTNode ast) throws SemanticException {
- analyzeInternal(ast, new PlannerContextFactory() {
- @Override
- public PlannerContext create() {
- return new PlannerContext();
- }
- });
- }
-
- protected static interface PlannerContextFactory {
- PlannerContext create();
+ analyzeInternal(ast, PlannerContext::new);
}
/**
@@ -12127,8 +11957,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return getTableObjectByName(tableName, true);
}
- private static void walkASTAndQualifyNames(ASTNode ast,
- Set<String> cteAlias, Context ctx, Hive db, Set<Integer> ignoredTokens, UnparseTranslator unparseTranslator)
+ private void walkASTAndQualifyNames(ASTNode ast, Set<String> cteAlias, UnparseTranslator unparseTranslator)
throws SemanticException {
Queue<Node> queue = new LinkedList<>();
queue.add(ast);
@@ -12144,7 +11973,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- if (astNode.getChildCount() > 0 && !ignoredTokens.contains(astNode.getToken().getType())) {
+ if (astNode.getChildCount() > 0 && !IGNORED_TOKENS.contains(astNode.getToken().getType())) {
for (Node child : astNode.getChildren()) {
queue.offer(child);
}
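IGNORED_TOKENS above replaces the ignoredTokens parameter that used to be threaded through every AST walker; since the set never varies per call, a shared immutable constant is enough. A sketch of the idiom, assuming Guava's ImmutableSet (the token ids below are placeholders, not Hive's real parser constants):

import com.google.common.collect.ImmutableSet;
import java.util.Set;

class WalkerDemo {
    // Placeholder ids; the real constant holds HiveParser token types
    // whose subtrees the AST walkers skip.
    private static final Set<Integer> IGNORED_TOKENS = ImmutableSet.of(10, 42);

    static boolean shouldDescend(int tokenType, int childCount) {
        return childCount > 0 && !IGNORED_TOKENS.contains(tokenType);
    }

    public static void main(String[] args) {
        System.out.println(shouldDescend(10, 3)); // false: ignored token type
        System.out.println(shouldDescend(7, 3));  // true
    }
}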
@@ -12181,25 +12010,22 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException("Duplicate definition of " + alias);
} else {
cteAlias.add(alias);
- walkASTAndQualifyNames(ast, cteAlias, ctx, db, ignoredTokens, unparseTranslator);
+ walkASTAndQualifyNames(ast, cteAlias, unparseTranslator);
}
}
// walk the other part of ast
for (int index = 1; index < ast.getChildCount(); index++) {
- walkASTAndQualifyNames(ast, cteAlias, ctx, db, ignoredTokens, unparseTranslator);
+ walkASTAndQualifyNames(ast, cteAlias, unparseTranslator);
}
} else { // there is no CTE, walk the whole AST
- walkASTAndQualifyNames(ast, cteAlias, ctx, db, ignoredTokens, unparseTranslator);
+ walkASTAndQualifyNames(ast, cteAlias, unparseTranslator);
}
unparseTranslator.applyTranslations(tokenRewriteStream);
- String rewrittenQuery = tokenRewriteStream.toString(
- ast.getTokenStartIndex(), ast.getTokenStopIndex());
- return rewrittenQuery;
+ return tokenRewriteStream.toString(ast.getTokenStartIndex(), ast.getTokenStopIndex());
}
- private void walkASTMarkTABREF(TableMask tableMask, ASTNode ast, Set<String> cteAlias,
- Context ctx, Hive db, Map<String, Table> tabNameToTabObject, Set<Integer> ignoredTokens)
+ private void walkASTMarkTABREF(TableMask tableMask, ASTNode ast, Set<String> cteAlias, Context ctx)
throws SemanticException {
Queue<Node> queue = new LinkedList<>();
queue.add(ast);
@@ -12280,16 +12106,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
new MaskAndFilterInfo(colTypes, additionalTabInfo.toString(), alias, astNode, table.isView(), table.isNonNative()));
}
}
- if (astNode.getChildCount() > 0 && !ignoredTokens.contains(astNode.getToken().getType())) {
+ if (astNode.getChildCount() > 0 && !IGNORED_TOKENS.contains(astNode.getToken().getType())) {
for (Node child : astNode.getChildren()) {
queue.offer(child);
}
}
}
- List<HivePrivilegeObject> basicPrivObjs = new ArrayList<>();
- basicPrivObjs.addAll(basicInfos.keySet());
- List<HivePrivilegeObject> needRewritePrivObjs = tableMask
- .applyRowFilterAndColumnMasking(basicPrivObjs);
+ List<HivePrivilegeObject> basicPrivObjs = new ArrayList<>(basicInfos.keySet());
+ List<HivePrivilegeObject> needRewritePrivObjs = tableMask.applyRowFilterAndColumnMasking(basicPrivObjs);
if (needRewritePrivObjs != null && !needRewritePrivObjs.isEmpty()) {
for (HivePrivilegeObject privObj : needRewritePrivObjs) {
MaskAndFilterInfo info = basicInfos.get(privObj);
@@ -12316,7 +12140,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- private static void extractColumnInfos(Table table, List<String> colNames, List<String> colTypes) {
+ private void extractColumnInfos(Table table, List<String> colNames, List<String> colTypes) {
for (FieldSchema col : table.getAllCols()) {
colNames.add(col.getName());
colTypes.add(col.getType());
@@ -12329,7 +12153,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// For the replacement, we leverage the methods that are used for
// unparseTranslator.
private ASTNode rewriteASTWithMaskAndFilter(TableMask tableMask, ASTNode ast, TokenRewriteStream tokenRewriteStream,
- Context ctx, Hive db, Map<String, Table> tabNameToTabObject, Set<Integer> ignoredTokens)
+ Context ctx, Hive db, Map<String, Table> tabNameToTabObject)
throws SemanticException {
// 1. collect information about CTE if there is any.
// The base table of CTE should be masked.
@@ -12353,20 +12177,17 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException("Duplicate definition of " + alias);
} else {
cteAlias.add(alias);
- walkASTMarkTABREF(tableMask, subq, cteAlias,
- ctx, db, tabNameToTabObject, ignoredTokens);
+ walkASTMarkTABREF(tableMask, subq, cteAlias, ctx);
}
}
// walk the other part of ast
for (int index = 1; index < ast.getChildCount(); index++) {
- walkASTMarkTABREF(tableMask, (ASTNode) ast.getChild(index), cteAlias,
- ctx, db, tabNameToTabObject, ignoredTokens);
+ walkASTMarkTABREF(tableMask, (ASTNode) ast.getChild(index), cteAlias, ctx);
}
}
// there is no CTE, walk the whole AST
else {
- walkASTMarkTABREF(tableMask, ast, cteAlias,
- ctx, db, tabNameToTabObject, ignoredTokens);
+ walkASTMarkTABREF(tableMask, ast, cteAlias, ctx);
}
// 2. rewrite the AST, replace TABREF with masking/filtering
if (tableMask.needsRewrite()) {
@@ -12374,9 +12195,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String rewrittenQuery = tokenRewriteStream.toString(
ast.getTokenStartIndex(), ast.getTokenStopIndex());
ASTNode rewrittenTree;
- // Parse the rewritten query string
- // check if we need to ctx.setCmd(rewrittenQuery);
- ParseDriver pd = new ParseDriver();
try {
rewrittenTree = ParseUtils.parse(rewrittenQuery);
} catch (ParseException e) {
@@ -12538,21 +12356,22 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- private static void removeASTChild(ASTNode node) {
- Tree parent = node.getParent();
- if (parent != null) {
- parent.deleteChild(node.getChildIndex());
- node.setParent(null);
- }
+ private void removeASTChild(ASTNode node) {
+ Optional.ofNullable(node.getParent())
+ .ifPresent(parent -> {
+ parent.deleteChild(node.getChildIndex());
+ node.setParent(null);
+ });
}
- void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticException {
+ @SuppressWarnings("checkstyle:methodlength")
+ void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
LOG.info("Starting Semantic Analysis");
// 1. Generate Resolved Parse tree from syntax tree
boolean needsTransform = needsTransform();
//change the location of position alias process here
processPositionAlias(ast);
- PlannerContext plannerCtx = pcf.create();
+ PlannerContext plannerCtx = pcf.get();
if (!genResolvedParseTree(ast, plannerCtx)) {
return;
}
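The removed PlannerContextFactory was a single-method factory, so java.util.function.Supplier stands in for it and both anonymous classes collapse to constructor references (PlannerContext::new here, PreCboCtx::new in CalcitePlanner). A minimal self-contained sketch of the equivalence, with stub classes in place of the real ones:

import java.util.function.Supplier;

class SupplierDemo {
    static class PlannerContext { }
    static class PreCboCtx extends PlannerContext { }

    // Was: pcf.create() on the bespoke one-method interface.
    static PlannerContext analyze(Supplier<PlannerContext> pcf) {
        return pcf.get();
    }

    public static void main(String[] args) {
        System.out.println(analyze(PlannerContext::new).getClass().getSimpleName()); // PlannerContext
        System.out.println(analyze(PreCboCtx::new).getClass().getSimpleName());      // PreCboCtx
    }
}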
@@ -12606,10 +12425,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
(tableMask.isEnabled() && analyzeRewrite == null)) {
// Here we rewrite the * and also the masking table
ASTNode rewrittenAST = rewriteASTWithMaskAndFilter(tableMask, astForMasking, ctx.getTokenRewriteStream(),
- ctx, db, tabNameToTabObject, ignoredTokens);
+ ctx, db, tabNameToTabObject);
if (astForMasking != rewrittenAST) {
usesMasking = true;
- plannerCtx = pcf.create();
+ plannerCtx = pcf.get();
ctx.setSkipTableMasking(true);
init(true);
//change the location of position alias process here
@@ -12760,7 +12579,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
fetchTask = pCtx.getFetchTask();
}
// find all Acid FileSinkOperators
- QueryPlanPostProcessor qp = new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
+ new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
// 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
final Optional<TezTask> optionalTezTask =
@@ -12792,7 +12611,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// requires SemanticAnalyzer state to be reset.
if (checkResultsCache(lookupInfo, true)) {
LOG.info("Cached result found on second lookup");
- return;
} else {
QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
@@ -12881,8 +12699,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
boolean first = true;
StringBuilder sb = new StringBuilder();
sb.append("SELECT ");
- for (int i = 0; i < derivedSchema.size(); ++i) {
- FieldSchema fieldSchema = derivedSchema.get(i);
+ for (FieldSchema fieldSchema : derivedSchema) {
if (!createVwDesc.getPartColNames().contains(fieldSchema.getName())) {
if (first) {
first = false;
@@ -12989,7 +12806,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
createVwDesc.setViewExpandedText(expandedText);
}
- private Set<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
+ private Set<String> getTablesUsed(ParseContext parseCtx) {
Set<String> tablesUsed = new HashSet<>();
for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
Table table = topOp.getConf().getTableMetadata();
@@ -13001,14 +12818,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return tablesUsed;
}
- private static List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
+ private List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
List<FieldSchema> fieldSchema = convertRowSchemaToResultSetSchema(rr, false);
ParseUtils.validateColumnNameUniqueness(fieldSchema);
return fieldSchema;
}
- static List<FieldSchema> convertRowSchemaToResultSetSchema(RowResolver rr,
- boolean useTabAliasIfAvailable) {
+ List<FieldSchema> convertRowSchemaToResultSetSchema(RowResolver rr, boolean useTabAliasIfAvailable) {
List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();
String[] qualifiedColName;
String colName;
@@ -13050,13 +12866,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return genExprNodeDesc(expr, input, tcCtx);
}
-
- ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input, boolean useCaching)
- throws SemanticException {
- return genExprNodeDesc(expr, input, useCaching, false);
- }
-
- private ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input, boolean useCaching,
+ ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input, boolean useCaching,
boolean foldExpr) throws SemanticException {
TypeCheckCtx tcCtx = new TypeCheckCtx(input, useCaching, foldExpr);
return genExprNodeDesc(expr, input, tcCtx);
@@ -13148,10 +12958,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
} else {
errMsg = tcCtx.getError();
}
- if (errMsg == null) {
- errMsg = "Error in parsing ";
- }
- throw new SemanticException(errMsg);
+ throw new SemanticException(Optional.ofNullable(errMsg).orElse("Error in parsing "));
}
if (desc instanceof ExprNodeColumnListDesc) {
throw new SemanticException("TOK_ALLCOLREF is not supported in current context");
@@ -13162,7 +12969,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return nodeOutputs;
}
- Map<ExprNodeDesc,String> nodeToText = new HashMap<>();
List<ASTNode> fieldDescList = new ArrayList<>();
for (Map.Entry<ASTNode, ExprNodeDesc> entry : nodeOutputs.entrySet()) {
@@ -13192,7 +12998,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
replacementText.append(".");
replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
- nodeToText.put(columnDesc, replacementText.toString());
unparseTranslator.addTranslation(node, replacementText.toString());
}
@@ -13350,12 +13155,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Map<String, String> tblProp, boolean isExt, StorageFormat storageFormat,
String qualifiedTableName, List<Order> sortCols, boolean isMaterialization,
boolean isTemporaryTable, boolean isTransactional) throws SemanticException {
- Map<String, String> retValue;
- if (tblProp == null) {
- retValue = new HashMap<String, String>();
- } else {
- retValue = tblProp;
- }
+ Map<String, String> retValue = Optional.ofNullable(tblProp).orElseGet(HashMap::new);
+
String paraString = HiveConf.getVar(conf, ConfVars.NEWTABLEDEFAULTPARA);
if (paraString != null && !paraString.isEmpty()) {
for (String keyValuePair : paraString.split(",")) {
@@ -14349,7 +14150,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return qb;
}
- public void setQB(QB qb) {
+ void setQB(QB qb) {
this.qb = qb;
}
@@ -14469,9 +14270,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
PartitionedTableFunctionSpec ptfSpec = processPTFChain(qb, ptf);
- if ( ptfSpec.getAlias() != null ) {
- qb.addAlias(ptfSpec.getAlias());
- }
+ Optional.ofNullable(ptfSpec.getAlias())
+ .ifPresent(qb::addAlias);
PTFInvocationSpec spec = new PTFInvocationSpec();
spec.setFunction(ptfSpec);
@@ -14481,17 +14281,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private void handleQueryWindowClauses(QB qb, Phase1Ctx ctx_1, ASTNode node)
throws SemanticException {
WindowingSpec spec = qb.getWindowingSpec(ctx_1.dest);
- for(int i=0; i < node.getChildCount(); i++) {
- processQueryWindowClause(spec, (ASTNode) node.getChild(i));
+ for(Node child : node.getChildren()) {
+ processQueryWindowClause(spec, (ASTNode) child);
}
}
private PartitionSpec processPartitionSpec(ASTNode node) {
PartitionSpec pSpec = new PartitionSpec();
- int exprCnt = node.getChildCount();
- for(int i=0; i < exprCnt; i++) {
+ for(Node child : node.getChildren()) {
PartitionExpression exprSpec = new PartitionExpression();
- exprSpec.setExpression((ASTNode) node.getChild(i));
+ exprSpec.setExpression((ASTNode) child);
pSpec.addExpression(exprSpec);
}
return pSpec;
@@ -14526,8 +14325,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
PartitioningSpec partitioning = new PartitioningSpec();
ASTNode firstChild = (ASTNode) pSpecNode.getChild(0);
int type = firstChild.getType();
- int exprCnt;
-
if ( type == HiveParser.TOK_DISTRIBUTEBY || type == HiveParser.TOK_CLUSTERBY )
{
@@ -14541,8 +14338,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
else if ( type == HiveParser.TOK_SORTBY || type == HiveParser.TOK_ORDERBY ) {
- ASTNode sortNode = firstChild;
- OrderSpec oSpec = processOrderSpec(sortNode);
+ OrderSpec oSpec = processOrderSpec(firstChild);
partitioning.setOrderSpec(oSpec);
}
return partitioning;
@@ -14572,8 +14368,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
if ( wsNode != null ) {
- WindowSpec ws = processWindowSpec(wsNode);
- wfSpec.setWindowSpec(ws);
+ wfSpec.setWindowSpec(processWindowSpec(wsNode));
}
return wfSpec;
@@ -14616,11 +14411,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
private WindowSpec processWindowSpec(ASTNode node) throws SemanticException {
- String sourceId = null;
- PartitionSpec partition = null;
- OrderSpec order = null;
- WindowFrameSpec windowFrame = null;
-
boolean hasSrcId = false, hasPartSpec = false, hasWF = false;
int srcIdIdx = -1, partIdx = -1, wfIdx = -1;
@@ -14667,22 +14457,22 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
private WindowFrameSpec processWindowFrame(ASTNode node) throws SemanticException {
int type = node.getType();
- BoundarySpec start = null, end = null;
+ BoundarySpec end = null;
/*
* A WindowFrame may contain just the start boundary; in the BETWEEN
* style of expressing a WindowFrame, both boundaries are
* specified.
*/
- start = processBoundary(type, (ASTNode) node.getChild(0));
+ BoundarySpec start = processBoundary((ASTNode) node.getChild(0));
if ( node.getChildCount() > 1 ) {
- end = processBoundary(type, (ASTNode) node.getChild(1));
+ end = processBoundary((ASTNode) node.getChild(1));
}
// Note: TOK_WINDOWVALUES means RANGE type, TOK_WINDOWRANGE means ROWS type
return new WindowFrameSpec(type == HiveParser.TOK_WINDOWVALUES ? WindowType.RANGE : WindowType.ROWS, start, end);
}
- private BoundarySpec processBoundary(int frameType, ASTNode node) throws SemanticException {
+ private BoundarySpec processBoundary(ASTNode node) throws SemanticException {
BoundarySpec bs = new BoundarySpec();
int type = node.getType();
boolean hasAmt = true;
@@ -14699,6 +14489,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
bs.setDirection(Direction.CURRENT);
hasAmt = false;
break;
+ default:
+ // no-op
}
if ( hasAmt )
@@ -14722,111 +14514,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return bs;
}
- /*
- * check if a Select Expr is a constant.
- * - current logic used is to look for HiveParser.TOK_TABLE_OR_COL
- * - if there is none then the expression is a constant.
- */
- private static class ConstantExprCheck implements ContextVisitor {
- boolean isConstant = true;
-
- @Override
- public void visit(Object t, Object parent, int childIndex, Map labels) {
- if ( !isConstant ) {
- return;
- }
- ASTNode node = (ASTNode) t;
- if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_TABLE_OR_COL ) {
- isConstant = false;
- }
- }
-
- public void reset() {
- isConstant = true;
- }
-
- protected boolean isConstant() {
- return isConstant;
- }
- }
-
- private static class AggregationExprCheck implements ContextVisitor {
- Map<String, ASTNode> destAggrExprs;
- boolean isAggr = false;
-
- public AggregationExprCheck(Map<String, ASTNode> destAggrExprs) {
- super();
- this.destAggrExprs = destAggrExprs;
- }
-
- @Override
- public void visit(Object t, Object parent, int childIndex, Map labels) {
- if ( isAggr ) {
- return;
- }
- if ( destAggrExprs.values().contains(t)) {
- isAggr = true;
- }
- }
-
- public void reset() {
- isAggr = false;
- }
-
- protected boolean isAggr() {
- return isAggr;
- }
- }
-
- /*
- * Returns false if there is a SelectExpr that is not a constant or an aggr.
- *
- */
- private boolean isValidGroupBySelectList(QB currQB, String clause){
- ConstantExprCheck constantExprCheck = new ConstantExprCheck();
- AggregationExprCheck aggrExprCheck = new AggregationExprCheck(
- currQB.getParseInfo().getAggregationExprsForClause(clause));
-
- TreeWizard tw = new TreeWizard(ParseDriver.adaptor, HiveParser.tokenNames);
- ASTNode selectNode = currQB.getParseInfo().getSelForClause(clause);
-
- /*
- * for Select Distinct Queries we don't move any aggregations.
- */
- if ( selectNode != null && selectNode.getType() == HiveParser.TOK_SELECTDI ) {
- return true;
- }
-
- for (int i = 0; selectNode != null && i < selectNode.getChildCount(); i++) {
- ASTNode selectExpr = (ASTNode) selectNode.getChild(i);
- //check for QUERY_HINT expressions on ast
- if(selectExpr.getType() != HiveParser.TOK_SELEXPR){
- continue;
- }
-
- constantExprCheck.reset();
- PTFTranslator.visit(selectExpr.getChild(0), constantExprCheck);
-
- if ( !constantExprCheck.isConstant() ) {
- aggrExprCheck.reset();
- PTFTranslator.visit(selectExpr.getChild(0), aggrExprCheck);
- if (!aggrExprCheck.isAggr() ) {
- return false;
- }
- }
-
- }
- return true;
- }
-
//--------------------------- PTF handling: PTFInvocationSpec to PTFDesc --------------------------
private PTFDesc translatePTFInvocationSpec(PTFInvocationSpec ptfQSpec, RowResolver inputRR)
- throws SemanticException{
- PTFDesc ptfDesc = null;
+ throws SemanticException {
PTFTranslator translator = new PTFTranslator();
- ptfDesc = translator.translate(ptfQSpec, this, conf, inputRR, unparseTranslator);
- return ptfDesc;
+ return translator.translate(ptfQSpec, this, conf, inputRR, unparseTranslator);
}
private Operator genPTFPlan(PTFInvocationSpec ptfQSpec, Operator input) throws SemanticException {
@@ -14834,9 +14527,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
for (PTFInvocationSpec ptfSpec : componentQueries) {
input = genPTFPlanForComponentQuery(ptfSpec, input);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created PTF Plan ");
- }
+ LOG.debug("Created PTF Plan ");
return input;
}
@@ -14845,15 +14536,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* Construct the data structures containing ExprNodeDesc for partition
* columns and order columns. Use the input definition to construct the list
* of output columns for the ReduceSinkOperator
- *
- * @throws SemanticException
*/
private void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef,
- RowResolver inputRR,
List<ExprNodeDesc> partCols,
List<ExprNodeDesc> orderCols,
StringBuilder orderString,
- StringBuilder nullOrderString) throws SemanticException {
+ StringBuilder nullOrderString) {
List<PTFExpressionDef> partColList = tabDef.getPartition().getExpressions();
@@ -14875,8 +14563,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* ReduceSinkDesc
*/
List<OrderExpressionDef> orderColList = tabDef.getOrder().getExpressions();
- for (int i = 0; i < orderColList.size(); i++) {
- OrderExpressionDef colDef = orderColList.get(i);
+ for (OrderExpressionDef colDef : orderColList) {
char orderChar = colDef.getOrder() == PTFInvocationSpec.Order.ASC ? '+' : '-';
char nullOrderChar = colDef.getNullOrder() == PTFInvocationSpec.NullOrder.NULLS_FIRST ? 'a' : 'z';
int index = ExprNodeDescUtils.indexOf(colDef.getExprNode(), orderCols);
@@ -14938,7 +14625,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
* If the parent of ReduceSinkOperator is PTFOperator, use its
* output RR.
*/
- buildPTFReduceSinkDetails(tabDef, rr, partCols, orderCols, orderString, nullOrderString);
+ buildPTFReduceSinkDetails(tabDef, partCols, orderCols, orderString, nullOrderString);
input = genReduceSinkPlan(input, partCols, orderCols, orderString.toString(),
nullOrderString.toString(), -1, Operation.NOT_ACID);
}
@@ -15119,15 +14806,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
RowResolver parentRR = opParseCtx.get(parent).getRowResolver();
try {
- ColumnInfo pColInfo = parentRR.get(tab_alias, col_alias);
- tab_alias = pColInfo == null ? null : pColInfo.getTabAlias();
- } catch(SemanticException se) {
+ tab_alias = Optional.ofNullable(parentRR.get(null, col_alias))
+ .map(ColumnInfo::getTabAlias)
+ .orElse(null);
+ } catch (SemanticException se) {
}
gByRR.put(tab_alias, col_alias, colInfo);
}
}
- private WriteEntity.WriteType determineWriteType(LoadTableDesc ltd, boolean isNonNativeTable, String dest) {
+ private WriteEntity.WriteType determineWriteType(LoadTableDesc ltd, String dest) {
if (ltd == null) {
return WriteEntity.WriteType.INSERT_OVERWRITE;
@@ -15142,13 +14830,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
(deleting(dest) ? WriteEntity.WriteType.DELETE : WriteEntity.WriteType.INSERT);
}
private boolean isAcidOutputFormat(Class<? extends OutputFormat> of) {
- Class<?>[] interfaces = of.getInterfaces();
- for (Class<?> iface : interfaces) {
- if (iface.equals(AcidOutputFormat.class)) {
- return true;
- }
- }
- return false;
+ return Arrays.asList(of.getInterfaces()).contains(AcidOutputFormat.class);
}
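The one-liner above is behavior-preserving in a subtle way: Class.getInterfaces() returns only the interfaces a class implements directly, so, exactly like the loop it replaces, the check does not see interfaces inherited through a superclass (isAssignableFrom would). A sketch of that distinction, with stand-in types:

import java.util.Arrays;

class AcidCheckDemo {
    interface AcidOutputFormat { }
    static class OrcOutput implements AcidOutputFormat { }
    static class SubOrcOutput extends OrcOutput { }

    static boolean isAcid(Class<?> of) {
        // Direct interfaces only; getInterfaces() does not walk supertypes.
        return Arrays.asList(of.getInterfaces()).contains(AcidOutputFormat.class);
    }

    public static void main(String[] args) {
        System.out.println(isAcid(OrcOutput.class));    // true
        System.out.println(isAcid(SubOrcOutput.class)); // false: only inherited
    }
}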
// Note that this method assumes you have already decided this is an Acid table. It cannot
@@ -15185,7 +14867,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- static ASTNode genSelectDIAST(RowResolver rr) {
+ ASTNode genSelectDIAST(RowResolver rr) {
Map<String, Map<String, ColumnInfo>> map = rr.getRslvMap();
ASTNode selectDI = new ASTNode(SELECTDI_TOKEN);
// Note: this will determine the order of columns in the result. For now, the columns for each
@@ -15198,7 +14880,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
return selectDI;
}
- private static ASTNode buildSelExprSubTree(String tableAlias, String col) {
+
+ private ASTNode buildSelExprSubTree(String tableAlias, String col) {
tableAlias = StringInternUtils.internIfNotNull(tableAlias);
col = StringInternUtils.internIfNotNull(col);
ASTNode selexpr = new ASTNode(SELEXPR_TOKEN);
@@ -15224,19 +14907,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
queryProperties.setMaterializedView(qb.getViewDesc() != null);
}
}
+
private void warn(String msg) {
- SessionState.getConsole().printInfo(
- String.format("Warning: %s", msg));
+ SessionState.getConsole().printInfo(String.format("Warning: %s", msg));
}
public List<LoadFileDesc> getLoadFileWork() {
return loadFileWork;
}
- public void setLoadFileWork(List<LoadFileDesc> loadFileWork) {
- this.loadFileWork = loadFileWork;
- }
-
private String getQueryStringFromAst(ASTNode ast) {
StringBuilder sb = new StringBuilder();
int startIdx = ast.getTokenStartIndex();
@@ -15275,12 +14954,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// Re-using the TokenRewriteStream map for views so we do not overwrite the current TokenRewriteStream
String rewriteStreamName = "__qualified_query_string__";
- ASTNode astNode;
try {
- astNode = ParseUtils.parse(queryString, ctx, rewriteStreamName);
+ ASTNode astNode = ParseUtils.parse(queryString, ctx, rewriteStreamName);
TokenRewriteStream tokenRewriteStream = ctx.getViewTokenRewriteStream(rewriteStreamName);
- String fullyQualifiedQuery = rewriteQueryWithQualifiedNames(astNode, tokenRewriteStream);
- return fullyQualifiedQuery;
+ return rewriteQueryWithQualifiedNames(astNode, tokenRewriteStream);
} catch (Exception err) {
LOG.error("Unexpected error while reparsing the query string [" + queryString + "]", err);
// Don't fail the query - just return null (caller should skip cache lookup).
@@ -15294,8 +14971,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
//
List<String> transactionalTables = tablesFromReadEntities(inputs)
.stream()
- .filter(table -> AcidUtils.isTransactionalTable(table))
- .map(table -> table.getFullyQualifiedName())
+ .filter(AcidUtils::isTransactionalTable)
+ .map(Table::getFullyQualifiedName)
.collect(Collectors.toList());
if (transactionalTables.size() > 0) {
try {
@@ -15317,7 +14994,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String queryString = getQueryStringForCache(astNode);
if (queryString != null) {
ValidTxnWriteIdList writeIdList = getQueryValidTxnWriteIdList();
- lookupInfo = new QueryResultsCache.LookupInfo(queryString, () -> { return writeIdList; });
+ lookupInfo = new QueryResultsCache.LookupInfo(queryString, () -> writeIdList);
}
return lookupInfo;
}
@@ -15387,11 +15064,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
// HIVE-19096 - disable for explain analyze
- if (ctx.getExplainAnalyze() != null) {
- return false;
- }
-
- return true;
+ return ctx.getExplainAnalyze() == null;
}
private boolean needsTransform() {
@@ -15445,10 +15118,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return true;
}
- private static Set<Table> tablesFromReadEntities(Set<ReadEntity> readEntities) {
+ private Set<Table> tablesFromReadEntities(Set<ReadEntity> readEntities) {
return readEntities.stream()
.filter(entity -> entity.getType() == Entity.Type.TABLE)
- .map(entity -> entity.getTable())
+ .map(Entity::getTable)
.collect(Collectors.toSet());
}
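tablesFromReadEntities above, and the transactional-table filter in the earlier hunk, replace x -> f(x) lambdas with method references; behavior is identical, the intent just reads more directly. A small sketch of the equivalence:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class MethodRefDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("t1", "", "t2");
        List<Integer> lengths = names.stream()
                .filter(s -> !s.isEmpty())
                .map(String::length)   // was: s -> s.length()
                .collect(Collectors.toList());
        System.out.println(lengths); // [2, 2]
    }
}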
@@ -15576,7 +15249,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- protected static IllegalArgumentException raiseWrongType(String expectedTokName, ASTNode n) {
+ static IllegalArgumentException raiseWrongType(String expectedTokName, ASTNode n) {
return new IllegalArgumentException("Expected " + expectedTokName + "; got " + n.getType());
}