Posted to commits@hive.apache.org by br...@apache.org on 2014/08/13 02:25:34 UTC
svn commit: r1617633 [3/6] - in /hive/branches/spark: ./ ant/
ant/src/org/apache/hadoop/hive/ant/ beeline/
beeline/src/java/org/apache/hive/beeline/ beeline/src/main/resources/
cli/src/java/org/apache/hadoop/hive/cli/
common/src/java/org/apache/hadoop/...
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java Wed Aug 13 00:25:32 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.index.AggregateIndexHandler;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hive.ql.parse.O
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.util.StringUtils;
/**
@@ -106,11 +108,6 @@ public class RewriteGBUsingIndex impleme
private final Map<String, RewriteCanApplyCtx> tsOpToProcess =
new LinkedHashMap<String, RewriteCanApplyCtx>();
- //Name of the current table on which rewrite is being performed
- private String baseTableName = null;
- //Name of the current index which is used for rewrite
- private String indexTableName = null;
-
//Index Validation Variables
private static final String IDX_BUCKET_COL = "_bucketname";
private static final String IDX_OFFSETS_ARRAY_COL = "_offsets";
@@ -133,7 +130,7 @@ public class RewriteGBUsingIndex impleme
/* Check if the input query passes all the tests to be eligible for a rewrite
* If yes, rewrite original query; else, return the current parseContext
*/
- if(shouldApplyOptimization()){
+ if (shouldApplyOptimization()) {
LOG.info("Rewriting Original Query using " + getName() + " optimization.");
rewriteOriginalQuery();
}
@@ -155,59 +152,52 @@ public class RewriteGBUsingIndex impleme
* @return
* @throws SemanticException
*/
- boolean shouldApplyOptimization() throws SemanticException{
- boolean canApply = false;
- if(ifQueryHasMultipleTables()){
+ boolean shouldApplyOptimization() throws SemanticException {
+ if (ifQueryHasMultipleTables()) {
//We do not apply this optimization for this case as of now.
return false;
- }else{
+ }
+ Map<Table, List<Index>> tableToIndex = getIndexesForRewrite();
+ if (tableToIndex.isEmpty()) {
+ LOG.debug("No Valid Index Found to apply Rewrite, " +
+ "skipping " + getName() + " optimization");
+ return false;
+ }
/*
* This code iterates over each TableScanOperator from the topOps map from ParseContext.
* For each operator tree originating from this top TableScanOperator, we determine
* if the optimization can be applied. If yes, we add the name of the top table to
* the tsOpToProcess to apply rewrite later on.
* */
- Map<TableScanOperator, Table> topToTable = parseContext.getTopToTable();
- Iterator<TableScanOperator> topOpItr = topToTable.keySet().iterator();
- while(topOpItr.hasNext()){
-
- TableScanOperator topOp = topOpItr.next();
- Table table = topToTable.get(topOp);
- baseTableName = table.getTableName();
- Map<Table, List<Index>> indexes = getIndexesForRewrite();
- if(indexes == null){
- LOG.debug("Error getting valid indexes for rewrite, " +
- "skipping " + getName() + " optimization");
- return false;
- }
+ Map<TableScanOperator, Table> topToTable = parseContext.getTopToTable();
+ Map<String, Operator<?>> topOps = parseContext.getTopOps();
+
+ for (Map.Entry<String, Operator<?>> entry : parseContext.getTopOps().entrySet()) {
- if(indexes.size() == 0){
- LOG.debug("No Valid Index Found to apply Rewrite, " +
+ String alias = entry.getKey();
+ TableScanOperator topOp = (TableScanOperator) entry.getValue();
+
+ Table table = topToTable.get(topOp);
+ List<Index> indexes = tableToIndex.get(table);
+ if (indexes.isEmpty()) {
+ continue;
+ }
+
+ if (table.isPartitioned()) {
+ //if base table has partitions, we need to check if index is built for
+ //all partitions. If not, then we do not apply the optimization
+ if (!checkIfIndexBuiltOnAllTablePartitions(topOp, indexes)) {
+ LOG.debug("Index is not built for all table partitions, " +
"skipping " + getName() + " optimization");
- return false;
- }else{
- //we need to check if the base table has confirmed or unknown partitions
- if(parseContext.getOpToPartList() != null && parseContext.getOpToPartList().size() > 0){
- //if base table has partitions, we need to check if index is built for
- //all partitions. If not, then we do not apply the optimization
- if(checkIfIndexBuiltOnAllTablePartitions(topOp, indexes)){
- //check if rewrite can be applied for operator tree
- //if partitions condition returns true
- canApply = checkIfRewriteCanBeApplied(topOp, table, indexes);
- }else{
- LOG.debug("Index is not built for all table partitions, " +
- "skipping " + getName() + " optimization");
- return false;
- }
- }else{
- //check if rewrite can be applied for operator tree
- //if there are no partitions on base table
- canApply = checkIfRewriteCanBeApplied(topOp, table, indexes);
- }
+ continue;
}
}
+ //check if rewrite can be applied for operator tree
+ //if there are no partitions on base table
+ checkIfRewriteCanBeApplied(alias, topOp, table, indexes);
}
- return canApply;
+
+ return !tsOpToProcess.isEmpty();
}
/**
@@ -219,61 +209,36 @@ public class RewriteGBUsingIndex impleme
* @return - true if rewrite can be applied on the current branch; false otherwise
* @throws SemanticException
*/
- private boolean checkIfRewriteCanBeApplied(TableScanOperator topOp, Table baseTable,
- Map<Table, List<Index>> indexes) throws SemanticException{
- boolean canApply = false;
+ private boolean checkIfRewriteCanBeApplied(String alias, TableScanOperator topOp,
+ Table baseTable, List<Index> indexes) throws SemanticException{
//Context for checking if this optimization can be applied to the input query
RewriteCanApplyCtx canApplyCtx = RewriteCanApplyCtx.getInstance(parseContext);
- Map<String, Operator<? extends OperatorDesc>> topOps = parseContext.getTopOps();
- canApplyCtx.setBaseTableName(baseTableName);
+ canApplyCtx.setAlias(alias);
+ canApplyCtx.setBaseTableName(baseTable.getTableName());
canApplyCtx.populateRewriteVars(topOp);
- Map<Index, Set<String>> indexTableMap = getIndexToKeysMap(indexes.get(baseTable));
- Iterator<Index> indexMapItr = indexTableMap.keySet().iterator();
- Index index = null;
- while(indexMapItr.hasNext()){
+ Map<Index, Set<String>> indexTableMap = getIndexToKeysMap(indexes);
+ for (Map.Entry<Index, Set<String>> entry : indexTableMap.entrySet()) {
//we rewrite the original query using the first valid index encountered
//this can be changed if we have a better mechanism to
//decide which index will produce a better rewrite
- index = indexMapItr.next();
- canApply = canApplyCtx.isIndexUsableForQueryBranchRewrite(index,
- indexTableMap.get(index));
- if(canApply){
- canApply = checkIfAllRewriteCriteriaIsMet(canApplyCtx);
- //break here if any valid index is found to apply rewrite
- if(canApply){
- //check if aggregation function is set.
- //If not, set it using the only indexed column
- if(canApplyCtx.getAggFunction() == null){
- //strip of the start and end braces [...]
- String aggregationFunction = indexTableMap.get(index).toString();
- aggregationFunction = aggregationFunction.substring(1,
- aggregationFunction.length() - 1);
- canApplyCtx.setAggFunction("_count_of_" + aggregationFunction + "");
- }
+ Index index = entry.getKey();
+ Set<String> indexKeyNames = entry.getValue();
+ //break here if any valid index is found to apply rewrite
+ if (canApplyCtx.isIndexUsableForQueryBranchRewrite(index, indexKeyNames) &&
+ checkIfAllRewriteCriteriaIsMet(canApplyCtx)) {
+ //check if aggregation function is set.
+ //If not, set it using the only indexed column
+ if (canApplyCtx.getAggFunction() == null) {
+ canApplyCtx.setAggFunction("_count_of_" + StringUtils.join(",", indexKeyNames) + "");
}
- break;
+ canApplyCtx.setIndexTableName(index.getIndexTableName());
+ tsOpToProcess.put(alias, canApplyCtx);
+ return true;
}
}
- indexTableName = index.getIndexTableName();
-
- if(canApply && topOps.containsValue(topOp)) {
- Iterator<String> topOpNamesItr = topOps.keySet().iterator();
- while(topOpNamesItr.hasNext()){
- String topOpName = topOpNamesItr.next();
- if(topOps.get(topOpName).equals(topOp)){
- tsOpToProcess.put(topOpName, canApplyCtx);
- }
- }
- }
-
- if(tsOpToProcess.size() == 0){
- canApply = false;
- }else{
- canApply = true;
- }
- return canApply;
+ return false;
}
/**
@@ -329,7 +294,7 @@ public class RewriteGBUsingIndex impleme
* @throws SemanticException
*/
private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableScan,
- Map<Table, List<Index>> indexes) throws SemanticException{
+ List<Index> indexes) throws SemanticException {
// check if we have indexes on all partitions in this table scan
Set<Partition> queryPartitions;
try {
@@ -341,7 +306,7 @@ public class RewriteGBUsingIndex impleme
LOG.error("Fatal Error: problem accessing metastore", e);
throw new SemanticException(e);
}
- if(queryPartitions.size() != 0){
+ if (queryPartitions.size() != 0) {
return true;
}
return false;
@@ -355,12 +320,11 @@ public class RewriteGBUsingIndex impleme
* @throws SemanticException
*/
Map<Index, Set<String>> getIndexToKeysMap(List<Index> indexTables) throws SemanticException{
- Index index = null;
Hive hiveInstance = hiveDb;
Map<Index, Set<String>> indexToKeysMap = new LinkedHashMap<Index, Set<String>>();
for (int idxCtr = 0; idxCtr < indexTables.size(); idxCtr++) {
final Set<String> indexKeyNames = new LinkedHashSet<String>();
- index = indexTables.get(idxCtr);
+ Index index = indexTables.get(idxCtr);
//Getting index key columns
StorageDescriptor sd = index.getSd();
List<FieldSchema> idxColList = sd.getCols();
@@ -373,8 +337,9 @@ public class RewriteGBUsingIndex impleme
// index is changed.
List<String> idxTblColNames = new ArrayList<String>();
try {
- Table idxTbl = hiveInstance.getTable(index.getDbName(),
+ String[] qualified = Utilities.getDbTableName(index.getDbName(),
index.getIndexTableName());
+ Table idxTbl = hiveInstance.getTable(qualified[0], qualified[1]);
for (FieldSchema idxTblCol : idxTbl.getCols()) {
idxTblColNames.add(idxTblCol.getName());
}
@@ -403,17 +368,17 @@ public class RewriteGBUsingIndex impleme
*/
@SuppressWarnings("unchecked")
private void rewriteOriginalQuery() throws SemanticException {
- Map<String, Operator<? extends OperatorDesc>> topOpMap =
- (HashMap<String, Operator<? extends OperatorDesc>>) parseContext.getTopOps().clone();
+ Map<String, Operator<?>> topOpMap = parseContext.getTopOps();
Iterator<String> tsOpItr = tsOpToProcess.keySet().iterator();
- while(tsOpItr.hasNext()){
- baseTableName = tsOpItr.next();
- RewriteCanApplyCtx canApplyCtx = tsOpToProcess.get(baseTableName);
- TableScanOperator topOp = (TableScanOperator) topOpMap.get(baseTableName);
+ for (Map.Entry<String, RewriteCanApplyCtx> entry : tsOpToProcess.entrySet()) {
+ String alias = entry.getKey();
+ RewriteCanApplyCtx canApplyCtx = entry.getValue();
+ TableScanOperator topOp = (TableScanOperator) topOpMap.get(alias);
RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx =
RewriteQueryUsingAggregateIndexCtx.getInstance(parseContext, hiveDb,
- indexTableName, baseTableName, canApplyCtx.getAggFunction());
+ canApplyCtx.getIndexTableName(), canApplyCtx.getAlias(),
+ canApplyCtx.getAllColumns(), canApplyCtx.getAggFunction());
rewriteQueryCtx.invokeRewriteQueryProc(topOp);
parseContext = rewriteQueryCtx.getParseContext();
parseContext.setOpParseCtx((LinkedHashMap<Operator<? extends OperatorDesc>,
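
Taken together, the new shouldApplyOptimization() walks the top-level aliases, skips any alias whose table has no usable index, records one rewrite context per qualifying alias in tsOpToProcess, and fires if at least one branch qualifies, instead of tracking a single baseTableName/indexTableName pair in instance fields. A minimal compilable sketch of that control flow, using simplified stand-in types rather than the real Hive operator, Table, and Index classes:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class RewriteSelectionSketch {
      // Stand-in for RewriteCanApplyCtx; the real context carries much more state.
      static class CanApplyCtx {
        String indexTable;
      }

      // One context per alias that passed every check, in encounter order.
      final Map<String, CanApplyCtx> tsOpToProcess = new LinkedHashMap<String, CanApplyCtx>();

      boolean shouldApply(Map<String, List<String>> aliasToIndexes) {
        for (Map.Entry<String, List<String>> e : aliasToIndexes.entrySet()) {
          List<String> indexes = e.getValue();
          if (indexes.isEmpty()) {
            continue;                        // no usable index: skip this alias only
          }
          CanApplyCtx ctx = new CanApplyCtx();
          ctx.indexTable = indexes.get(0);   // first valid index wins, as in the patch
          tsOpToProcess.put(e.getKey(), ctx);
        }
        return !tsOpToProcess.isEmpty();     // apply iff any one branch qualifies
      }
    }
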
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java Wed Aug 13 00:25:32 2014
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.lib.Nod
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.optimizer.ColumnPrunerProcFactory;
import org.apache.hadoop.hive.ql.parse.OpParseContext;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.RowResolver;
@@ -68,7 +69,6 @@ import org.apache.hadoop.hive.serde2.typ
*/
public final class RewriteQueryUsingAggregateIndex {
private static final Log LOG = LogFactory.getLog(RewriteQueryUsingAggregateIndex.class.getName());
- private static RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = null;
private RewriteQueryUsingAggregateIndex() {
//this prevents the class from getting instantiated
@@ -78,7 +78,7 @@ public final class RewriteQueryUsingAggr
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
SelectOperator operator = (SelectOperator)nd;
- rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
+ RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
List<Operator<? extends OperatorDesc>> childOps = operator.getChildOperators();
Operator<? extends OperatorDesc> childOp = childOps.iterator().next();
@@ -98,7 +98,7 @@ public final class RewriteQueryUsingAggr
List<ColumnInfo> selRSSignature =
selRS.getSignature();
//Need to create a new type for Column[_count_of_indexed_key_column] node
- PrimitiveTypeInfo pti = (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfo("bigint");
+ PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo("bigint");
pti.setTypeName("bigint");
ColumnInfo newCI = new ColumnInfo(rewriteQueryCtx.getAggregateFunction(), pti, "", false);
selRSSignature.add(newCI);
@@ -117,19 +117,15 @@ public final class RewriteQueryUsingAggr
/**
* This processor replaces the original TableScanOperator with
* the new TableScanOperator and metadata that scans over the
- * index table rather than scanning over the orginal table.
+ * index table rather than scanning over the original table.
*
*/
private static class ReplaceTableScanOpProc implements NodeProcessor {
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
TableScanOperator scanOperator = (TableScanOperator)nd;
- rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
- String baseTableName = rewriteQueryCtx.getBaseTableName();
- String alias = null;
- if(baseTableName.contains(":")){
- alias = (baseTableName.split(":"))[0];
- }
+ RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
+ String alias = rewriteQueryCtx.getAlias();
//Need to remove the original TableScanOperators from these data structures
// and add new ones
@@ -144,8 +140,8 @@ public final class RewriteQueryUsingAggr
OpParseContext operatorContext = opParseContext.get(scanOperator);
//remove original TableScanOperator
+ topOps.remove(alias);
topToTable.remove(scanOperator);
- topOps.remove(baseTableName);
opParseContext.remove(scanOperator);
//construct a new descriptor for the index table scan
@@ -171,13 +167,11 @@ public final class RewriteQueryUsingAggr
try {
StructObjectInspector rowObjectInspector =
(StructObjectInspector) indexTableHandle.getDeserializer().getObjectInspector();
- List<? extends StructField> fields = rowObjectInspector
- .getAllStructFieldRefs();
- for (int i = 0; i < fields.size(); i++) {
- rr.put(indexTableName, fields.get(i).getFieldName(), new ColumnInfo(fields
- .get(i).getFieldName(), TypeInfoUtils
- .getTypeInfoFromObjectInspector(fields.get(i)
- .getFieldObjectInspector()), indexTableName, false));
+ for (String column : rewriteQueryCtx.getColumns()) {
+ StructField field = rowObjectInspector.getStructFieldRef(column);
+ rr.put(indexTableName, field.getFieldName(), new ColumnInfo(field.getFieldName(),
+ TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()),
+ indexTableName, false));
}
} catch (SerDeException e) {
LOG.error("Error while creating the RowResolver for new TableScanOperator.");
@@ -187,18 +181,18 @@ public final class RewriteQueryUsingAggr
//Set row resolver for new table
operatorContext.setRowResolver(rr);
- String tabNameWithAlias = null;
- if(alias != null){
- tabNameWithAlias = alias + ":" + indexTableName;
- }else{
- tabNameWithAlias = indexTableName;
- }
+
+ String newAlias = indexTableName;
+ int index = alias.lastIndexOf(":");
+ if (index >= 0) {
+ newAlias = alias.substring(0, index) + ":" + indexTableName;
+ }
//Scan operator now points to other table
topToTable.put(scanOperator, indexTableHandle);
- scanOperator.getConf().setAlias(tabNameWithAlias);
+ scanOperator.getConf().setAlias(newAlias);
scanOperator.setAlias(indexTableName);
- topOps.put(tabNameWithAlias, scanOperator);
+ topOps.put(newAlias, scanOperator);
opParseContext.put(scanOperator, operatorContext);
rewriteQueryCtx.getParseContext().setTopToTable(
(HashMap<TableScanOperator, Table>) topToTable);
@@ -207,6 +201,9 @@ public final class RewriteQueryUsingAggr
rewriteQueryCtx.getParseContext().setOpParseCtx(
(LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext>) opParseContext);
+ ColumnPrunerProcFactory.setupNeededColumns(scanOperator, rr,
+ new ArrayList<String>(rewriteQueryCtx.getColumns()));
+
return null;
}
}
@@ -228,7 +225,7 @@ public final class RewriteQueryUsingAggr
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
GroupByOperator operator = (GroupByOperator)nd;
- rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
+ RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx;
//We need to replace the GroupByOperator which is in
//groupOpToInputTables map with the new GroupByOperator
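
The alias handling in ReplaceTableScanOpProc keeps any subquery qualifier and swaps only the final colon-separated component for the index table name. A standalone sketch of just that string manipulation (rewriteAlias is a hypothetical helper name, not a method in the patch):

    class AliasRewriteSketch {
      static String rewriteAlias(String alias, String indexTableName) {
        int idx = alias.lastIndexOf(':');
        if (idx >= 0) {
          // Keep the "outer:" prefix, replace only the last component.
          return alias.substring(0, idx) + ":" + indexTableName;
        }
        return indexTableName;               // unqualified alias: replace wholesale
      }

      public static void main(String[] args) {
        System.out.println(rewriteAlias("sub:t1", "idx_t1")); // sub:idx_t1
        System.out.println(rewriteAlias("t1", "idx_t1"));     // idx_t1
      }
    }
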
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java Wed Aug 13 00:25:32 2014
@@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.Stack;
import org.apache.hadoop.hive.ql.exec.Operator;
@@ -54,19 +55,21 @@ import org.apache.hadoop.hive.ql.udf.gen
public final class RewriteQueryUsingAggregateIndexCtx implements NodeProcessorCtx {
private RewriteQueryUsingAggregateIndexCtx(ParseContext parseContext, Hive hiveDb,
- String indexTableName, String baseTableName, String aggregateFunction){
+ String indexTableName, String alias, Set<String> columns, String aggregateFunction) {
this.parseContext = parseContext;
this.hiveDb = hiveDb;
this.indexTableName = indexTableName;
- this.baseTableName = baseTableName;
+ this.alias = alias;
this.aggregateFunction = aggregateFunction;
+ this.columns = columns;
this.opc = parseContext.getOpParseCtx();
}
public static RewriteQueryUsingAggregateIndexCtx getInstance(ParseContext parseContext,
- Hive hiveDb, String indexTableName, String baseTableName, String aggregateFunction){
+ Hive hiveDb, String indexTableName, String alias,
+ Set<String> columns, String aggregateFunction) {
return new RewriteQueryUsingAggregateIndexCtx(
- parseContext, hiveDb, indexTableName, baseTableName, aggregateFunction);
+ parseContext, hiveDb, indexTableName, alias, columns, aggregateFunction);
}
@@ -77,8 +80,9 @@ public final class RewriteQueryUsingAggr
//We need the GenericUDAFEvaluator for GenericUDAF function "sum"
private GenericUDAFEvaluator eval = null;
private final String indexTableName;
- private final String baseTableName;
+ private final String alias;
private final String aggregateFunction;
+ private final Set<String> columns;
private ExprNodeColumnDesc aggrExprNode = null;
public Map<Operator<? extends OperatorDesc>, OpParseContext> getOpc() {
@@ -161,11 +165,15 @@ public final class RewriteQueryUsingAggr
};
}
- public String getBaseTableName() {
- return baseTableName;
+ public String getAlias() {
+ return alias;
}
public String getAggregateFunction() {
return aggregateFunction;
}
+
+ public Set<String> getColumns() {
+ return columns;
+ }
}
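
The context class remains a plain immutable holder: a private constructor behind a static getInstance factory, now carrying the alias and column set in place of the base table name. A reduced sketch of that shape with only a subset of the real fields:

    import java.util.Set;

    final class CtxSketch {
      private final String indexTableName;
      private final String alias;
      private final Set<String> columns;

      private CtxSketch(String indexTableName, String alias, Set<String> columns) {
        this.indexTableName = indexTableName;
        this.alias = alias;
        this.columns = columns;
      }

      // Callers go through the factory, mirroring getInstance() above.
      static CtxSketch getInstance(String indexTableName, String alias, Set<String> columns) {
        return new CtxSketch(indexTableName, alias, columns);
      }

      String getIndexTableName() { return indexTableName; }
      String getAlias() { return alias; }
      Set<String> getColumns() { return columns; }
    }
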
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanOptimizer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanOptimizer.java Wed Aug 13 00:25:32 2014
@@ -86,7 +86,7 @@ public class NullScanOptimizer implement
return null;
}
ExprNodeConstantDesc c = (ExprNodeConstantDesc) condition;
- if (c.getValue() != Boolean.FALSE) {
+ if (!Boolean.FALSE.equals(c.getValue())) {
return null;
}
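
The one-line NullScanOptimizer change swaps a reference test for a value test: != on a boxed Boolean matches only the canonical Boolean.FALSE instance, while Boolean.FALSE.equals(...) compares the value and also tolerates null. A small demonstration:

    class BooleanCompareSketch {
      public static void main(String[] args) {
        Object w = new Boolean(false);        // distinct instance, still "false"
        System.out.println(w != Boolean.FALSE);          // true: reference test misfires
        System.out.println(!Boolean.FALSE.equals(w));    // false: value test is correct
        System.out.println(!Boolean.FALSE.equals(null)); // true, with no NPE risk
      }
    }
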
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Wed Aug 13 00:25:32 2014
@@ -149,6 +149,7 @@ public class Vectorizer implements Physi
patternBuilder.append("|float");
patternBuilder.append("|double");
patternBuilder.append("|date");
+ patternBuilder.append("|void");
// Decimal types can be specified with different precision and scales e.g. decimal(10,5),
// as opposed to other data types which can be represented by constant strings.
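
Vectorizer assembles its supported-type check as a single alternation regex, and this hunk appends "void" as one more alternative. A trimmed sketch of the approach; the real pattern lists many more types, including parameterized decimal:

    import java.util.regex.Pattern;

    class TypePatternSketch {
      public static void main(String[] args) {
        StringBuilder patternBuilder = new StringBuilder("int|bigint|float|double|date");
        patternBuilder.append("|void");       // the alternative this patch adds
        Pattern supported = Pattern.compile(patternBuilder.toString());
        System.out.println(supported.matcher("void").matches());   // true
        System.out.println(supported.matcher("binary").matches()); // false
      }
    }
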
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java Wed Aug 13 00:25:32 2014
@@ -44,7 +44,6 @@ import org.apache.hadoop.hive.ql.lib.Nod
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -64,11 +63,11 @@ import org.apache.hadoop.hive.ql.plan.Ta
public class IndexWhereProcessor implements NodeProcessor {
private static final Log LOG = LogFactory.getLog(IndexWhereProcessor.class.getName());
- private final Map<Table, List<Index>> indexes;
+ private final Map<TableScanOperator, List<Index>> tsToIndices;
- public IndexWhereProcessor(Map<Table, List<Index>> indexes) {
+ public IndexWhereProcessor(Map<TableScanOperator, List<Index>> tsToIndices) {
super();
- this.indexes = indexes;
+ this.tsToIndices = tsToIndices;
}
@Override
@@ -81,9 +80,11 @@ public class IndexWhereProcessor impleme
TableScanOperator operator = (TableScanOperator) nd;
List<Node> opChildren = operator.getChildren();
TableScanDesc operatorDesc = operator.getConf();
- if (operatorDesc == null) {
+ if (operatorDesc == null || !tsToIndices.containsKey(operator)) {
return null;
}
+ List<Index> indexes = tsToIndices.get(operator);
+
ExprNodeDesc predicate = operatorDesc.getFilterExpr();
IndexWhereProcCtx context = (IndexWhereProcCtx) procCtx;
@@ -96,7 +97,7 @@ public class IndexWhereProcessor impleme
}
LOG.info(predicate.getExprString());
- // check if we have indexes on all partitions in this table scan
+ // check if we have tsToIndices on all partitions in this table scan
Set<Partition> queryPartitions;
try {
queryPartitions = IndexUtils.checkPartitionsCoveredByIndex(operator, pctx, indexes);
@@ -118,14 +119,9 @@ public class IndexWhereProcessor impleme
Map<Index, HiveIndexQueryContext> queryContexts = new HashMap<Index, HiveIndexQueryContext>();
// make sure we have an index on the table being scanned
TableDesc tblDesc = operator.getTableDesc();
- Table srcTable = pctx.getTopToTable().get(operator);
- if (indexes == null || indexes.get(srcTable) == null) {
- return null;
- }
- List<Index> tableIndexes = indexes.get(srcTable);
Map<String, List<Index>> indexesByType = new HashMap<String, List<Index>>();
- for (Index indexOnTable : tableIndexes) {
+ for (Index indexOnTable : indexes) {
if (indexesByType.get(indexOnTable.getIndexHandlerClass()) == null) {
List<Index> newType = new ArrayList<Index>();
newType.add(indexOnTable);
@@ -135,7 +131,7 @@ public class IndexWhereProcessor impleme
}
}
- // choose index type with most indexes of the same type on the table
+ // choose index type with most tsToIndices of the same type on the table
// TODO HIVE-2130 This would be a good place for some sort of cost based choice?
List<Index> bestIndexes = indexesByType.values().iterator().next();
for (List<Index> indexTypes : indexesByType.values()) {
@@ -179,7 +175,7 @@ public class IndexWhereProcessor impleme
}
/**
- * Get a list of Tasks to activate use of indexes.
+ * Get a list of Tasks to activate use of tsToIndices.
* Generate the tasks for the index query (where we store results of
* querying the index in a tmp file) inside the IndexHandler
* @param predicate Predicate of query to rewrite
@@ -193,7 +189,7 @@ public class IndexWhereProcessor impleme
HiveIndexQueryContext queryContext)
throws SemanticException {
HiveIndexHandler indexHandler;
- // All indexes in the list are of the same type, and therefore can use the
+ // All tsToIndices in the list are of the same type, and therefore can use the
// same handler to generate the index query tasks
Index index = indexes.get(0);
try {
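
With the map now keyed by TableScanOperator, the processor returns early when the operator has no entry; the surviving selection logic groups the operator's indexes by handler class and keeps the largest group. A standalone sketch of that grouping, with strings standing in for Index objects and handler class names:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class BestIndexTypeSketch {
      static List<String> chooseBestType(Map<String, String> indexToHandlerClass) {
        // Bucket indexes by their handler class.
        Map<String, List<String>> byType = new HashMap<String, List<String>>();
        for (Map.Entry<String, String> e : indexToHandlerClass.entrySet()) {
          List<String> group = byType.get(e.getValue());
          if (group == null) {
            group = new ArrayList<String>();
            byType.put(e.getValue(), group);
          }
          group.add(e.getKey());
        }
        // Keep the type with the most indexes (the patch's TODO notes a
        // cost-based choice would fit here).
        List<String> best = null;
        for (List<String> group : byType.values()) {
          if (best == null || group.size() > best.size()) {
            best = group;
          }
        }
        return best;
      }
    }
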
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java Wed Aug 13 00:25:32 2014
@@ -116,12 +116,11 @@ public class IndexWhereTaskDispatcher im
// query the metastore to know what columns we have indexed
Collection<Table> topTables = pctx.getTopToTable().values();
- Map<Table, List<Index>> indexes = new HashMap<Table, List<Index>>();
- for (Table tbl : topTables)
- {
- List<Index> tblIndexes = IndexUtils.getIndexes(tbl, supportedIndexes);
+ Map<TableScanOperator, List<Index>> indexes = new HashMap<TableScanOperator, List<Index>>();
+ for (Map.Entry<TableScanOperator, Table> entry : pctx.getTopToTable().entrySet()) {
+ List<Index> tblIndexes = IndexUtils.getIndexes(entry.getValue(), supportedIndexes);
if (tblIndexes.size() > 0) {
- indexes.put(tbl, tblIndexes);
+ indexes.put(entry.getKey(), tblIndexes);
}
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Wed Aug 13 00:25:32 2014
@@ -132,7 +132,7 @@ public class PartitionPruner implements
* condition.
*/
public static PrunedPartitionList prune(TableScanOperator ts, ParseContext parseCtx,
- String alias) throws HiveException {
+ String alias) throws SemanticException {
return prune(parseCtx.getTopToTable().get(ts), parseCtx.getOpToPartPruner().get(ts),
parseCtx.getConf(), alias, parseCtx.getPrunedPartitions());
}
@@ -157,7 +157,7 @@ public class PartitionPruner implements
*/
private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
HiveConf conf, String alias, Map<String, PrunedPartitionList> prunedPartitionsMap)
- throws HiveException {
+ throws SemanticException {
LOG.trace("Started pruning partiton");
LOG.trace("dbname = " + tab.getDbName());
LOG.trace("tabname = " + tab.getTableName());
@@ -267,7 +267,7 @@ public class PartitionPruner implements
}
private static PrunedPartitionList getPartitionsFromServer(Table tab,
- ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws HiveException {
+ ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws SemanticException {
try {
if (!tab.isPartitioned()) {
// If the table is not partitioned, return everything.
@@ -334,10 +334,10 @@ public class PartitionPruner implements
return new PrunedPartitionList(tab, new LinkedHashSet<Partition>(partitions),
new ArrayList<String>(referred),
hasUnknownPartitions || !isPruningByExactFilter);
- } catch (HiveException e) {
+ } catch (SemanticException e) {
throw e;
} catch (Exception e) {
- throw new HiveException(e);
+ throw new SemanticException(e);
}
}
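
PartitionPruner's signatures narrow from HiveException to SemanticException, and the catch block uses the usual rethrow-or-wrap pattern: an exception that already has the narrow type passes through untouched, anything else is wrapped exactly once so the cause chain stays shallow. A minimal sketch with stand-in exception and callback types:

    class WrapSketch {
      static class SemanticException extends Exception {
        SemanticException(Throwable cause) { super(cause); }
      }

      interface Body {
        void run() throws Exception;
      }

      static void prune(Body body) throws SemanticException {
        try {
          body.run();
        } catch (SemanticException e) {
          throw e;                        // already the narrow type: pass through
        } catch (Exception e) {
          throw new SemanticException(e); // wrap once, preserving the cause
        }
      }
    }
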
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Wed Aug 13 00:25:32 2014
@@ -18,11 +18,8 @@
package org.apache.hadoop.hive.ql.optimizer.stats.annotation;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Stack;
-
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -69,8 +66,10 @@ import org.apache.hadoop.hive.ql.udf.gen
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
import org.apache.hadoop.hive.serde.serdeConstants;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
public class StatsRulesProcFactory {
@@ -92,12 +91,8 @@ public class StatsRulesProcFactory {
Object... nodeOutputs) throws SemanticException {
TableScanOperator tsop = (TableScanOperator) nd;
AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
- PrunedPartitionList partList = null;
- try {
- partList = aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
- } catch (HiveException e1) {
- throw new SemanticException(e1);
- }
+ PrunedPartitionList partList =
+ aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
Table table = aspCtx.getParseContext().getTopToTable().get(tsop);
try {
@@ -925,8 +920,7 @@ public class StatsRulesProcFactory {
+ " #Rows of parents: " + rowCountParents.toString() + ". Denominator: " + denom);
}
- stats.setNumRows(newRowCount);
- stats.setDataSize(StatsUtils.getDataSizeFromColumnStats(newRowCount, outColStats));
+ updateStatsForJoinType(stats, newRowCount, true, jop.getConf());
jop.setStatistics(stats);
if (LOG.isDebugEnabled()) {
@@ -972,6 +966,39 @@ public class StatsRulesProcFactory {
return null;
}
+ private void updateStatsForJoinType(Statistics stats, long newNumRows,
+ boolean useColStats, JoinDesc conf) {
+ long oldRowCount = stats.getNumRows();
+ double ratio = (double) newNumRows / (double) oldRowCount;
+ stats.setNumRows(newNumRows);
+
+ if (useColStats) {
+ List<ColStatistics> colStats = stats.getColumnStats();
+ for (ColStatistics cs : colStats) {
+ long oldDV = cs.getCountDistint();
+ long newDV = oldDV;
+
+ // if ratio is greater than 1, then number of rows increases. This can happen
+ // when some operators like GROUPBY duplicates the input rows in which case
+ // number of distincts should not change. Update the distinct count only when
+ // the output number of rows is less than input number of rows.
+ if (ratio <= 1.0) {
+ newDV = (long) Math.ceil(ratio * oldDV);
+ }
+ // Assumes inner join
+ // TODO: HIVE-5579 will handle different join types
+ cs.setNumNulls(0);
+ cs.setCountDistint(newDV);
+ }
+ stats.setColumnStats(colStats);
+ long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats);
+ stats.setDataSize(newDataSize);
+ } else {
+ long newDataSize = (long) (ratio * stats.getDataSize());
+ stats.setDataSize(newDataSize);
+ }
+ }
+
private long computeNewRowCount(List<Long> rowCountParents, long denom) {
double factor = 0.0d;
long result = 1;
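
The new updateStatsForJoinType scales each column's distinct-value count by the row-count ratio, but only when rows shrink; duplication (ratio > 1) cannot introduce new distinct values, so the NDV is left alone in that case. The rule in isolation, with a worked example:

    class NdvScaleSketch {
      static long scaleNdv(long oldNdv, long oldRows, long newRows) {
        double ratio = (double) newRows / (double) oldRows;
        if (ratio > 1.0) {
          return oldNdv;                   // row duplication adds no new distincts
        }
        return (long) Math.ceil(ratio * oldNdv);
      }

      public static void main(String[] args) {
        System.out.println(scaleNdv(100, 1000, 250));  // 25: a quarter of rows survive
        System.out.println(scaleNdv(100, 1000, 4000)); // 100: unchanged on growth
      }
    }
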
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Wed Aug 13 00:25:32 2014
@@ -34,6 +34,7 @@ import java.util.Map.Entry;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.Tree;
+import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -305,6 +306,28 @@ public abstract class BaseSemanticAnalyz
return unescapeIdentifier(tableOrColumnNode.getText());
}
+ public static String[] getQualifiedTableName(ASTNode tabNameNode) throws SemanticException {
+ if (tabNameNode.getType() != HiveParser.TOK_TABNAME ||
+ (tabNameNode.getChildCount() != 1 && tabNameNode.getChildCount() != 2)) {
+ throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME.getMsg(tabNameNode));
+ }
+ if (tabNameNode.getChildCount() == 2) {
+ String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText());
+ String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText());
+ return new String[] {dbName, tableName};
+ }
+ String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText());
+ return new String[]{SessionState.get().getCurrentDatabase(), tableName};
+ }
+
+ public static String getDotName(String[] qname) throws SemanticException {
+ String genericName = StringUtils.join(qname, ".");
+ if (qname.length != 2) {
+ throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName);
+ }
+ return genericName;
+ }
+
/**
* Get the unqualified name from a table node.
*
@@ -817,9 +840,9 @@ public abstract class BaseSemanticAnalyz
this.columnAccessInfo = columnAccessInfo;
}
- protected HashMap<String, String> extractPartitionSpecs(Tree partspec)
+ protected LinkedHashMap<String, String> extractPartitionSpecs(Tree partspec)
throws SemanticException {
- HashMap<String, String> partSpec = new LinkedHashMap<String, String>();
+ LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
for (int i = 0; i < partspec.getChildCount(); ++i) {
CommonTree partspec_val = (CommonTree) partspec.getChild(i);
String val = stripQuotes(partspec_val.getChild(1).getText());
@@ -1176,23 +1199,16 @@ public abstract class BaseSemanticAnalyz
}
}
+ protected Table getTable(String[] qualified) throws SemanticException {
+ return getTable(qualified[0], qualified[1], true);
+ }
+
protected Table getTable(String tblName) throws SemanticException {
return getTable(null, tblName, true);
}
protected Table getTable(String tblName, boolean throwException) throws SemanticException {
- String currentDb = SessionState.get().getCurrentDatabase();
- return getTable(currentDb, tblName, throwException);
- }
-
- // qnName : possibly contains database name (dot separated)
- protected Table getTableWithQN(String qnName, boolean throwException) throws SemanticException {
- int dot = qnName.indexOf('.');
- if (dot < 0) {
- String currentDb = SessionState.get().getCurrentDatabase();
- return getTable(currentDb, qnName, throwException);
- }
- return getTable(qnName.substring(0, dot), qnName.substring(dot + 1), throwException);
+ return getTable(null, tblName, throwException);
}
protected Table getTable(String database, String tblName, boolean throwException)
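
The thrust of the BaseSemanticAnalyzer changes is to carry table names as a {db, table} pair and only join them with getDotName at the edges, rather than splitting dotted strings ad hoc (the removed getTableWithQN). A sketch of both directions; splitDbTableName is a hypothetical stand-in for Utilities.getDbTableName:

    class QualifiedNameSketch {
      static String getDotName(String[] qname) {
        return qname[0] + "." + qname[1];  // {db, table} -> "db.table"
      }

      static String[] splitDbTableName(String currentDb, String name) {
        int dot = name.indexOf('.');
        if (dot < 0) {
          return new String[] { currentDb, name };  // unqualified: session db
        }
        return new String[] { name.substring(0, dot), name.substring(dot + 1) };
      }

      public static void main(String[] args) {
        String[] q = splitDbTableName("default", "sales.orders");
        System.out.println(getDotName(q));                                      // sales.orders
        System.out.println(getDotName(splitDbTableName("default", "orders"))); // default.orders
      }
    }
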
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java Wed Aug 13 00:25:32 2014
@@ -23,7 +23,6 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
public class ColumnAccessAnalyzer {
@@ -49,13 +48,7 @@ public class ColumnAccessAnalyzer {
columnAccessInfo.add(tableName, column);
}
if (table.isPartitioned()) {
- PrunedPartitionList parts;
- try {
- parts = pGraphContext.getPrunedPartitions(table.getTableName(), op);
- } catch (HiveException e) {
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
+ PrunedPartitionList parts = pGraphContext.getPrunedPartitions(table.getTableName(), op);
if (parts.getReferredPartCols() != null) {
for (String partKey : parts.getReferredPartCols()) {
columnAccessInfo.add(tableName, partKey);
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java Wed Aug 13 00:25:32 2014
@@ -18,8 +18,11 @@
package org.apache.hadoop.hive.ql.parse;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -42,7 +45,13 @@ public class ColumnAccessInfo {
tableColumns.add(col);
}
- public Map<String, Set<String>> getTableToColumnAccessMap() {
- return tableToColumnAccessMap;
+ public Map<String, List<String>> getTableToColumnAccessMap() {
+ Map<String, List<String>> mapping = new HashMap<String, List<String>>();
+ for (Map.Entry<String, Set<String>> entry : tableToColumnAccessMap.entrySet()) {
+ List<String> sortedCols = new ArrayList<String>(entry.getValue());
+ Collections.sort(sortedCols);
+ mapping.put(entry.getKey(), sortedCols);
+ }
+ return mapping;
}
}
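
getTableToColumnAccessMap now hands out sorted copies instead of the internal mutable sets, giving hooks and tests a deterministic column order. The copy-and-sort accessor in isolation:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class SortedCopySketch {
      private final Map<String, Set<String>> access = new HashMap<String, Set<String>>();

      Map<String, List<String>> getTableToColumnAccessMap() {
        Map<String, List<String>> mapping = new HashMap<String, List<String>>();
        for (Map.Entry<String, Set<String>> entry : access.entrySet()) {
          List<String> sortedCols = new ArrayList<String>(entry.getValue());
          Collections.sort(sortedCols);   // stable order, independent of Set iteration
          mapping.put(entry.getKey(), sortedCols);
        }
        return mapping;
      }
    }
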
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Wed Aug 13 00:25:32 2014
@@ -217,7 +217,7 @@ public class DDLSemanticAnalyzer extends
}
public TablePartition(ASTNode tblPart) throws SemanticException {
- tableName = unescapeIdentifier(tblPart.getChild(0).getText());
+ tableName = getDotName((getQualifiedTableName((ASTNode) tblPart.getChild(0))));
if (tblPart.getChildCount() > 1) {
ASTNode part = (ASTNode) tblPart.getChild(1);
if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) {
@@ -1015,7 +1015,7 @@ public class DDLSemanticAnalyzer extends
private void analyzeCreateIndex(ASTNode ast) throws SemanticException {
String indexName = unescapeIdentifier(ast.getChild(0).getText());
String typeName = unescapeSQLString(ast.getChild(1).getText());
- String tableName = getUnescapedName((ASTNode) ast.getChild(2));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(2));
List<String> indexedCols = getColumnNames((ASTNode) ast.getChild(3));
IndexType indexType = HiveIndex.getIndexType(typeName);
@@ -1081,8 +1081,14 @@ public class DDLSemanticAnalyzer extends
storageFormat.fillDefaultStorageFormat();
+ if (indexTableName == null) {
+ indexTableName = MetaStoreUtils.getIndexTableName(qualified[0], qualified[1], indexName);
+ indexTableName = qualified[0] + "." + indexTableName; // on same database with base table
+ } else {
+ indexTableName = getDotName(Utilities.getDbTableName(indexTableName));
+ }
- CreateIndexDesc crtIndexDesc = new CreateIndexDesc(tableName, indexName,
+ CreateIndexDesc crtIndexDesc = new CreateIndexDesc(getDotName(qualified), indexName,
indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(),
storageFormat.getOutputFormat(),
storageFormat.getStorageHandler(), typeName, location, idxProps, tblProps,
@@ -1116,21 +1122,20 @@ public class DDLSemanticAnalyzer extends
}
private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException {
- String baseTableName = unescapeIdentifier(ast.getChild(0).getText());
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
String indexName = unescapeIdentifier(ast.getChild(1).getText());
HashMap<String, String> partSpec = null;
Tree part = ast.getChild(2);
if (part != null) {
partSpec = extractPartitionSpecs(part);
}
- List<Task<?>> indexBuilder = getIndexBuilderMapRed(baseTableName, indexName, partSpec);
+ List<Task<?>> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec);
rootTasks.addAll(indexBuilder);
// Handle updating index timestamps
AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP);
alterIdxDesc.setIndexName(indexName);
- alterIdxDesc.setBaseTableName(baseTableName);
- alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase());
+ alterIdxDesc.setBaseTableName(getDotName(qualified));
alterIdxDesc.setSpec(partSpec);
Task<?> tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf);
@@ -1142,27 +1147,28 @@ public class DDLSemanticAnalyzer extends
private void analyzeAlterIndexProps(ASTNode ast)
throws SemanticException {
- String baseTableName = getUnescapedName((ASTNode) ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
String indexName = unescapeIdentifier(ast.getChild(1).getText());
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(2))
.getChild(0));
- AlterIndexDesc alterIdxDesc =
- new AlterIndexDesc(AlterIndexTypes.ADDPROPS);
+ AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.ADDPROPS);
alterIdxDesc.setProps(mapProp);
alterIdxDesc.setIndexName(indexName);
- alterIdxDesc.setBaseTableName(baseTableName);
- alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase());
+ alterIdxDesc.setBaseTableName(getDotName(qualified));
rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf));
}
- private List<Task<?>> getIndexBuilderMapRed(String baseTableName, String indexName,
+ private List<Task<?>> getIndexBuilderMapRed(String[] names, String indexName,
HashMap<String, String> partSpec) throws SemanticException {
try {
- String dbName = SessionState.get().getCurrentDatabase();
- Index index = db.getIndex(dbName, baseTableName, indexName);
- Table indexTbl = getTable(index.getIndexTableName());
+ Index index = db.getIndex(names[0], names[1], indexName);
+ Table indexTbl = null;
+ String indexTableName = index.getIndexTableName();
+ if (indexTableName != null) {
+ indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName));
+ }
String baseTblName = index.getOrigTableName();
Table baseTbl = getTable(baseTblName);
@@ -1474,7 +1480,7 @@ public class DDLSemanticAnalyzer extends
boolean checkIndex = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX);
if (checkIndex) {
- List<Index> indexes = db.getIndexes(tblObj.getDbName(), tableName,
+ List<Index> indexes = db.getIndexes(tblObj.getDbName(), tblObj.getTableName(),
Short.MAX_VALUE);
if (indexes != null && indexes.size() > 0) {
throw new SemanticException("can not do merge because source table "
@@ -1633,7 +1639,7 @@ public class DDLSemanticAnalyzer extends
LinkedHashMap<String, String> newPartSpec = null;
if (partSpec != null) newPartSpec = new LinkedHashMap<String, String>(partSpec);
- AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get().getCurrentDatabase(),
+ AlterTableSimpleDesc desc = new AlterTableSimpleDesc(
tableName, newPartSpec, type);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
@@ -2098,25 +2104,17 @@ public class DDLSemanticAnalyzer extends
}
private void analyzeShowColumns(ASTNode ast) throws SemanticException {
- ShowColumnsDesc showColumnsDesc;
- String dbName = null;
- String tableName = null;
- switch (ast.getChildCount()) {
- case 1:
- tableName = getUnescapedName((ASTNode) ast.getChild(0));
- break;
- case 2:
- dbName = getUnescapedName((ASTNode) ast.getChild(0));
- tableName = getUnescapedName((ASTNode) ast.getChild(1));
- break;
- default:
- break;
+ String tableName = getUnescapedName((ASTNode) ast.getChild(0));
+ if (ast.getChildCount() > 1) {
+ if (tableName.contains(".")) {
+ throw new SemanticException("Duplicates declaration for database name");
+ }
+ tableName = getUnescapedName((ASTNode) ast.getChild(1)) + "." + tableName;
}
-
- Table tab = getTable(dbName, tableName, true);
+ Table tab = getTable(tableName);
inputs.add(new ReadEntity(tab));
- showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName);
+ ShowColumnsDesc showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
showColumnsDesc), conf));
setFetchTask(createFetchTask(showColumnsDesc.getSchema()));
@@ -2157,13 +2155,13 @@ public class DDLSemanticAnalyzer extends
private void analyzeShowTableProperties(ASTNode ast) throws SemanticException {
ShowTblPropertiesDesc showTblPropertiesDesc;
- String tableNames = getUnescapedName((ASTNode) ast.getChild(0));
- String dbName = SessionState.get().getCurrentDatabase();
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
String propertyName = null;
if (ast.getChildCount() > 1) {
propertyName = unescapeSQLString(ast.getChild(1).getText());
}
+ String tableNames = getDotName(qualified);
validateTable(tableNames, null);
showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames,
@@ -2437,17 +2435,20 @@ public class DDLSemanticAnalyzer extends
private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws SemanticException {
- String tblName = getUnescapedName((ASTNode) ast.getChild(0));
- AlterTableDesc alterTblDesc = new AlterTableDesc(tblName,
- getUnescapedName((ASTNode) ast.getChild(1)), expectView);
+ String[] source = getQualifiedTableName((ASTNode) ast.getChild(0));
+ String[] target = getQualifiedTableName((ASTNode) ast.getChild(1));
- addInputsOutputsAlterTable(tblName, null, alterTblDesc);
+ String sourceName = getDotName(source);
+ String targetName = getDotName(target);
+
+ AlterTableDesc alterTblDesc = new AlterTableDesc(sourceName, targetName, expectView);
+ addInputsOutputsAlterTable(sourceName, null, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
}
private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException {
- String tblName = getUnescapedName((ASTNode) ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
String newComment = null;
String newType = null;
newType = getTypeStringFromAST((ASTNode) ast.getChild(3));
@@ -2477,7 +2478,7 @@ public class DDLSemanticAnalyzer extends
String newColName = ast.getChild(2).getText();
/* Validate the operation of renaming a column name. */
- Table tab = getTable(tblName);
+ Table tab = getTable(qualified);
SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
if ((null != skewInfo)
@@ -2487,6 +2488,7 @@ public class DDLSemanticAnalyzer extends
+ ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
}
+ String tblName = getDotName(qualified);
AlterTableDesc alterTblDesc = new AlterTableDesc(tblName,
unescapeIdentifier(oldColName), unescapeIdentifier(newColName),
newType, newComment, first, flagCol);
@@ -2511,9 +2513,8 @@ public class DDLSemanticAnalyzer extends
List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
partSpecs.add(oldPartSpec);
partSpecs.add(newPartSpec);
- addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE);
- RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc(
- SessionState.get().getCurrentDatabase(), tblName, oldPartSpec, newPartSpec);
+ addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE);
+ RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc(tblName, oldPartSpec, newPartSpec);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
renamePartitionDesc), conf));
}
@@ -2536,7 +2537,9 @@ public class DDLSemanticAnalyzer extends
private void analyzeAlterTableModifyCols(ASTNode ast,
AlterTableTypes alterType) throws SemanticException {
- String tblName = getUnescapedName((ASTNode) ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+
+ String tblName = getDotName(qualified);
List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(1));
AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols,
alterType);
@@ -2559,8 +2562,8 @@ public class DDLSemanticAnalyzer extends
// popular case but that's kinda hacky. Let's not do it for now.
boolean canGroupExprs = ifExists;
- String tblName = getUnescapedName((ASTNode) ast.getChild(0));
- Table tab = getTable(tblName, true);
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+ Table tab = getTable(qualified);
Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
getFullPartitionSpecs(ast, tab, canGroupExprs);
if (partSpecs.isEmpty()) return; // nothing to do
@@ -2574,24 +2577,19 @@ public class DDLSemanticAnalyzer extends
addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
DropTableDesc dropTblDesc =
- new DropTableDesc(tblName, partSpecs, expectView, ignoreProtection);
+ new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
}
private void analyzeAlterTablePartColType(ASTNode ast)
throws SemanticException {
// get table name
- String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
- Table tab = null;
// check if table exists.
- try {
- tab = getTable(tblName, true);
- inputs.add(new ReadEntity(tab));
- } catch (HiveException e) {
- throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
- }
+ Table tab = getTable(qualified);
+ inputs.add(new ReadEntity(tab));
// validate the DDL is a valid operation on the table.
validateAlterTableType(tab, AlterTableTypes.ALTERPARTITION, false);
@@ -2625,7 +2623,7 @@ public class DDLSemanticAnalyzer extends
}
AlterTableAlterPartDesc alterTblAlterPartDesc =
- new AlterTableAlterPartDesc(SessionState.get().getCurrentDatabase(), tblName, newCol);
+ new AlterTableAlterPartDesc(getDotName(qualified), newCol);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblAlterPartDesc), conf));
@@ -2648,10 +2646,10 @@ public class DDLSemanticAnalyzer extends
throws SemanticException {
// ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
- String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS;
- Table tab = getTable(tblName, true);
+ Table tab = getTable(qualified);
boolean isView = tab.isView();
validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
@@ -2662,7 +2660,8 @@ public class DDLSemanticAnalyzer extends
String currentLocation = null;
Map<String, String> currentPart = null;
// Parser has done some verification, so the order of tokens doesn't need to be verified here.
- AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists);
+ AddPartitionDesc addPartitionDesc =
+ new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists);
for (int num = start; num < numCh; num++) {
ASTNode child = (ASTNode) ast.getChild(num);
switch (child.getToken().getType()) {
@@ -2683,7 +2682,7 @@ public class DDLSemanticAnalyzer extends
currentLocation = unescapeSQLString(child.getChild(0).getText());
boolean isLocal = false;
try {
- // do best effor to determine if this is a local file
+ // do best effort to determine if this is a local file
String scheme = new URI(currentLocation).getScheme();
if (scheme != null) {
isLocal = FileUtils.isLocalFile(conf, currentLocation);
@@ -2714,7 +2713,7 @@ public class DDLSemanticAnalyzer extends
// Compile internal query to capture underlying table partition dependencies
StringBuilder cmd = new StringBuilder();
cmd.append("SELECT * FROM ");
- cmd.append(HiveUtils.unparseIdentifier(tblName));
+ cmd.append(HiveUtils.unparseIdentifier(getDotName(qualified)));
cmd.append(" WHERE ");
boolean firstOr = true;
for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
@@ -2775,9 +2774,9 @@ public class DDLSemanticAnalyzer extends
*/
private void analyzeAlterTableTouch(CommonTree ast)
throws SemanticException {
+ String[] qualified = getQualifiedTableName((ASTNode)ast.getChild(0));
- String tblName = getUnescapedName((ASTNode)ast.getChild(0));
- Table tab = getTable(tblName, true);
+ Table tab = getTable(qualified);
validateAlterTableType(tab, AlterTableTypes.TOUCH);
inputs.add(new ReadEntity(tab));
@@ -2786,16 +2785,16 @@ public class DDLSemanticAnalyzer extends
if (partSpecs.size() == 0) {
AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
- SessionState.get().getCurrentDatabase(), tblName, null,
+ getDotName(qualified), null,
AlterTableDesc.AlterTableTypes.TOUCH);
outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
touchDesc), conf));
} else {
- addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
+ addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
for (Map<String, String> partSpec : partSpecs) {
AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
- SessionState.get().getCurrentDatabase(), tblName, partSpec,
+ getDotName(qualified), partSpec,
AlterTableDesc.AlterTableTypes.TOUCH);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
touchDesc), conf));
@@ -2810,12 +2809,12 @@ public class DDLSemanticAnalyzer extends
throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg());
}
- String tblName = getUnescapedName((ASTNode) ast.getChild(0));
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
// partition name to value
List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
- Table tab = getTable(tblName, true);
- addTablePartsOutputs(tblName, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
+ Table tab = getTable(qualified);
+ addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
validateAlterTableType(tab, AlterTableTypes.ARCHIVE);
inputs.add(new ReadEntity(tab));
@@ -2835,7 +2834,7 @@ public class DDLSemanticAnalyzer extends
throw new SemanticException(e.getMessage(), e);
}
AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc(
- SessionState.get().getCurrentDatabase(), tblName, partSpec,
+ getDotName(qualified), partSpec,
(isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
archiveDesc), conf));
@@ -3003,20 +3002,20 @@ public class DDLSemanticAnalyzer extends
* Add the table partitions to be modified in the output, so that it is available for the
* pre-execution hook. If the partition does not exist, no error is thrown.
*/
- private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+ private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
WriteEntity.WriteType writeType)
throws SemanticException {
- addTablePartsOutputs(tblName, partSpecs, false, false, null, writeType);
+ addTablePartsOutputs(table, partSpecs, false, false, null, writeType);
}
/**
* Add the table partitions to be modified in the output, so that it is available for the
* pre-execution hook. If the partition does not exist, no error is thrown.
*/
- private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+ private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
boolean allowMany, WriteEntity.WriteType writeType)
throws SemanticException {
- addTablePartsOutputs(tblName, partSpecs, false, allowMany, null, writeType);
+ addTablePartsOutputs(table, partSpecs, false, allowMany, null, writeType);
}
/**
@@ -3024,10 +3023,9 @@ public class DDLSemanticAnalyzer extends
* pre-execution hook. If the partition does not exist, throw an error if
* throwIfNonExistent is true, otherwise ignore it.
*/
- private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+ private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType)
throws SemanticException {
- Table tab = getTable(tblName);
Iterator<Map<String, String>> i;
int index;
@@ -3036,7 +3034,7 @@ public class DDLSemanticAnalyzer extends
List<Partition> parts = null;
if (allowMany) {
try {
- parts = db.getPartitions(tab, partSpec);
+ parts = db.getPartitions(table, partSpec);
} catch (HiveException e) {
LOG.error("Got HiveException during obtaining list of partitions"
+ StringUtils.stringifyException(e));
@@ -3045,7 +3043,7 @@ public class DDLSemanticAnalyzer extends
} else {
parts = new ArrayList<Partition>();
try {
- Partition p = db.getPartition(tab, partSpec, false);
+ Partition p = db.getPartition(table, partSpec, false);
if (p != null) {
parts.add(p);
}
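[Note: threading the resolved Table through the overload chain removes a redundant metastore round trip; the old final overload reopened with getTable(tblName), re-resolving a name the caller had already looked up. Every call site now follows one pattern (fragment, locals as in the touch/archive hunks above):]

    Table tab = getTable(qualified);   // resolved once, up front
    addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
    // ...and db.getPartitions(table, partSpec) below never re-parses a name string.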
@@ -3125,14 +3123,15 @@ public class DDLSemanticAnalyzer extends
*/
HiveConf hiveConf = SessionState.get().getConf();
- String tableName = getUnescapedName((ASTNode) ast.getChild(0));
- Table tab = getTable(tableName, true);
+ String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+ Table tab = getTable(qualified);
inputs.add(new ReadEntity(tab));
outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
+ String tableName = getDotName(qualified);
if (ast.getChildCount() == 1) {
/* Convert a skewed table to non-skewed table. */
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true,
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Wed Aug 13 00:25:32 2014
@@ -953,8 +953,8 @@ alterTableStatementSuffix
alterStatementPartitionKeyType
@init {msgs.push("alter partition key type"); }
@after {msgs.pop();}
- : identifier KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN
- -> ^(TOK_ALTERTABLE_PARTCOLTYPE identifier columnNameType)
+ : tableName KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN
+ -> ^(TOK_ALTERTABLE_PARTCOLTYPE tableName columnNameType)
;
alterViewStatementSuffix
@@ -974,16 +974,14 @@ alterViewStatementSuffix
alterIndexStatementSuffix
@init { pushMsg("alter index statement", state); }
@after { popMsg(state); }
- : indexName=identifier
- (KW_ON tableNameId=identifier)
- partitionSpec?
+ : indexName=identifier KW_ON tableName partitionSpec?
(
KW_REBUILD
- ->^(TOK_ALTERINDEX_REBUILD $tableNameId $indexName partitionSpec?)
+ ->^(TOK_ALTERINDEX_REBUILD tableName $indexName partitionSpec?)
|
KW_SET KW_IDXPROPERTIES
indexProperties
- ->^(TOK_ALTERINDEX_PROPERTIES $tableNameId $indexName indexProperties)
+ ->^(TOK_ALTERINDEX_PROPERTIES tableName $indexName indexProperties)
)
;
@@ -1011,23 +1009,23 @@ alterDatabaseSuffixSetOwner
alterStatementSuffixRename
@init { pushMsg("rename statement", state); }
@after { popMsg(state); }
- : oldName=identifier KW_RENAME KW_TO newName=identifier
+ : oldName=tableName KW_RENAME KW_TO newName=tableName
-> ^(TOK_ALTERTABLE_RENAME $oldName $newName)
;
alterStatementSuffixAddCol
@init { pushMsg("add column statement", state); }
@after { popMsg(state); }
- : identifier (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN
- -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS identifier columnNameTypeList)
- -> ^(TOK_ALTERTABLE_REPLACECOLS identifier columnNameTypeList)
+ : tableName (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN
+ -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS tableName columnNameTypeList)
+ -> ^(TOK_ALTERTABLE_REPLACECOLS tableName columnNameTypeList)
;
alterStatementSuffixRenameCol
@init { pushMsg("rename column name", state); }
@after { popMsg(state); }
- : identifier KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition?
- ->^(TOK_ALTERTABLE_RENAMECOL identifier $oldName $newName colType $comment? alterStatementChangeColPosition?)
+ : tableName KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition?
+ ->^(TOK_ALTERTABLE_RENAMECOL tableName $oldName $newName colType $comment? alterStatementChangeColPosition?)
;
alterStatementChangeColPosition
@@ -1039,8 +1037,8 @@ alterStatementChangeColPosition
alterStatementSuffixAddPartitions
@init { pushMsg("add partition statement", state); }
@after { popMsg(state); }
- : identifier KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+
- -> ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
+ : tableName KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+
+ -> ^(TOK_ALTERTABLE_ADDPARTS tableName ifNotExists? alterStatementSuffixAddPartitionsElement+)
;
alterStatementSuffixAddPartitionsElement
@@ -1050,22 +1048,22 @@ alterStatementSuffixAddPartitionsElement
alterStatementSuffixTouch
@init { pushMsg("touch statement", state); }
@after { popMsg(state); }
- : identifier KW_TOUCH (partitionSpec)*
- -> ^(TOK_ALTERTABLE_TOUCH identifier (partitionSpec)*)
+ : tableName KW_TOUCH (partitionSpec)*
+ -> ^(TOK_ALTERTABLE_TOUCH tableName (partitionSpec)*)
;
alterStatementSuffixArchive
@init { pushMsg("archive statement", state); }
@after { popMsg(state); }
- : identifier KW_ARCHIVE (partitionSpec)*
- -> ^(TOK_ALTERTABLE_ARCHIVE identifier (partitionSpec)*)
+ : tableName KW_ARCHIVE (partitionSpec)*
+ -> ^(TOK_ALTERTABLE_ARCHIVE tableName (partitionSpec)*)
;
alterStatementSuffixUnArchive
@init { pushMsg("unarchive statement", state); }
@after { popMsg(state); }
- : identifier KW_UNARCHIVE (partitionSpec)*
- -> ^(TOK_ALTERTABLE_UNARCHIVE identifier (partitionSpec)*)
+ : tableName KW_UNARCHIVE (partitionSpec)*
+ -> ^(TOK_ALTERTABLE_UNARCHIVE tableName (partitionSpec)*)
;
partitionLocation
@@ -1078,26 +1076,26 @@ partitionLocation
alterStatementSuffixDropPartitions
@init { pushMsg("drop partition statement", state); }
@after { popMsg(state); }
- : identifier KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection?
- -> ^(TOK_ALTERTABLE_DROPPARTS identifier dropPartitionSpec+ ifExists? ignoreProtection?)
+ : tableName KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection?
+ -> ^(TOK_ALTERTABLE_DROPPARTS tableName dropPartitionSpec+ ifExists? ignoreProtection?)
;
alterStatementSuffixProperties
@init { pushMsg("alter properties statement", state); }
@after { popMsg(state); }
- : name=identifier KW_SET KW_TBLPROPERTIES tableProperties
- -> ^(TOK_ALTERTABLE_PROPERTIES $name tableProperties)
- | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
- -> ^(TOK_DROPTABLE_PROPERTIES $name tableProperties ifExists?)
+ : tableName KW_SET KW_TBLPROPERTIES tableProperties
+ -> ^(TOK_ALTERTABLE_PROPERTIES tableName tableProperties)
+ | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
+ -> ^(TOK_DROPTABLE_PROPERTIES tableName tableProperties ifExists?)
;
alterViewSuffixProperties
@init { pushMsg("alter view properties statement", state); }
@after { popMsg(state); }
- : name=identifier KW_SET KW_TBLPROPERTIES tableProperties
- -> ^(TOK_ALTERVIEW_PROPERTIES $name tableProperties)
- | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
- -> ^(TOK_DROPVIEW_PROPERTIES $name tableProperties ifExists?)
+ : tableName KW_SET KW_TBLPROPERTIES tableProperties
+ -> ^(TOK_ALTERVIEW_PROPERTIES tableName tableProperties)
+ | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
+ -> ^(TOK_DROPVIEW_PROPERTIES tableName tableProperties ifExists?)
;
alterStatementSuffixSerdeProperties
@@ -1112,8 +1110,8 @@ alterStatementSuffixSerdeProperties
tablePartitionPrefix
@init {pushMsg("table partition prefix", state);}
@after {popMsg(state);}
- :name=identifier partitionSpec?
- ->^(TOK_TABLE_PARTITION $name partitionSpec?)
+ : tableName partitionSpec?
+ ->^(TOK_TABLE_PARTITION tableName partitionSpec?)
;
alterTblPartitionStatement
@@ -1192,21 +1190,21 @@ alterStatementSuffixLocation
alterStatementSuffixSkewedby
@init {pushMsg("alter skewed by statement", state);}
@after{popMsg(state);}
- :name=identifier tableSkewed
- ->^(TOK_ALTERTABLE_SKEWED $name tableSkewed)
+ : tableName tableSkewed
+ ->^(TOK_ALTERTABLE_SKEWED tableName tableSkewed)
|
- name=identifier KW_NOT KW_SKEWED
- ->^(TOK_ALTERTABLE_SKEWED $name)
+ tableName KW_NOT KW_SKEWED
+ ->^(TOK_ALTERTABLE_SKEWED tableName)
|
- name=identifier KW_NOT storedAsDirs
- ->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs)
+ tableName KW_NOT storedAsDirs
+ ->^(TOK_ALTERTABLE_SKEWED tableName storedAsDirs)
;
alterStatementSuffixExchangePartition
@init {pushMsg("alter exchange partition", state);}
@after{popMsg(state);}
- : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
- -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename)
+ : tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
+ -> ^(TOK_EXCHANGEPARTITION tableName partitionSpec $exchangename)
;
alterStatementSuffixProtectMode
@@ -1315,14 +1313,14 @@ showStatement
@after { popMsg(state); }
: KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?)
| KW_SHOW KW_TABLES ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWTABLES (TOK_FROM $db_name)? showStmtIdentifier?)
- | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tabname=tableName ((KW_FROM|KW_IN) db_name=identifier)?
- -> ^(TOK_SHOWCOLUMNS $db_name? $tabname)
+ | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)?
+ -> ^(TOK_SHOWCOLUMNS tableName $db_name?)
| KW_SHOW KW_FUNCTIONS showFunctionIdentifier? -> ^(TOK_SHOWFUNCTIONS showFunctionIdentifier?)
| KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?)
| KW_SHOW KW_CREATE KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName)
| KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec?
-> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
- | KW_SHOW KW_TBLPROPERTIES tblName=identifier (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES $tblName $prptyName?)
+ | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)
| KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
| KW_SHOW KW_LOCKS (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
| KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
@@ -1455,26 +1453,25 @@ privilegeIncludeColObject
@init {pushMsg("privilege object including columns", state);}
@after {popMsg(state);}
: KW_ALL -> ^(TOK_RESOURCE_ALL)
- | privObjectType identifier (LPAREN cols=columnNameList RPAREN)? partitionSpec?
- -> ^(TOK_PRIV_OBJECT_COL identifier privObjectType $cols? partitionSpec?)
+ | privObjectCols -> ^(TOK_PRIV_OBJECT_COL privObjectCols)
;
privilegeObject
-@init {pushMsg("privilege subject", state);}
+@init {pushMsg("privilege object", state);}
@after {popMsg(state);}
- : KW_ON privObjectType identifier partitionSpec?
- -> ^(TOK_PRIV_OBJECT identifier privObjectType partitionSpec?)
+ : KW_ON privObject -> ^(TOK_PRIV_OBJECT privObject)
;
-
// database or table type. Type is optional, default type is table
-privObjectType
-@init {pushMsg("privilege object type type", state);}
-@after {popMsg(state);}
- : (KW_DATABASE|KW_SCHEMA) -> ^(TOK_DB_TYPE)
- | KW_TABLE? -> ^(TOK_TABLE_TYPE)
+privObject
+ : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier)
+ | KW_TABLE? tableName partitionSpec? -> ^(TOK_TABLE_TYPE tableName partitionSpec?)
;
+privObjectCols
+ : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier)
+ | KW_TABLE? tableName (LPAREN cols=columnNameList RPAREN)? partitionSpec? -> ^(TOK_TABLE_TYPE tableName $cols? partitionSpec?)
+ ;
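[Note: privObjectType previously matched only the type keyword, leaving the object itself a bare identifier; the new privObject/privObjectCols rules bind the type keyword to a full tableName, so grants can target db-qualified tables and, with privObjectCols, a column list. A rough sketch of the tree the rewritten rules emit, with token names assumed from the grammar above, for GRANT ... ON TABLE db1.tbl1 (col1) ...:]

    // TOK_PRIV_OBJECT_COL
    //   TOK_TABLE_TYPE
    //     TOK_TABNAME db1 tbl1      <- from tableName
    //     TOK_TABCOLNAME col1       <- from $cols, when a column list is given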
privilegeList
@init {pushMsg("grant privilege list", state);}
@@ -1551,8 +1548,8 @@ withAdminOption
metastoreCheck
@init { pushMsg("metastore check statement", state); }
@after { popMsg(state); }
- : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE table=identifier partitionSpec? (COMMA partitionSpec)*)?
- -> ^(TOK_MSCK $repair? ($table partitionSpec*)?)
+ : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE tableName partitionSpec? (COMMA partitionSpec)*)?
+ -> ^(TOK_MSCK $repair? (tableName partitionSpec*)?)
;
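[Note on the grammar changes as a whole: every ALTER TABLE suffix, SHOW TBLPROPERTIES, MSCK, and the privilege rules switch from a bare identifier to the tableName production, so db-qualified names parse everywhere. A hedged smoke-test sketch using the Driver API from this codebase; it assumes a running metastore and an existing db1.tbl1, and omits response checking:]

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.session.SessionState;

    HiveConf conf = new HiveConf();
    SessionState.start(new SessionState(conf));
    Driver driver = new Driver(conf);
    // All of these previously required USE db1 plus a bare identifier:
    driver.run("ALTER TABLE db1.tbl1 TOUCH");
    driver.run("ALTER TABLE db1.tbl1 ADD PARTITION (ds='2014-08-13')");
    driver.run("ALTER TABLE db1.tbl1 RENAME TO db1.tbl2");
    driver.run("SHOW TBLPROPERTIES db1.tbl2");
    driver.run("MSCK REPAIR TABLE db1.tbl2");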
resourceList
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java Wed Aug 13 00:25:32 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.conf.HiveC
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -134,10 +135,10 @@ public class IndexUpdater {
inputs.addAll(driver.getPlan().getInputs());
}
-
private boolean containsPartition(Index index, Map<String, String> partSpec)
- throws HiveException {
- Table indexTable = hive.getTable(index.getIndexTableName());
+ throws HiveException {
+ String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
+ Table indexTable = hive.getTable(qualified[0], qualified[1]);
List<Partition> parts = hive.getPartitions(indexTable, partSpec);
return (parts == null || parts.size() == 0);
}
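[Note: the index table name recorded in the metastore may or may not be qualified, so it is resolved against the index's own database rather than the session default. A hedged sketch of the Utilities.getDbTableName(defaultDb, name) semantics used above; the real helper also rejects names with more than one dot:]

    static String[] getDbTableName(String defaultDb, String name) {
      if (name == null) {
        return new String[] { defaultDb, null };
      }
      String[] parts = name.split("\\.");
      return parts.length == 2 ? parts : new String[] { defaultDb, name };
    }

    // getDbTableName("default", "idx__t1_x") -> {"default", "idx__t1_x"}
    // getDbTableName("default", "db1.idx")   -> {"db1", "idx"}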
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java Wed Aug 13 00:25:32 2014
@@ -613,7 +613,7 @@ public class ParseContext {
}
public PrunedPartitionList getPrunedPartitions(String alias, TableScanOperator ts)
- throws HiveException {
+ throws SemanticException {
PrunedPartitionList partsList = opToPartList.get(ts);
if (partsList == null) {
partsList = PartitionPruner.prune(ts, this, alias);
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1617633&r1=1617632&r2=1617633&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed Aug 13 00:25:32 2014
@@ -483,7 +483,7 @@ public class SemanticAnalyzer extends Ba
if(containsLeadLagUDF(expressionTree)) {
throw new SemanticException(ErrorMsg.MISSING_OVER_CLAUSE.getMsg(functionName));
}
- aggregations.put(expressionTree.toStringTree(), expressionTree);
+ aggregations.put(expressionTree.toStringTree().toLowerCase(), expressionTree);
FunctionInfo fi = FunctionRegistry.getFunctionInfo(functionName);
if (!fi.isNative()) {
unparseTranslator.addIdentifierTranslation((ASTNode) expressionTree
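[Note: Hive function names are case-insensitive, so SUM(a) and sum(a) must collapse to a single aggregation; keying the map on the lower-cased string tree achieves that. A minimal illustration of the dedup (tree strings are illustrative stand-ins for toStringTree() output):]

    import java.util.LinkedHashMap;
    import java.util.Map;

    Map<String, Object> aggregations = new LinkedHashMap<String, Object>();
    String a = "(TOK_FUNCTION SUM (TOK_TABLE_OR_COL a))";
    String b = "(TOK_FUNCTION sum (TOK_TABLE_OR_COL a))";
    aggregations.put(a.toLowerCase(), new Object());
    aggregations.put(b.toLowerCase(), new Object());
    System.out.println(aggregations.size());  // 1: both spellings share one key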
@@ -2580,6 +2580,7 @@ public class SemanticAnalyzer extends Ba
throw new SemanticException(generateErrorMessage(rowChild,
ErrorMsg.LINES_TERMINATED_BY_NON_NEWLINE.getMsg()));
}
+ break;
case HiveParser.TOK_TABLEROWFORMATNULL:
String nullFormat = unescapeSQLString(rowChild.getChild(0).getText());
tblDesc.getProperties().setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT,
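[Note: the added break plugs a switch fall-through. Without it, a LINES TERMINATED BY clause ran on into the TOK_TABLEROWFORMATNULL branch and clobbered the serde's null format with the line terminator. A standalone sketch of the bug shape; token constants and the property key are stand-ins:]

    static void handleRowFormat(int tok, java.util.Properties props, String v) {
      final int LINES = 1, NULLFMT = 2;
      switch (tok) {
        case LINES:
          if (!"\n".equals(v)) {
            throw new IllegalArgumentException("only newline is supported");
          }
          break;  // the fix: without this, LINES also runs the branch below
        case NULLFMT:
          props.setProperty("serialization.null.format", v);
          break;
      }
    }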
@@ -10148,7 +10149,7 @@ public class SemanticAnalyzer extends Ba
// check for existence of table
if (ifNotExists) {
try {
- Table table = getTableWithQN(tableName, false);
+ Table table = getTable(tableName, false);
if (table != null) { // table exists
return null;
}
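[Note: getTableWithQN existed only because getTable could not take a qualified name; with that fixed elsewhere in this commit, the plain getTable(name, false) covers both forms and returns null instead of throwing when the table is absent, which is exactly what the IF NOT EXISTS path wants (fragment; table name illustrative):]

    Table table = getTable("db1.tbl1", false);  // no throw when missing
    if (table != null) {
      return null;  // exists already; CREATE TABLE IF NOT EXISTS is a no-op
    }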
@@ -10213,7 +10214,7 @@ public class SemanticAnalyzer extends Ba
tblProps = addDefaultProperties(tblProps);
if (isTemporary) {
- Table likeTable = getTableWithQN(likeTableName, false);
+ Table likeTable = getTable(likeTableName, false);
if (likeTable != null && likeTable.getPartCols().size() > 0) {
throw new SemanticException("Partition columns are not supported on temporary tables "
+ "and source table in CREATE TABLE LIKE is partitioned.");
@@ -10333,7 +10334,7 @@ public class SemanticAnalyzer extends Ba
private void validateCreateView(CreateViewDesc createVwDesc)
throws SemanticException {
try {
- Table oldView = getTableWithQN(createVwDesc.getViewName(), false);
+ Table oldView = getTable(createVwDesc.getViewName(), false);
// ALTER VIEW AS SELECT requires the view must exist
if (createVwDesc.getIsAlterViewAs() && oldView == null) {