Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/16 00:46:49 UTC
svn commit: r1625176 [3/9] - in /hive/branches/cbo: ./
common/src/java/org/apache/hadoop/hive/common/
common/src/java/org/apache/hadoop/hive/conf/
contrib/src/test/results/clientpositive/ data/conf/tez/ data/files/
itests/hive-unit/src/test/java/org/ap...
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Sep 15 22:46:44 2014
@@ -43,6 +43,8 @@ import java.util.regex.PatternSyntaxExce
import net.hydromatic.optiq.SchemaPlus;
import net.hydromatic.optiq.tools.Frameworks;
+import org.antlr.runtime.ClassicToken;
+import org.antlr.runtime.Token;
import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeWizard;
import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
@@ -90,6 +92,8 @@ import org.apache.hadoop.hive.ql.exec.Un
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
@@ -297,6 +301,8 @@ public class SemanticAnalyzer extends Ba
// Max characters when auto generating the column name with func name
private static final int AUTOGEN_COLALIAS_PRFX_MAXLENGTH = 20;
+ private static final String VALUES_TMP_TABLE_NAME_PREFIX = "Values__Tmp__Table__";
+
private HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner;
private HashMap<TableScanOperator, PrunedPartitionList> opToPartList;
private HashMap<String, Operator<? extends OperatorDesc>> topOps;
@@ -776,6 +782,140 @@ public class SemanticAnalyzer extends Ba
return alias;
}
+ // Generate a temp table out of a values clause
+ private ASTNode genValuesTempTable(ASTNode originalFrom) throws SemanticException {
+ // Pick a name for the table
+ SessionState ss = SessionState.get();
+ String tableName = VALUES_TMP_TABLE_NAME_PREFIX + ss.getNextValuesTempTableSuffix();
+
+ // Step 1, parse the values clause we were handed
+ List<? extends Node> fromChildren = originalFrom.getChildren();
+ // First child should be the virtual table ref
+ ASTNode virtualTableRef = (ASTNode)fromChildren.get(0);
+ assert virtualTableRef.getToken().getType() == HiveParser.TOK_VIRTUAL_TABREF :
+ "Expected first child of TOK_VIRTUAL_TABLE to be TOK_VIRTUAL_TABREF but was " +
+ virtualTableRef.getName();
+
+ List<? extends Node> virtualTableRefChildren = virtualTableRef.getChildren();
+ // First child of this should be the table name. If it's anonymous,
+ // then we don't have a table name.
+ ASTNode tabName = (ASTNode)virtualTableRefChildren.get(0);
+ if (tabName.getToken().getType() != HiveParser.TOK_ANONYMOUS) {
+ // TODO, if you want to make select ... from (values(...)) as foo(...) work,
+ // you need to parse this list of column names and build it into the table
+ throw new SemanticException(ErrorMsg.VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED.getMsg());
+ }
+
+ // The second child of the TOK_VIRTUAL_TABLE should be TOK_VALUES_TABLE
+ ASTNode valuesTable = (ASTNode)fromChildren.get(1);
+ assert valuesTable.getToken().getType() == HiveParser.TOK_VALUES_TABLE :
+ "Expected second child of TOK_VIRTUAL_TABLE to be TOK_VALUE_TABLE but was " +
+ valuesTable.getName();
+ // Each of the children of TOK_VALUES_TABLE will be a TOK_VALUE_ROW
+ List<? extends Node> valuesTableChildren = valuesTable.getChildren();
+
+ // Now that we're going to start reading through the rows, open a file to write the rows to.
+ // If we leave this method before creating the temporary table, we need to be sure to clean up
+ // this file.
+ Path tablePath = null;
+ FileSystem fs = null;
+ try {
+ tablePath = Warehouse.getDnsPath(new Path(ss.getTempTableSpace(), tableName), conf);
+ fs = tablePath.getFileSystem(conf);
+ fs.mkdirs(tablePath);
+ Path dataFile = new Path(tablePath, "data_file");
+ FSDataOutputStream out = fs.create(dataFile);
+ List<FieldSchema> fields = new ArrayList<FieldSchema>();
+
+ boolean firstRow = true;
+ for (Node n : valuesTableChildren) {
+ ASTNode valuesRow = (ASTNode) n;
+ assert valuesRow.getToken().getType() == HiveParser.TOK_VALUE_ROW :
+ "Expected child of TOK_VALUE_TABLE to be TOK_VALUE_ROW but was " + valuesRow.getName();
+ // Each of the children of this should be a literal
+ List<? extends Node> valuesRowChildren = valuesRow.getChildren();
+ boolean isFirst = true;
+ int nextColNum = 1;
+ for (Node n1 : valuesRowChildren) {
+ ASTNode value = (ASTNode) n1;
+ if (firstRow) {
+ fields.add(new FieldSchema("tmp_values_col" + nextColNum++, "string", ""));
+ }
+ if (isFirst) isFirst = false;
+ else out.writeBytes("\u0001");
+ out.writeBytes(unparseExprForValuesClause(value));
+ }
+ out.writeBytes("\n");
+ firstRow = false;
+ }
+ out.close();
+
+ // Step 2, create a temp table, using the created file as the data
+ StorageFormat format = new StorageFormat(conf);
+ format.processStorageFormat("TextFile");
+ Table table = db.newTable(tableName);
+ table.setSerializationLib(format.getSerde());
+ table.setFields(fields);
+ table.setDataLocation(tablePath);
+ table.getTTable().setTemporary(true);
+ table.setStoredAsSubDirectories(false);
+ table.setInputFormatClass(format.getInputFormat());
+ table.setOutputFormatClass(format.getOutputFormat());
+ db.createTable(table, false);
+ } catch (Exception e) {
+ String errMsg = ErrorMsg.INSERT_CANNOT_CREATE_TEMP_FILE.getMsg() + e.getMessage();
+ LOG.error(errMsg);
+ // Try to delete the file
+ if (fs != null && tablePath != null) {
+ try {
+ fs.delete(tablePath, false);
+ } catch (IOException swallowIt) {}
+ }
+ throw new SemanticException(errMsg, e);
+ }
+
+ // Step 3, return a new subtree with a from clause built around that temp table
+ // The form of the tree is TOK_TABREF->TOK_TABNAME->identifier(tablename)
+ Token t = new ClassicToken(HiveParser.TOK_TABREF);
+ ASTNode tabRef = new ASTNode(t);
+ t = new ClassicToken(HiveParser.TOK_TABNAME);
+ ASTNode tabNameNode = new ASTNode(t);
+ tabRef.addChild(tabNameNode);
+ t = new ClassicToken(HiveParser.Identifier, tableName);
+ ASTNode identifier = new ASTNode(t);
+ tabNameNode.addChild(identifier);
+ return tabRef;
+ }
+
+ // Take an expression in the values clause and turn it back into a string. This is far from
+ // comprehensive. At the moment it only supports:
+ // * literals (all types)
+ // * unary negatives
+ // * true/false
+ private String unparseExprForValuesClause(ASTNode expr) throws SemanticException {
+ switch (expr.getToken().getType()) {
+ case HiveParser.Number:
+ return expr.getText();
+
+ case HiveParser.StringLiteral:
+ return PlanUtils.stripQuotes(expr.getText());
+
+ case HiveParser.KW_FALSE:
+ return "FALSE";
+
+ case HiveParser.KW_TRUE:
+ return "TRUE";
+
+ case HiveParser.MINUS:
+ return "-" + unparseExprForValuesClause((ASTNode)expr.getChildren().get(0));
+
+ default:
+ throw new SemanticException("Expression of type " + expr.getText() +
+ " not supported in insert/values");
+ }
+
+ }
+
private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException {
String inputFormat = conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ?
HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT):
@@ -1108,7 +1248,11 @@ public class SemanticAnalyzer extends Ba
if (frm.getToken().getType() == HiveParser.TOK_TABREF) {
processTable(qb, frm);
} else if (frm.getToken().getType() == HiveParser.TOK_VIRTUAL_TABLE) {
- throw new RuntimeException("VALUES() clause is not fully supported yet...");
+ // Create a temp table with the passed values in it then rewrite this portion of the
+ // tree to be from that table.
+ ASTNode newFrom = genValuesTempTable(frm);
+ ast.setChild(0, newFrom);
+ processTable(qb, newFrom);
} else if (frm.getToken().getType() == HiveParser.TOK_SUBQUERY) {
processSubQuery(qb, frm);
} else if (frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW ||
@@ -1301,10 +1445,6 @@ public class SemanticAnalyzer extends Ba
case HiveParser.TOK_CTE:
processCTE(qb, ast);
break;
- case HiveParser.TOK_DELETE_FROM:
- throw new RuntimeException("DELETE is not (yet) implemented...");
- case HiveParser.TOK_UPDATE_TABLE:
- throw new RuntimeException("UPDATE is not (yet) implemented...");
default:
skipRecursion = false;
break;
@@ -1395,7 +1535,7 @@ public class SemanticAnalyzer extends Ba
// Disallow INSERT INTO on bucketized tables
if (qb.getParseInfo().isInsertIntoTable(tab.getDbName(), tab.getTableName()) &&
- tab.getNumBuckets() > 0) {
+ tab.getNumBuckets() > 0 && !isAcidTable(tab)) {
throw new SemanticException(ErrorMsg.INSERT_INTO_BUCKETIZED_TABLE.
getMsg("Table: " + tab_name));
}
@@ -4341,7 +4481,7 @@ public class SemanticAnalyzer extends Ba
groupingSetsPresent ? keyLength + 1 : keyLength,
reduceValues, distinctColIndices,
outputKeyColumnNames, outputValueColumnNames, true, -1, numPartitionFields,
- numReducers),
+ numReducers, AcidUtils.Operation.NOT_ACID),
new RowSchema(reduceSinkOutputRowResolver.getColumnInfos()), inputOperatorInfo),
reduceSinkOutputRowResolver);
rsOp.setColumnExprMap(colExprMap);
@@ -4544,7 +4684,7 @@ public class SemanticAnalyzer extends Ba
}
ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys, keyLength, reduceValues,
distinctColIndices, outputKeyColumnNames, outputValueColumnNames,
- true, -1, keyLength, numReducers);
+ true, -1, keyLength, numReducers, AcidUtils.Operation.NOT_ACID);
ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(
OperatorFactory.getAndMakeChild(rsDesc, new RowSchema(reduceSinkOutputRowResolver
@@ -4659,8 +4799,8 @@ public class SemanticAnalyzer extends Ba
ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(
OperatorFactory.getAndMakeChild(PlanUtils.getReduceSinkDesc(reduceKeys,
reduceValues, outputColumnNames, true, -1, numPartitionFields,
- numReducers), new RowSchema(reduceSinkOutputRowResolver2
- .getColumnInfos()), groupByOperatorInfo),
+ numReducers, AcidUtils.Operation.NOT_ACID),
+ new RowSchema(reduceSinkOutputRowResolver2.getColumnInfos()), groupByOperatorInfo),
reduceSinkOutputRowResolver2);
rsOp.setColumnExprMap(colExprMap);
@@ -5644,9 +5784,14 @@ public class SemanticAnalyzer extends Ba
if ((dest_tab.getNumBuckets() > 0) &&
(conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
enforceBucketing = true;
- partnCols = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, true);
- partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
- false);
+ if (updating() || deleting()) {
+ partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
+ partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
+ } else {
+ partnCols = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, true);
+ partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
+ false);
+ }
}
if ((dest_tab.getSortCols() != null) &&
@@ -5668,6 +5813,7 @@ public class SemanticAnalyzer extends Ba
}
int numBuckets = dest_tab.getNumBuckets();
if (numBuckets > maxReducers) {
+ LOG.debug("XXXXXX numBuckets is " + numBuckets + " and maxReducers is " + maxReducers);
multiFileSpray = true;
totalFiles = numBuckets;
if (totalFiles % maxReducers == 0) {
@@ -5833,7 +5979,11 @@ public class SemanticAnalyzer extends Ba
// Create the work for moving the table
// NOTE: specify Dynamic partitions in dest_tab for WriteEntity
if (!isNonNativeTable) {
- ltd = new LoadTableDesc(queryTmpdir,table_desc, dpCtx);
+ AcidUtils.Operation acidOp = getAcidType(table_desc.getOutputFileFormatClass());
+ if (acidOp != AcidUtils.Operation.NOT_ACID) {
+ checkIfAcidAndOverwriting(qb, table_desc);
+ }
+ ltd = new LoadTableDesc(queryTmpdir,table_desc, dpCtx, acidOp);
ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
dest_tab.getTableName()));
ltd.setLbCtx(lbCtx);
@@ -5936,7 +6086,11 @@ public class SemanticAnalyzer extends Ba
lbCtx = constructListBucketingCtx(dest_part.getSkewedColNames(),
dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(),
dest_part.isStoredAsSubDirectories(), conf);
- ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec());
+ AcidUtils.Operation acidOp = getAcidType(table_desc.getOutputFileFormatClass());
+ if (acidOp != AcidUtils.Operation.NOT_ACID) {
+ checkIfAcidAndOverwriting(qb, table_desc);
+ }
+ ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp);
ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
dest_tab.getTableName()));
ltd.setLbCtx(lbCtx);
@@ -6088,18 +6242,25 @@ public class SemanticAnalyzer extends Ba
ArrayList<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
- try {
- StructObjectInspector rowObjectInspector = (StructObjectInspector) table_desc
- .getDeserializer().getObjectInspector();
- List<? extends StructField> fields = rowObjectInspector
- .getAllStructFieldRefs();
- for (int i = 0; i < fields.size(); i++) {
- vecCol.add(new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils
- .getTypeInfoFromObjectInspector(fields.get(i)
- .getFieldObjectInspector()), "", false));
+ if (updating() || deleting()) {
+ vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(),
+ //TypeInfoUtils.getTypeInfoFromObjectInspector(VirtualColumn.ROWID.getObjectInspector()),
+ VirtualColumn.ROWID.getTypeInfo(),
+ "", true));
+ } else {
+ try {
+ StructObjectInspector rowObjectInspector = (StructObjectInspector) table_desc
+ .getDeserializer().getObjectInspector();
+ List<? extends StructField> fields = rowObjectInspector
+ .getAllStructFieldRefs();
+ for (int i = 0; i < fields.size(); i++) {
+ vecCol.add(new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils
+ .getTypeInfoFromObjectInspector(fields.get(i)
+ .getFieldObjectInspector()), "", false));
+ }
+ } catch (Exception e) {
+ throw new SemanticException(e.getMessage(), e);
}
- } catch (Exception e) {
- throw new SemanticException(e.getMessage(), e);
}
RowSchema fsRS = new RowSchema(vecCol);
@@ -6112,6 +6273,10 @@ public class SemanticAnalyzer extends Ba
(dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0 &&
conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))));
+ // If this table is working with ACID semantics, turn off merging
+ boolean acidTable = isAcidTable(dest_tab);
+ canBeMerged &= !acidTable;
+
FileSinkDesc fileSinkDesc = new FileSinkDesc(
queryTmpdir,
table_desc,
@@ -6124,6 +6289,15 @@ public class SemanticAnalyzer extends Ba
rsCtx.getPartnCols(),
dpCtx);
+ // If this is an insert, update, or delete on an ACID table then mark that so the
+ // FileSinkOperator knows how to properly write to it.
+ if (acidTable) {
+ AcidUtils.Operation wt = updating() ? AcidUtils.Operation.UPDATE :
+ (deleting() ? AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT);
+ fileSinkDesc.setWriteType(wt);
+ acidFileSinks.add(fileSinkDesc);
+ }
+
/* Set List Bucketing context. */
if (lbCtx != null) {
lbCtx.processRowSkewedIndex(fsRS);
@@ -6174,6 +6348,17 @@ public class SemanticAnalyzer extends Ba
return output;
}
+ // Check if we are overwriting any tables. If so, throw an exception as that is not allowed
+ // when using an Acid compliant txn manager and operating on an acid table.
+ private void checkIfAcidAndOverwriting(QB qb, TableDesc tableDesc) throws SemanticException {
+ String tableName = tableDesc.getTableName();
+ if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
+ LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
+ throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
+ }
+
+ }
+
/**
* Generate the conversion SelectOperator that converts the columns into the
* types that are expected by the table_desc.
@@ -6201,16 +6386,34 @@ public class SemanticAnalyzer extends Ba
outColumnCnt += dpCtx.getNumDPCols();
}
- if (inColumnCnt != outColumnCnt) {
- String reason = "Table " + dest + " has " + outColumnCnt
- + " columns, but query has " + inColumnCnt + " columns.";
- throw new SemanticException(ErrorMsg.TARGET_TABLE_COLUMN_MISMATCH.getMsg(
- qb.getParseInfo().getDestForClause(dest), reason));
- } else if (dynPart && dpCtx != null) {
- // create the mapping from input ExprNode to dest table DP column
- dpCtx.mapInputToDP(rowFields.subList(tableFields.size(), rowFields.size()));
+ if (deleting()) {
+ // Figure out if we have partition columns in the list or not. If so,
+ // add them into the mapping. Partition columns will be located after the row id.
+ if (rowFields.size() > 1) {
+ // This means we have partition columns to deal with, so set up the mapping from the
+ // input to the partition columns.
+ dpCtx.mapInputToDP(rowFields.subList(1, rowFields.size()));
+ }
+ } else if (updating()) {
+ // In this case we expect the number of in fields to exceed the number of out fields by one
+ // (for the ROW__ID virtual column). If there are more columns than this,
+ // then the extras are for dynamic partitioning
+ if (dynPart && dpCtx != null) {
+ dpCtx.mapInputToDP(rowFields.subList(tableFields.size() + 1, rowFields.size()));
+ }
+ } else {
+ if (inColumnCnt != outColumnCnt) {
+ String reason = "Table " + dest + " has " + outColumnCnt
+ + " columns, but query has " + inColumnCnt + " columns.";
+ throw new SemanticException(ErrorMsg.TARGET_TABLE_COLUMN_MISMATCH.getMsg(
+ qb.getParseInfo().getDestForClause(dest), reason));
+ } else if (dynPart && dpCtx != null) {
+ // create the mapping from input ExprNode to dest table DP column
+ dpCtx.mapInputToDP(rowFields.subList(tableFields.size(), rowFields.size()));
+ }
}
+
// Check column types
boolean converted = false;
int columnNumber = tableFields.size();
@@ -6222,17 +6425,26 @@ public class SemanticAnalyzer extends Ba
MetadataTypedColumnsetSerDe.class);
boolean isLazySimpleSerDe = table_desc.getDeserializerClass().equals(
LazySimpleSerDe.class);
- if (!isMetaDataSerDe) {
+ if (!isMetaDataSerDe && !deleting()) {
+
+ // If we're updating, add the ROW__ID expression, then make the following column accesses
+ // offset by 1 so that we don't try to convert the ROW__ID
+ if (updating()) {
+ expressions.add(new ExprNodeColumnDesc(rowFields.get(0).getType(),
+ rowFields.get(0).getInternalName(), "", true));
+ }
// here only deals with non-partition columns. We deal with partition columns next
for (int i = 0; i < columnNumber; i++) {
+ int rowFieldsOffset = updating() ? i + 1 : i;
ObjectInspector tableFieldOI = tableFields.get(i)
.getFieldObjectInspector();
TypeInfo tableFieldTypeInfo = TypeInfoUtils
.getTypeInfoFromObjectInspector(tableFieldOI);
- TypeInfo rowFieldTypeInfo = rowFields.get(i).getType();
+ TypeInfo rowFieldTypeInfo = rowFields.get(rowFieldsOffset).getType();
ExprNodeDesc column = new ExprNodeColumnDesc(rowFieldTypeInfo,
- rowFields.get(i).getInternalName(), "", false, rowFields.get(i).isSkewedCol());
+ rowFields.get(rowFieldsOffset).getInternalName(), "", false,
+ rowFields.get(rowFieldsOffset).isSkewedCol());
// LazySimpleSerDe can convert any types to String type using
// JSON-format.
if (!tableFieldTypeInfo.equals(rowFieldTypeInfo)
@@ -6262,7 +6474,7 @@ public class SemanticAnalyzer extends Ba
// deal with dynamic partition columns: convert ExprNodeDesc type to String??
if (dynPart && dpCtx != null && dpCtx.getNumDPCols() > 0) {
// DP columns starts with tableFields.size()
- for (int i = tableFields.size(); i < rowFields.size(); ++i) {
+ for (int i = tableFields.size() + (updating() ? 1 : 0); i < rowFields.size(); ++i) {
TypeInfo rowFieldTypeInfo = rowFields.get(i).getType();
ExprNodeDesc column = new ExprNodeColumnDesc(
rowFieldTypeInfo, rowFields.get(i).getInternalName(), "", false);
@@ -6456,6 +6668,27 @@ public class SemanticAnalyzer extends Ba
return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
}
+ // We have to set up the bucketing columns differently for updates and deletes,
+ // as they always use the ROW__ID column.
+ private ArrayList<ExprNodeDesc> getPartitionColsFromBucketColsForUpdateDelete(
+ Operator input, boolean convert) throws SemanticException {
+ //return genConvertCol(dest, qb, tab, table_desc, input, Arrays.asList(0), convert);
+ // In the case of update and delete the bucketing column is always the first column,
+ // and it isn't in the table info. So rather than asking the table for it,
+ // we'll construct it ourself and send it back. This is based on the work done in
+ // genConvertCol below.
+ ColumnInfo rowField = opParseCtx.get(input).getRowResolver().getColumnInfos().get(0);
+ TypeInfo rowFieldTypeInfo = rowField.getType();
+ ExprNodeDesc column = new ExprNodeColumnDesc(rowFieldTypeInfo, rowField.getInternalName(),
+ rowField.getTabAlias(), true);
+ if (convert) {
+ column = ParseUtils.createConversionCast(column, TypeInfoFactory.intTypeInfo);
+ }
+ ArrayList<ExprNodeDesc> rlist = new ArrayList<ExprNodeDesc>(1);
+ rlist.add(column);
+ return rlist;
+ }
+
private ArrayList<ExprNodeDesc> genConvertCol(String dest, QB qb, Table tab,
TableDesc table_desc, Operator input, List<Integer> posns, boolean convert)
throws SemanticException {
@@ -6578,9 +6811,11 @@ public class SemanticAnalyzer extends Ba
order.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? '+' : '-');
}
+ AcidUtils.Operation acidOp = (isAcidTable(tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID);
+
Operator interim = putOpInsertMap(OperatorFactory.getAndMakeChild(PlanUtils
.getReduceSinkDesc(sortCols, valueCols, outputColumns, false, -1,
- partitionCols, order.toString(), numReducers),
+ partitionCols, order.toString(), numReducers, acidOp),
new RowSchema(inputRR.getColumnInfos()), input), inputRR);
interim.setColumnExprMap(colExprMap);
reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator) interim);
@@ -6736,8 +6971,9 @@ public class SemanticAnalyzer extends Ba
dummy.setParentOperators(null);
+ // TODO Not 100% sure NOT_ACID is always right here.
ReduceSinkDesc rsdesc = PlanUtils.getReduceSinkDesc(sortCols, valueCols, outputColumns,
- false, -1, partitionCols, order.toString(), numReducers);
+ false, -1, partitionCols, order.toString(), numReducers, AcidUtils.Operation.NOT_ACID);
Operator interim = putOpInsertMap(OperatorFactory.getAndMakeChild(rsdesc,
new RowSchema(rsRR.getColumnInfos()), input), rsRR);
@@ -7002,7 +7238,7 @@ public class SemanticAnalyzer extends Ba
ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys,
reduceValues, outputColumns, false, tag,
- reduceKeys.size(), numReds);
+ reduceKeys.size(), numReds, AcidUtils.Operation.NOT_ACID);
ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(
OperatorFactory.getAndMakeChild(rsDesc, new RowSchema(outputRR
@@ -8190,7 +8426,8 @@ public class SemanticAnalyzer extends Ba
ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(
OperatorFactory.getAndMakeChild(PlanUtils.getReduceSinkDesc(reduceKeys,
- reduceValues, outputColumnNames, true, -1, reduceKeys.size(), -1),
+ reduceValues, outputColumnNames, true, -1, reduceKeys.size(), -1,
+ AcidUtils.Operation.NOT_ACID),
new RowSchema(reduceSinkOutputRowResolver.getColumnInfos()), input),
reduceSinkOutputRowResolver);
@@ -11563,7 +11800,7 @@ public class SemanticAnalyzer extends Ba
input = putOpInsertMap(OperatorFactory.getAndMakeChild(PlanUtils
.getReduceSinkDesc(orderCols,
valueCols, outputColumnNames, false,
- -1, partCols, orderString.toString(), -1),
+ -1, partCols, orderString.toString(), -1, AcidUtils.Operation.NOT_ACID),
new RowSchema(rsOpRR.getColumnInfos()), input), rsOpRR);
input.setColumnExprMap(colExprMap);
}
@@ -11688,7 +11925,7 @@ public class SemanticAnalyzer extends Ba
input = putOpInsertMap(OperatorFactory.getAndMakeChild(PlanUtils
.getReduceSinkDesc(orderCols,
valueCols, outputColumnNames, false,
- -1, partCols, orderString.toString(), -1),
+ -1, partCols, orderString.toString(), -1, AcidUtils.Operation.NOT_ACID),
new RowSchema(rsNewRR.getColumnInfos()), input), rsNewRR);
input.setColumnExprMap(colExprMap);
@@ -11846,6 +12083,50 @@ public class SemanticAnalyzer extends Ba
WriteEntity.WriteType.INSERT);
}
+ // Even if the table is of Acid type, return false if we aren't working with an
+ // Acid-compliant TxnManager.
+ private boolean isAcidTable(Table tab) {
+ if (tab == null || tab.getOutputFormatClass() == null) return false;
+ if (!SessionState.get().getTxnMgr().supportsAcid()) return false;
+ return isAcidOutputFormat(tab.getOutputFormatClass());
+ }
+
+ private boolean isAcidOutputFormat(Class<? extends HiveOutputFormat> of) {
+ Class<?>[] interfaces = of.getInterfaces();
+ for (Class<?> iface : interfaces) {
+ if (iface.equals(AcidOutputFormat.class)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Note that this method assumes you have already decided this is an Acid table. It cannot
+ // figure out if a table is Acid or not.
+ private AcidUtils.Operation getAcidType() {
+ return deleting() ? AcidUtils.Operation.DELETE :
+ (updating() ? AcidUtils.Operation.UPDATE :
+ AcidUtils.Operation.INSERT);
+ }
+
+ private AcidUtils.Operation getAcidType(Class<? extends HiveOutputFormat> of) {
+ if (SessionState.get() == null || !SessionState.get().getTxnMgr().supportsAcid()) {
+ return AcidUtils.Operation.NOT_ACID;
+ } else if (isAcidOutputFormat(of)) {
+ return getAcidType();
+ } else {
+ return AcidUtils.Operation.NOT_ACID;
+ }
+ }
+
+ protected boolean updating() {
+ return false;
+ }
+
+ protected boolean deleting() {
+ return false;
+ }
+
/**** Temporary Place Holder For Optiq plan Gen, Optimizer ****/
/*
@@ -13700,5 +13981,4 @@ public class SemanticAnalyzer extends Ba
return tabAliases;
}
}
-
}
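A minimal standalone sketch of the data_file layout that genValuesTempTable() above produces: each VALUES row becomes one line of the temporary TextFile table, with columns joined by \u0001 (the default text-table field delimiter) and rows terminated by \n. The class name and the literal rows below are assumptions for illustration, and plain java.io is used in place of FSDataOutputStream:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import java.util.List;

    public class ValuesFileSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical rows for: INSERT INTO t VALUES (1, 'fred'), (2, 'wilma')
        List<List<String>> rows = Arrays.asList(
            Arrays.asList("1", "fred"),
            Arrays.asList("2", "wilma"));
        try (OutputStreamWriter out = new OutputStreamWriter(
            new FileOutputStream("data_file"), StandardCharsets.UTF_8)) {
          for (List<String> row : rows) {
            out.write(String.join("\u0001", row)); // field separator, as written by the patch
            out.write("\n");                       // row terminator, as written by the patch
          }
        }
      }
    }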
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Mon Sep 15 22:46:44 2014
@@ -268,6 +268,11 @@ public final class SemanticAnalyzerFacto
case HiveParser.TOK_CREATEMACRO:
case HiveParser.TOK_DROPMACRO:
return new MacroSemanticAnalyzer(conf);
+
+ case HiveParser.TOK_UPDATE_TABLE:
+ case HiveParser.TOK_DELETE_FROM:
+ return new UpdateDeleteSemanticAnalyzer(conf);
+
default:
return new SemanticAnalyzer(conf);
}
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java Mon Sep 15 22:46:44 2014
@@ -80,7 +80,7 @@ public class StorageFormat {
return true;
}
- private void processStorageFormat(String name) throws SemanticException {
+ protected void processStorageFormat(String name) throws SemanticException {
if (name.isEmpty()) {
throw new SemanticException("File format in STORED AS clause cannot be empty");
}
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java Mon Sep 15 22:46:44 2014
@@ -404,6 +404,9 @@ public class TezCompiler extends TaskCom
}
private void setInputFormat(MapWork work, Operator<? extends OperatorDesc> op) {
+ if (op == null) {
+ return;
+ }
if (op.isUseBucketizedHiveInputFormat()) {
work.setUseBucketizedHiveInputFormat(true);
return;
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java Mon Sep 15 22:46:44 2014
@@ -23,6 +23,7 @@ import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
/**
* LoadTableDesc.
@@ -37,6 +38,9 @@ public class LoadTableDesc extends org.a
private boolean holdDDLTime;
private boolean inheritTableSpecs = true; //For partitions, flag controlling whether the current
//table specs are to be used
+ // Need to remember whether this is an acid compliant operation, and if so whether it is an
+ // insert, update, or delete.
+ private AcidUtils.Operation writeType;
// TODO: the below seems like they should just be combined into partitionDesc
private org.apache.hadoop.hive.ql.plan.TableDesc table;
@@ -48,36 +52,69 @@ public class LoadTableDesc extends org.a
public LoadTableDesc(final Path sourcePath,
final org.apache.hadoop.hive.ql.plan.TableDesc table,
- final Map<String, String> partitionSpec, final boolean replace) {
+ final Map<String, String> partitionSpec,
+ final boolean replace,
+ final AcidUtils.Operation writeType) {
super(sourcePath);
- init(table, partitionSpec, replace);
+ init(table, partitionSpec, replace, writeType);
+ }
+
+ /**
+ * For use with non-ACID compliant operations, such as LOAD
+ * @param sourcePath
+ * @param table
+ * @param partitionSpec
+ * @param replace
+ */
+ public LoadTableDesc(final Path sourcePath,
+ final TableDesc table,
+ final Map<String, String> partitionSpec,
+ final boolean replace) {
+ this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID);
}
public LoadTableDesc(final Path sourcePath,
final org.apache.hadoop.hive.ql.plan.TableDesc table,
- final Map<String, String> partitionSpec) {
- this(sourcePath, table, partitionSpec, true);
+ final Map<String, String> partitionSpec,
+ final AcidUtils.Operation writeType) {
+ this(sourcePath, table, partitionSpec, true, writeType);
+ }
+
+ /**
+ * For DDL operations that are not ACID compliant.
+ * @param sourcePath
+ * @param table
+ * @param partitionSpec
+ */
+ public LoadTableDesc(final Path sourcePath,
+ final org.apache.hadoop.hive.ql.plan.TableDesc table,
+ final Map<String, String> partitionSpec) {
+ this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID);
}
public LoadTableDesc(final Path sourcePath,
final org.apache.hadoop.hive.ql.plan.TableDesc table,
- final DynamicPartitionCtx dpCtx) {
+ final DynamicPartitionCtx dpCtx,
+ final AcidUtils.Operation writeType) {
super(sourcePath);
this.dpCtx = dpCtx;
if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) {
- init(table, dpCtx.getPartSpec(), true);
+ init(table, dpCtx.getPartSpec(), true, writeType);
} else {
- init(table, new LinkedHashMap<String, String>(), true);
+ init(table, new LinkedHashMap<String, String>(), true, writeType);
}
}
private void init(
final org.apache.hadoop.hive.ql.plan.TableDesc table,
- final Map<String, String> partitionSpec, final boolean replace) {
+ final Map<String, String> partitionSpec,
+ final boolean replace,
+ AcidUtils.Operation writeType) {
this.table = table;
this.partitionSpec = partitionSpec;
this.replace = replace;
this.holdDDLTime = false;
+ this.writeType = writeType;
}
public void setHoldDDLTime(boolean ddlTime) {
@@ -144,4 +181,8 @@ public class LoadTableDesc extends org.a
public void setLbCtx(ListBucketingCtx lbCtx) {
this.lbCtx = lbCtx;
}
+
+ public AcidUtils.Operation getWriteType() {
+ return writeType;
+ }
}
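For context on the constructor changes above, a short sketch of how a caller now chooses between the ACID-aware and the legacy NOT_ACID paths. It assumes the Hive ql classes on the classpath; tmpDir, tableDesc, and partSpec are hypothetical local variables:

    // ACID write: the move task and FileSinkOperator need to know this is an update.
    LoadTableDesc acidLtd =
        new LoadTableDesc(tmpDir, tableDesc, partSpec, false, AcidUtils.Operation.UPDATE);

    // Non-ACID write (e.g. LOAD): the new four-argument overload fills in NOT_ACID.
    LoadTableDesc plainLtd = new LoadTableDesc(tmpDir, tableDesc, partSpec, false);
    assert plainLtd.getWriteType() == AcidUtils.Operation.NOT_ACID;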
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java Mon Sep 15 22:46:44 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.exec.Ro
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
@@ -597,19 +598,22 @@ public final class PlanUtils {
* @param numReducers
* The number of reducers, set to -1 for automatic inference based on
* input data size.
+ * @param writeType Whether this is an Acid write, and if so whether it is insert, update,
+ * or delete.
* @return The reduceSinkDesc object.
*/
public static ReduceSinkDesc getReduceSinkDesc(
ArrayList<ExprNodeDesc> keyCols, ArrayList<ExprNodeDesc> valueCols,
List<String> outputColumnNames, boolean includeKeyCols, int tag,
- ArrayList<ExprNodeDesc> partitionCols, String order, int numReducers) {
+ ArrayList<ExprNodeDesc> partitionCols, String order, int numReducers,
+ AcidUtils.Operation writeType) {
return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
new ArrayList<List<Integer>>(),
includeKeyCols ? outputColumnNames.subList(0, keyCols.size()) :
new ArrayList<String>(),
includeKeyCols ? outputColumnNames.subList(keyCols.size(),
outputColumnNames.size()) : outputColumnNames,
- includeKeyCols, tag, partitionCols, order, numReducers);
+ includeKeyCols, tag, partitionCols, order, numReducers, writeType);
}
/**
@@ -635,6 +639,8 @@ public final class PlanUtils {
* @param numReducers
* The number of reducers, set to -1 for automatic inference based on
* input data size.
+ * @param writeType Whether this is an Acid write, and if so whether it is insert, update,
+ * or delete.
* @return The reduceSinkDesc object.
*/
public static ReduceSinkDesc getReduceSinkDesc(
@@ -644,7 +650,8 @@ public final class PlanUtils {
List<String> outputKeyColumnNames,
List<String> outputValueColumnNames,
boolean includeKeyCols, int tag,
- ArrayList<ExprNodeDesc> partitionCols, String order, int numReducers) {
+ ArrayList<ExprNodeDesc> partitionCols, String order, int numReducers,
+ AcidUtils.Operation writeType) {
TableDesc keyTable = null;
TableDesc valueTable = null;
ArrayList<String> outputKeyCols = new ArrayList<String>();
@@ -670,7 +677,7 @@ public final class PlanUtils {
return new ReduceSinkDesc(keyCols, numKeys, valueCols, outputKeyCols,
distinctColIndices, outputValCols,
tag, partitionCols, numReducers, keyTable,
- valueTable);
+ valueTable, writeType);
}
/**
@@ -690,12 +697,15 @@ public final class PlanUtils {
* @param numReducers
* The number of reducers, set to -1 for automatic inference based on
* input data size.
+ * @param writeType Whether this is an Acid write, and if so whether it is insert, update,
+ * or delete.
* @return The reduceSinkDesc object.
*/
public static ReduceSinkDesc getReduceSinkDesc(
ArrayList<ExprNodeDesc> keyCols, ArrayList<ExprNodeDesc> valueCols,
List<String> outputColumnNames, boolean includeKey, int tag,
- int numPartitionFields, int numReducers) throws SemanticException {
+ int numPartitionFields, int numReducers, AcidUtils.Operation writeType)
+ throws SemanticException {
return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
new ArrayList<List<Integer>>(),
includeKey ? outputColumnNames.subList(0, keyCols.size()) :
@@ -703,7 +713,7 @@ public final class PlanUtils {
includeKey ?
outputColumnNames.subList(keyCols.size(), outputColumnNames.size())
: outputColumnNames,
- includeKey, tag, numPartitionFields, numReducers);
+ includeKey, tag, numPartitionFields, numReducers, writeType);
}
/**
@@ -729,6 +739,8 @@ public final class PlanUtils {
* @param numReducers
* The number of reducers, set to -1 for automatic inference based on
* input data size.
+ * @param writeType Whether this is an Acid write, and if so whether it is insert, update,
+ * or delete.
* @return The reduceSinkDesc object.
*/
public static ReduceSinkDesc getReduceSinkDesc(
@@ -737,7 +749,8 @@ public final class PlanUtils {
List<List<Integer>> distinctColIndices,
List<String> outputKeyColumnNames, List<String> outputValueColumnNames,
boolean includeKey, int tag,
- int numPartitionFields, int numReducers) throws SemanticException {
+ int numPartitionFields, int numReducers, AcidUtils.Operation writeType)
+ throws SemanticException {
ArrayList<ExprNodeDesc> partitionCols = new ArrayList<ExprNodeDesc>();
if (numPartitionFields >= keyCols.size()) {
@@ -755,7 +768,7 @@ public final class PlanUtils {
}
return getReduceSinkDesc(keyCols, numKeys, valueCols, distinctColIndices,
outputKeyColumnNames, outputValueColumnNames, includeKey, tag,
- partitionCols, order.toString(), numReducers);
+ partitionCols, order.toString(), numReducers, writeType);
}
/**
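Every existing getReduceSinkDesc() call site gains the trailing writeType argument; a hedged example of the typical non-transactional call, mirroring the call sites changed in SemanticAnalyzer.java above (the local variable names are assumptions):

    ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(
        keyCols, valueCols, outputColumnNames,
        true /*includeKeyCols*/, -1 /*tag*/,
        partitionCols, order.toString(), -1 /*numReducers*/,
        AcidUtils.Operation.NOT_ACID);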
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java Mon Sep 15 22:46:44 2014
@@ -23,6 +23,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
/**
@@ -91,6 +92,9 @@ public class ReduceSinkDesc extends Abst
private boolean skipTag; // Skip writing tags when feeding into mapjoin hashtable
private Boolean autoParallel = null; // Is reducer auto-parallelism enabled, disabled or unset
+ // Write type, since this needs to calculate buckets differently for updates and deletes
+ private AcidUtils.Operation writeType;
+
private static transient Log LOG = LogFactory.getLog(ReduceSinkDesc.class);
public ReduceSinkDesc() {
}
@@ -102,7 +106,8 @@ public class ReduceSinkDesc extends Abst
List<List<Integer>> distinctColumnIndices,
ArrayList<String> outputValueColumnNames, int tag,
ArrayList<ExprNodeDesc> partitionCols, int numReducers,
- final TableDesc keySerializeInfo, final TableDesc valueSerializeInfo) {
+ final TableDesc keySerializeInfo, final TableDesc valueSerializeInfo,
+ AcidUtils.Operation writeType) {
this.keyCols = keyCols;
this.numDistributionKeys = numDistributionKeys;
this.valueCols = valueCols;
@@ -116,6 +121,7 @@ public class ReduceSinkDesc extends Abst
this.distinctColumnIndices = distinctColumnIndices;
this.setNumBuckets(-1);
this.setBucketCols(null);
+ this.writeType = writeType;
}
@Override
@@ -367,4 +373,8 @@ public class ReduceSinkDesc extends Abst
this.autoParallel = autoParallel;
}
}
+
+ public AcidUtils.Operation getWriteType() {
+ return writeType;
+ }
}
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java Mon Sep 15 22:46:44 2014
@@ -267,9 +267,9 @@ public class Operation2Privilege {
// select with grant for exporting contents
op2Priv.put(HiveOperationType.EXPORT, PrivRequirement.newIOPrivRequirement
-(SEL_GRANT_AR, null));
+(SEL_GRANT_AR, OWNER_INS_SEL_DEL_NOGRANT_AR));
op2Priv.put(HiveOperationType.IMPORT, PrivRequirement.newIOPrivRequirement
-(INS_NOGRANT_AR, null));
+(OWNER_INS_SEL_DEL_NOGRANT_AR, INS_NOGRANT_AR));
// operations require select priv
op2Priv.put(HiveOperationType.SHOWCOLUMNS, PrivRequirement.newIOPrivRequirement
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Mon Sep 15 22:46:44 2014
@@ -208,6 +208,11 @@ public class SessionState {
private String hdfsScratchDirURIString;
/**
+ * Next value to use in naming a temporary table created by an insert...values statement
+ */
+ private int nextValueTempTableSuffix = 1;
+
+ /**
* Transaction manager to use for this session. This is instantiated lazily by
* {@link #initTxnMgr(org.apache.hadoop.hive.conf.HiveConf)}
*/
@@ -608,7 +613,7 @@ public class SessionState {
hdfsSessionPath.getFileSystem(conf).delete(hdfsSessionPath, true);
}
if (localSessionPath != null) {
- localSessionPath.getFileSystem(conf).delete(localSessionPath, true);
+ FileSystem.getLocal(conf).delete(localSessionPath, true);
}
}
@@ -1341,4 +1346,12 @@ public class SessionState {
this.userIpAddress = userIpAddress;
}
+ /**
+ * Get the next suffix to use in naming a temporary table created by insert...values
+ * @return suffix
+ */
+ public String getNextValuesTempTableSuffix() {
+ return Integer.toString(nextValueTempTableSuffix++);
+ }
+
}
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java Mon Sep 15 22:46:44 2014
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.exec.ve
import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToLong;
import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastDoubleToLong;
import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastTimestampToLongViaLongToLong;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
@@ -204,4 +205,19 @@ public class UDFToInteger extends UDF {
}
}
+ /**
+ * Convert a RecordIdentifier. This is done so that we can use the RecordIdentifier in place
+ * of the bucketing column.
+ * @param i RecordIdentifier to convert
+ * @return value of the bucket identifier
+ */
+ public IntWritable evaluate(RecordIdentifier i) {
+ if (i == null) {
+ return null;
+ } else {
+ intWritable.set(i.getBucketId());
+ return intWritable;
+ }
+ }
+
}
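A small sketch of the new overload in use: casting a ROW__ID to int now yields its bucket id, which is what lets update/delete plans bucket on ROW__ID. The three-argument RecordIdentifier constructor and the literal values are assumptions for illustration:

    UDFToInteger toInt = new UDFToInteger();
    RecordIdentifier rowId = new RecordIdentifier(100L /*transactionId*/, 2 /*bucketId*/, 0L /*rowId*/);
    IntWritable bucket = toInt.evaluate(rowId);  // bucket.get() == 2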
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java Mon Sep 15 22:46:44 2014
@@ -83,7 +83,7 @@ import org.apache.hadoop.hive.ql.exec.ve
IfExprCharScalarStringGroupColumn.class, IfExprVarCharScalarStringGroupColumn.class,
IfExprStringScalarStringScalar.class,
IfExprStringScalarCharScalar.class, IfExprStringScalarVarCharScalar.class,
- IfExprCharScalarStringScalar.class, IfExprVarCharScalarStringScalar.class,
+ IfExprCharScalarStringScalar.class, IfExprVarCharScalarStringScalar.class
})
public class GenericUDFIf extends GenericUDF {
private transient ObjectInspector[] argumentOIs;
Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java Mon Sep 15 22:46:44 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.DriverC
import org.apache.hadoop.hive.ql.WindowsPathUtil;
import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -137,7 +138,7 @@ public class TestExecDriver extends Test
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
db.createTable(src, cols, null, TextInputFormat.class,
IgnoreKeyTextOutputFormat.class);
- db.loadTable(hadoopDataFile[i], src, false, false, true, false);
+ db.loadTable(hadoopDataFile[i], src, false, false, true, false, false);
i++;
}
@@ -246,7 +247,7 @@ public class TestExecDriver extends Test
Operator<ReduceSinkDesc> op1 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
Utilities.makeList(getStringColumn("value")), outputColumns, true,
- -1, 1, -1));
+ -1, 1, -1, AcidUtils.Operation.NOT_ACID));
addMapWork(mr, src, "a", op1);
ReduceWork rWork = new ReduceWork();
@@ -276,7 +277,7 @@ public class TestExecDriver extends Test
.getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
Utilities
.makeList(getStringColumn("key"), getStringColumn("value")),
- outputColumns, false, -1, 1, -1));
+ outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
addMapWork(mr, src, "a", op1);
ReduceWork rWork = new ReduceWork();
@@ -310,14 +311,14 @@ public class TestExecDriver extends Test
Operator<ReduceSinkDesc> op1 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
Utilities.makeList(getStringColumn("value")), outputColumns, true,
- Byte.valueOf((byte) 0), 1, -1));
+ Byte.valueOf((byte) 0), 1, -1, AcidUtils.Operation.NOT_ACID));
addMapWork(mr, src, "a", op1);
Operator<ReduceSinkDesc> op2 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
Utilities.makeList(getStringColumn("key")), outputColumns, true,
- Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1));
+ Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1, AcidUtils.Operation.NOT_ACID));
addMapWork(mr, src2, "b", op2);
ReduceWork rWork = new ReduceWork();
@@ -353,7 +354,7 @@ public class TestExecDriver extends Test
Operator<ReduceSinkDesc> op1 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
Utilities.makeList(getStringColumn("tkey"),
- getStringColumn("tvalue")), outputColumns, false, -1, 1, -1));
+ getStringColumn("tvalue")), outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
Operator<ScriptDesc> op0 = OperatorFactory.get(new ScriptDesc("cat",
PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "key,value"),
@@ -398,7 +399,7 @@ public class TestExecDriver extends Test
Operator<ReduceSinkDesc> op0 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("0")), Utilities
.makeList(getStringColumn("0"), getStringColumn("1")),
- outputColumns, false, -1, 1, -1));
+ outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
Operator<SelectDesc> op4 = OperatorFactory.get(new SelectDesc(Utilities
.makeList(getStringColumn("key"), getStringColumn("value")),
@@ -432,7 +433,7 @@ public class TestExecDriver extends Test
Operator<ReduceSinkDesc> op1 = OperatorFactory.get(PlanUtils
.getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
Utilities.makeList(getStringColumn("tkey"),
- getStringColumn("tvalue")), outputColumns, false, -1, 1, -1));
+ getStringColumn("tvalue")), outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
Operator<ScriptDesc> op0 = OperatorFactory.get(new ScriptDesc(
"\'cat\'", PlanUtils.getDefaultTableDesc("" + Utilities.tabCode,
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q Mon Sep 15 22:46:44 2014
@@ -1,3 +1,6 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
set hive.test.mode=true;
set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/input_lazyserde.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/input_lazyserde.q?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/input_lazyserde.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/input_lazyserde.q Mon Sep 15 22:46:44 2014
@@ -1,5 +1,6 @@
-- SORT_QUERY_RESULTS
+DROP TABLE dest1;
CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '1'
@@ -32,5 +33,21 @@ SELECT * from dest1;
CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0, array("one","two"), struct(5,"five")) FROM srcbucket2;
-SELECT * from destBin ORDER BY a;
+SELECT * from destBin;
DROP TABLE destBin;
+
+DROP TABLE dest2;
+DROP TABLE dest3;
+
+CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>>>>)
+ ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift;
+SELECT a from dest2 limit 10;
+
+CREATE TABLE dest3 (
+unionfield1 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>,
+unionfield2 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>,
+unionfield3 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>
+) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE dest3 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift;
+SELECT unionfield1, unionField2, unionfield3 from dest3 limit 10;
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/list_bucket_dml_8.q?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/list_bucket_dml_8.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/list_bucket_dml_8.q Mon Sep 15 22:46:44 2014
@@ -69,7 +69,6 @@ show partitions list_bucketing_dynamic_p
desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1');
desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');
-set hive.merge.current.job.concatenate.list.bucketing=true;
-- concatenate the partition and it will merge files
alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate;
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q Mon Sep 15 22:46:44 2014
@@ -1,51 +1,87 @@
set hive.merge.orcfile.stripe.level=false;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.optimize.sort.dynamic.partition=false;
+set mapred.min.split.size=1000;
+set mapred.max.split.size=2000;
+set tez.grouping.min-size=1000;
+set tez.grouping.max-size=2000;
+set hive.merge.tezfiles=false;
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
DROP TABLE orcfile_merge1;
DROP TABLE orcfile_merge1b;
+DROP TABLE orcfile_merge1c;
CREATE TABLE orcfile_merge1 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
CREATE TABLE orcfile_merge1b (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
+CREATE TABLE orcfile_merge1c (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
--- Use non stipe-level merge
+-- merge disabled
EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
- SELECT key, value, PMOD(HASH(key), 100) as part
+ SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
- SELECT key, value, PMOD(HASH(key), 100) as part
+ SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-DESC FORMATTED orcfile_merge1 partition (ds='1', part='50');
+DESC FORMATTED orcfile_merge1 partition (ds='1', part='0');
-set hive.merge.orcfile.stripe.level=true;
+set hive.merge.tezfiles=true;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+-- auto-merge slow way
EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
- SELECT key, value, PMOD(HASH(key), 100) as part
+ SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
- SELECT key, value, PMOD(HASH(key), 100) as part
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src;
+
+DESC FORMATTED orcfile_merge1b partition (ds='1', part='0');
+
+set hive.merge.orcfile.stripe.level=true;
+-- auto-merge fast way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src;
+
+INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-DESC FORMATTED orcfile_merge1 partition (ds='1', part='50');
+DESC FORMATTED orcfile_merge1c partition (ds='1', part='0');
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-- Verify
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1 WHERE ds='1'
) t;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1b WHERE ds='1'
) t;
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1c WHERE ds='1'
+) t;
+
+select count(*) from orcfile_merge1;
+select count(*) from orcfile_merge1b;
+select count(*) from orcfile_merge1c;
+
DROP TABLE orcfile_merge1;
DROP TABLE orcfile_merge1b;
+DROP TABLE orcfile_merge1c;
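The revised orc_merge1.q walks the same insert through three ORC write paths: merging disabled, the conventional merge task (hive.merge.mapfiles, hive.merge.mapredfiles, hive.merge.tezfiles), and the stripe-level fast merge (hive.merge.orcfile.stripe.level), which concatenates existing stripes instead of re-reading rows. A minimal sketch of the same settings against a hypothetical table, illustrative only and not part of this patch:

set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;

-- conventional merge: an extra merge job rewrites the small ORC files
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
set hive.merge.tezfiles=true;
set hive.merge.orcfile.stripe.level=false;

-- fast merge: with stripe-level merging on, compatible stripes are concatenated as-is
set hive.merge.orcfile.stripe.level=true;

-- demo_orc is a placeholder for the tables used in the test; src is the standard test source
CREATE TABLE demo_orc (key INT, value STRING)
    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
INSERT OVERWRITE TABLE demo_orc PARTITION (ds='1', part)
    SELECT key, value, PMOD(HASH(key), 2) AS part FROM src;
DESC FORMATTED demo_orc PARTITION (ds='1', part='0');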
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/stats_only_null.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/stats_only_null.q?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/stats_only_null.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/stats_only_null.q Mon Sep 15 22:46:44 2014
@@ -34,6 +34,17 @@ select count(*), count(a), count(b), cou
select count(*), count(a), count(b), count(c), count(d) from stats_null;
select count(*), count(a), count(b), count(c), count(d) from stats_null_part;
+
+drop table stats_null_part;
+set hive.exec.dynamic.partition.mode=nonstrict;
+CREATE TABLE stats_null_part(a double, b int, c STRING, d smallint) partitioned by (dt int) STORED AS TEXTFILE;
+
+insert into table stats_null_part partition(dt) select a,b,c,d,b from temps_null ;
+analyze table stats_null_part compute statistics for columns;
+
+describe formatted stats_null_part.a partition(dt = 1);
+
+reset hive.exec.dynamic.partition.mode;
drop table stats_null;
drop table stats_null_part;
drop table temps_null;
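The new block in stats_only_null.q covers column statistics on a dynamically partitioned table: the table is rebuilt with a partition column, loaded through a dynamic-partition insert, analyzed for column statistics, and a single column of one partition is then described. A minimal sketch of that sequence, with the table and source names purely illustrative:

set hive.exec.dynamic.partition.mode=nonstrict;

-- demo_part and demo_src are placeholders; the test uses stats_null_part and temps_null
CREATE TABLE demo_part (a double, b int, c STRING, d smallint)
    PARTITIONED BY (dt int) STORED AS TEXTFILE;
INSERT INTO TABLE demo_part PARTITION (dt) SELECT a, b, c, d, b FROM demo_src;
ANALYZE TABLE demo_part COMPUTE STATISTICS FOR COLUMNS;

-- per-partition column statistics, including the null count for column a
DESCRIBE FORMATTED demo_part.a PARTITION (dt = 1);

reset hive.exec.dynamic.partition.mode;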
Modified: hive/branches/cbo/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out Mon Sep 15 22:46:44 2014
@@ -29,9 +29,9 @@ No rows affected
'my_stringlist','array<string>','from deserializer'
'my_structlist','array<struct<my_string:string,my_enum:string>>','from deserializer'
'my_enumlist','array<string>','from deserializer'
-'my_stringset','struct<>','from deserializer'
-'my_enumset','struct<>','from deserializer'
-'my_structset','struct<>','from deserializer'
+'my_stringset','array<string>','from deserializer'
+'my_enumset','array<string>','from deserializer'
+'my_structset','array<struct<my_string:string,my_enum:string>>','from deserializer'
'b','string',''
21 rows selected
>>> !record
Modified: hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath1.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath1.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath1.q.out Mon Sep 15 22:46:44 2014
@@ -1,4 +1,4 @@
PREHOOK: query: describe src_thrift.$elem$
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString]
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3]
Modified: hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath2.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath2.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath2.q.out Mon Sep 15 22:46:44 2014
@@ -1,4 +1,4 @@
PREHOOK: query: describe src_thrift.$key$
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString]
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3]
Modified: hive/branches/cbo/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out Mon Sep 15 22:46:44 2014
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a bin
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(smallint) _FUNC_(string) _FUNC_(struct<transactionid:bigint,bucketid:int,rowid:bigint>) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/case_sensitivity.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/case_sensitivity.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/case_sensitivity.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/case_sensitivity.q.out Mon Sep 15 22:46:44 2014
@@ -30,17 +30,17 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src_thrift
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (lint[0] > 0) (type: boolean)
- Statistics: Num rows: 3 Data size: 437 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 837 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: lint[1] (type: int), lintstring[0].MYSTRING (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 3 Data size: 437 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 837 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 3 Data size: 437 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 837 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out Mon Sep 15 22:46:44 2014
@@ -29,24 +29,24 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src_thrift
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: lint (type: array<int>), lstring (type: array<string>), mstringstring (type: map<string,string>), aint (type: int), astring (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
Map-reduce partition columns: 1 (type: int)
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: array<int>), _col1 (type: array<string>), _col2 (type: map<string,string>), _col3 (type: int), _col4 (type: string)
Reduce Operator Tree:
Select Operator
expressions: VALUE._col0 (type: array<int>), VALUE._col1 (type: array<string>), VALUE._col2 (type: map<string,string>), VALUE._col3 (type: int), VALUE._col4 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/convert_enum_to_string.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/convert_enum_to_string.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/convert_enum_to_string.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/convert_enum_to_string.q.out Mon Sep 15 22:46:44 2014
@@ -43,9 +43,9 @@ my_enum_structlist_map map<string,array<
my_stringlist array<string> from deserializer
my_structlist array<struct<my_string:string,my_enum:string,optionals:struct<>>> from deserializer
my_enumlist array<string> from deserializer
-my_stringset struct<> from deserializer
-my_enumset struct<> from deserializer
-my_structset struct<> from deserializer
+my_stringset array<string> from deserializer
+my_enumset array<string> from deserializer
+my_structset array<struct<my_string:string,my_enum:string,optionals:struct<>>> from deserializer
optionals struct<> from deserializer
b string
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out Mon Sep 15 22:46:44 2014
@@ -41,8 +41,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_department'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_department'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_department
PREHOOK: query: describe extended exim_department
PREHOOK: type: DESCTABLE
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_01_nonpart.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_01_nonpart.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_01_nonpart.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_01_nonpart.q.out Mon Sep 15 22:46:44 2014
@@ -49,8 +49,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_department'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_department'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_department
PREHOOK: query: describe extended exim_department
PREHOOK: type: DESCTABLE
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out Mon Sep 15 22:46:44 2014
@@ -43,8 +43,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
PREHOOK: query: describe extended exim_employee
PREHOOK: type: DESCTABLE
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_part.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_part.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_part.q.out Mon Sep 15 22:46:44 2014
@@ -56,8 +56,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
PREHOOK: query: describe extended exim_employee
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out Mon Sep 15 22:46:44 2014
@@ -61,9 +61,11 @@ POSTHOOK: Output: database:importer
POSTHOOK: Output: importer@exim_department
PREHOOK: query: import from 'ql/test/data/exports/exim_department'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
PREHOOK: Output: importer@exim_department
POSTHOOK: query: import from 'ql/test/data/exports/exim_department'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_department
PREHOOK: query: describe extended exim_department
PREHOOK: type: DESCTABLE
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_all_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_all_part.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_all_part.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_all_part.q.out Mon Sep 15 22:46:44 2014
@@ -95,8 +95,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out Mon Sep 15 22:46:44 2014
@@ -101,8 +101,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_05_some_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_05_some_part.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_05_some_part.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_05_some_part.q.out Mon Sep 15 22:46:44 2014
@@ -91,8 +91,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=ka
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_06_one_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_06_one_part.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_06_one_part.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_06_one_part.q.out Mon Sep 15 22:46:44 2014
@@ -89,8 +89,10 @@ POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
PREHOOK: query: describe extended exim_employee
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out Mon Sep 15 22:46:44 2014
@@ -122,9 +122,11 @@ POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=al
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
PREHOOK: Output: importer@exim_employee
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out Mon Sep 15 22:46:44 2014
@@ -72,8 +72,10 @@ POSTHOOK: Output: importer@exim_departme
POSTHOOK: Output: importer@exim_department@emp_org=hr
PREHOOK: query: import table exim_imported_dept from 'ql/test/data/exports/exim_department'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: query: import table exim_imported_dept from 'ql/test/data/exports/exim_department'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_imported_dept
PREHOOK: query: describe extended exim_imported_dept
PREHOOK: type: DESCTABLE
Modified: hive/branches/cbo/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out Mon Sep 15 22:46:44 2014
@@ -133,9 +133,11 @@ POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
PREHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
+#### A masked pattern was here ####
PREHOOK: Output: importer@exim_employee
POSTHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
+#### A masked pattern was here ####
POSTHOOK: Output: importer@exim_employee
POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
PREHOOK: query: describe extended exim_employee