Posted to commits@hive.apache.org by br...@apache.org on 2014/08/24 05:43:57 UTC

svn commit: r1620103 [13/27] - in /hive/branches/spark: ./ accumulo-handler/ common/src/java/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/test/org/apache/hadoop/hive/common/type/ data/files/ hcatalog/stream...

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Sun Aug 24 03:43:48 2014
@@ -331,6 +331,15 @@ TOK_RESOURCE_LIST;
 TOK_COMPACT;
 TOK_SHOW_COMPACTIONS;
 TOK_SHOW_TRANSACTIONS;
+TOK_DELETE_FROM;
+TOK_UPDATE_TABLE;
+TOK_SET_COLUMNS_CLAUSE;
+TOK_VALUE_ROW;
+TOK_VALUES_TABLE;
+TOK_VIRTUAL_TABLE;
+TOK_VIRTUAL_TABREF;
+TOK_ANONYMOUS;
+TOK_COL_NAME;
 }
 
 
@@ -469,6 +478,9 @@ import java.util.HashMap;
     xlateMap.put("KW_DEFINED", "DEFINED");
     xlateMap.put("KW_SUBQUERY", "SUBQUERY");
     xlateMap.put("KW_REWRITE", "REWRITE");
+    xlateMap.put("KW_UPDATE", "UPDATE");
+
+    xlateMap.put("KW_VALUES", "VALUES");
 
     // Operators
     xlateMap.put("DOT", ".");
@@ -638,6 +650,8 @@ execStatement
     | exportStatement
     | importStatement
     | ddlStatement
+    | deleteStatement
+    | updateStatement
     ;
 
 loadStatement
@@ -2095,11 +2109,28 @@ singleFromStatement
     ( b+=body )+ -> ^(TOK_QUERY fromClause body+)
     ;
 
+/*
+The valuesClause rule below ensures that the parse tree for
+"insert into table FOO values (1,2),(3,4)" looks the same as
+"insert into table FOO select a,b from (values(1,2),(3,4)) as BAR(a,b)" which itself is made to look
+very similar to the tree for "insert into table FOO select a,b from BAR".  Since virtual table name
+is implicit, it's represented as TOK_ANONYMOUS.
+*/
 regularBody[boolean topLevel]
    :
    i=insertClause
+   (
    s=selectStatement[topLevel]
      {$s.tree.getChild(1).replaceChildren(0, 0, $i.tree);} -> {$s.tree}
+     |
+     valuesClause
+      -> ^(TOK_QUERY
+            ^(TOK_FROM
+              ^(TOK_VIRTUAL_TABLE ^(TOK_VIRTUAL_TABREF ^(TOK_ANONYMOUS)) valuesClause)
+             )
+            ^(TOK_INSERT {$i.tree} ^(TOK_SELECT ^(TOK_SELEXPR TOK_ALLCOLREF)))
+          )
+   )
    |
    selectStatement[topLevel]
    ;
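
The normalization the comment above describes can be seen by dumping both trees with the ql parser. Below is a minimal sketch, assuming a build that includes this patch and a classpath with hive-exec; it uses only ParseDriver.parse() and ASTNode.dump(), both of which also appear in this commit's test changes:

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    public class ValuesTreeDemo {
      public static void main(String[] args) throws Exception {
        ParseDriver pd = new ParseDriver();
        // The new VALUES form...
        ASTNode values = (ASTNode) pd.parse(
            "insert into table FOO values (1,2),(3,4)").getChild(0);
        // ...and the SELECT form whose tree it is made to resemble.
        ASTNode select = (ASTNode) pd.parse(
            "insert into table FOO select a,b from BAR").getChild(0);
        // Both dumps show TOK_QUERY(TOK_FROM ..., TOK_INSERT ...); the VALUES
        // form carries TOK_VIRTUAL_TABLE/TOK_VIRTUAL_TABREF/TOK_ANONYMOUS
        // under TOK_FROM where the SELECT form has a TOK_TABREF for BAR.
        System.out.println(values.dump());
        System.out.println(select.dump());
      }
    }
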
@@ -2208,3 +2239,34 @@ limitClause
    :
    KW_LIMIT num=Number -> ^(TOK_LIMIT $num)
    ;
+
+//DELETE FROM <tableName> WHERE ...;
+deleteStatement
+@init { pushMsg("delete statement", state); }
+@after { popMsg(state); }
+   :
+   KW_DELETE KW_FROM tableName (whereClause)? -> ^(TOK_DELETE_FROM tableName whereClause?)
+   ;
+
+/* SET <columnName> = (3 + col2) */
+columnAssignmentClause
+   :
+   tableOrColumn EQUAL^ atomExpression
+   ;
+
+/* SET col1 = 5, col2 = (4 + col4), ... */
+setColumnsClause
+   :
+   KW_SET columnAssignmentClause (COMMA columnAssignmentClause)* -> ^(TOK_SET_COLUMNS_CLAUSE columnAssignmentClause* )
+   ;
+
+/* 
+  UPDATE <table> 
+  SET col1 = val1, col2 = val2... WHERE ...
+*/
+updateStatement
+@init { pushMsg("update statement", state); }
+@after { popMsg(state); }
+   :
+   KW_UPDATE tableName setColumnsClause whereClause? -> ^(TOK_UPDATE_TABLE tableName setColumnsClause whereClause?)
+   ;
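
The two new statements can be exercised the same way; note that at this point they only parse (the semantic analyzer still rejects them, see the SemanticAnalyzer.java change below). A hedged sketch, with a hypothetical table name:

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    public class AcidParseDemo {
      public static void main(String[] args) throws Exception {
        ParseDriver pd = new ParseDriver();
        // Expected root: TOK_DELETE_FROM(tableName, whereClause?)
        System.out.println(((ASTNode) pd.parse(
            "delete from page_view where userid = 0").getChild(0)).dump());
        // Expected root: TOK_UPDATE_TABLE(tableName, TOK_SET_COLUMNS_CLAUSE, whereClause?)
        System.out.println(((ASTNode) pd.parse(
            "update page_view set cnt = cnt + 1 where userid = 0").getChild(0)).dump());
      }
    }
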

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g Sun Aug 24 03:43:48 2014
@@ -538,5 +538,5 @@ functionIdentifier
 
 nonReserved
     :
-    KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | 
 KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE |
 KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION
+    KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | 
 KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE |
 KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES
     ;

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Sun Aug 24 03:43:48 2014
@@ -972,6 +972,8 @@ public class SemanticAnalyzer extends Ba
         ASTNode frm = (ASTNode) ast.getChild(0);
         if (frm.getToken().getType() == HiveParser.TOK_TABREF) {
           processTable(qb, frm);
+        } else if (frm.getToken().getType() == HiveParser.TOK_VIRTUAL_TABLE) {
+          throw new RuntimeException("VALUES() clause is not fully supported yet...");
         } else if (frm.getToken().getType() == HiveParser.TOK_SUBQUERY) {
           processSubQuery(qb, frm);
         } else if (frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW ||
@@ -1164,6 +1166,10 @@ public class SemanticAnalyzer extends Ba
       case HiveParser.TOK_CTE:
         processCTE(qb, ast);
         break;
+      case HiveParser.TOK_DELETE_FROM:
+        throw new RuntimeException("DELETE is not (yet) implemented...");
+      case HiveParser.TOK_UPDATE_TABLE:
+        throw new RuntimeException("UPDATE is not (yet) implemented...");
       default:
         skipRecursion = false;
         break;
@@ -10337,6 +10343,19 @@ public class SemanticAnalyzer extends Ba
     try {
       Table oldView = getTable(createVwDesc.getViewName(), false);
 
+      // Do not allow view to be defined on temp table
+      Set<String> tableAliases = qb.getTabAliases();
+      for (String alias : tableAliases) {
+        try {
+          Table table = db.getTable(qb.getTabNameForAlias(alias));
+          if (table.isTemporary()) {
+            throw new SemanticException("View definition references temporary table " + alias);
+          }
+        } catch (HiveException ex) {
+          throw new SemanticException(ex);
+        }
+      }
+
       // ALTER VIEW AS SELECT requires the view must exist
       if (createVwDesc.getIsAlterViewAs() && oldView == null) {
         String viewNotExistErrorMsg =

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java Sun Aug 24 03:43:48 2014
@@ -49,6 +49,9 @@ public enum HiveCommand {
       if (command.length > 1 && "role".equalsIgnoreCase(command[1])) {
         // special handling for set role r1 statement
         return null;
+      } else if (command.length > 1 && "from".equalsIgnoreCase(command[1])) {
+        // special handling for the SQL statement "delete from <table> where ..."
+        return null;
       } else if (COMMANDS.contains(cmd)) {
         return HiveCommand.valueOf(cmd);
       }
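
HiveCommand.find() looks at the first whitespace-separated token of a line to decide between a command processor ("add", "delete", "set", "dfs", ...) and the SQL compiler; returning null means "hand the whole line to the compiler". Below is a self-contained sketch of that dispatch with the new branch; the tokenization and command handling here are simplifications for illustration, not the real implementation:

    public class DispatchSketch {
      static String find(String[] command) {
        String cmd = command[0].toUpperCase();
        if ("SET".equals(cmd) && command.length > 1
            && "role".equalsIgnoreCase(command[1])) {
          return null; // special handling for the "set role r1" statement
        } else if ("DELETE".equals(cmd) && command.length > 1
            && "from".equalsIgnoreCase(command[1])) {
          return null; // new: "delete from <table> where ..." is SQL,
                       // unlike the resource command "delete jar/file ..."
        }
        return cmd;
      }

      public static void main(String[] args) {
        System.out.println(find("delete jar my.jar".split("\\s+")));         // DELETE
        System.out.println(find("delete from t where a = 1".split("\\s+"))); // null
      }
    }
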

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java Sun Aug 24 03:43:48 2014
@@ -18,11 +18,8 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
@@ -79,8 +76,12 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampObjectInspector;
 import org.apache.hadoop.io.BytesWritable;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 public class StatsUtils {
 
@@ -223,7 +224,7 @@ public class StatsUtils {
           if (aggrStats.getPartsFound() != partNames.size() && colState != State.NONE) {
             LOG.debug("Column stats requested for : " + partNames.size() +" partitions. "
               + "Able to retrieve for " + aggrStats.getPartsFound() + " partitions");
-            stats.updateColumnStatsState(State.PARTIAL);
+            colState = State.PARTIAL; // keep PARTIAL in the local state so setColumnStatsState() below does not overwrite it
           }
           stats.setColumnStatsState(colState);
         }
@@ -1216,4 +1217,33 @@ public class StatsUtils {
     }
     return result;
   }
+
+  /**
+   * Returns all table aliases referenced by the given expression nodes.
+   * @param columnExprMap - column expression map
+   * @return the set of table aliases found in the expressions
+   */
+  public static Set<String> getAllTableAlias(
+      Map<String, ExprNodeDesc> columnExprMap) {
+    Set<String> result = new HashSet<String>();
+    if (columnExprMap != null) {
+      for (ExprNodeDesc end : columnExprMap.values()) {
+        getTableAliasFromExprNode(end, result);
+      }
+    }
+    return result;
+  }
+
+  private static void getTableAliasFromExprNode(ExprNodeDesc end,
+      Set<String> output) {
+
+    if (end instanceof ExprNodeColumnDesc) {
+      output.add(((ExprNodeColumnDesc) end).getTabAlias());
+    } else if (end instanceof ExprNodeGenericFuncDesc) {
+      for (ExprNodeDesc child : end.getChildren()) {
+        getTableAliasFromExprNode(child, output);
+      }
+    }
+
+  }
 }
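
A minimal usage sketch for the new helper; the map contents below are made up for illustration (the (typeInfo, column, tabAlias, isPartitionCol) constructor of ExprNodeColumnDesc is the one used throughout ql):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.ql.stats.StatsUtils;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class AliasDemo {
      public static void main(String[] args) {
        Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
        // _col0 comes from column "key" of alias "t1", _col1 from "t2"
        columnExprMap.put("_col0",
            new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "key", "t1", false));
        columnExprMap.put("_col1",
            new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "value", "t2", false));
        Set<String> aliases = StatsUtils.getAllTableAlias(columnExprMap);
        System.out.println(aliases); // expected: [t1, t2] (set order not guaranteed)
      }
    }
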

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java Sun Aug 24 03:43:48 2014
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobClient;
@@ -485,20 +486,21 @@ public class CompactorMR {
   }
 
   static class CompactorMap<V extends Writable>
-      implements Mapper<NullWritable, CompactorInputSplit,  NullWritable,  NullWritable> {
+      implements Mapper<WritableComparable, CompactorInputSplit,  NullWritable,  NullWritable> {
 
     JobConf jobConf;
     RecordWriter writer;
 
     @Override
-    public void map(NullWritable key, CompactorInputSplit split,
+    public void map(WritableComparable key, CompactorInputSplit split,
                     OutputCollector<NullWritable, NullWritable> nullWritableVOutputCollector,
                     Reporter reporter) throws IOException {
       // This will only get called once, since CompactRecordReader only returns one record,
       // the input split.
      // Based on the split we're passed, we instantiate the real reader and then iterate on it
       // until it finishes.
-      AcidInputFormat aif =
+      @SuppressWarnings("unchecked") // since there is no way to parameterize an instance of Class
+      AcidInputFormat<WritableComparable, V> aif =
           instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME));
       ValidTxnList txnList =
           new ValidTxnListImpl(jobConf.get(ValidTxnList.VALID_TXNS_KEY));
@@ -541,7 +543,8 @@ public class CompactorMR {
             .bucket(bucket);
 
         // Instantiate the underlying output format
-        AcidOutputFormat<V> aof =
+        @SuppressWarnings("unchecked") // since there is no way to parameterize an instance of Class
+        AcidOutputFormat<WritableComparable, V> aof =
             instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME));
 
         writer = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options);
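
The suppressions above are needed because a Class literal cannot carry type parameters, so anything instantiated reflectively from a configured class name comes back effectively raw. A generic, self-contained sketch of the same pattern (the instantiate() helper here is a stand-in for CompactorMR's private one, which reads the class name from the JobConf):

    import java.util.List;

    public class InstantiateSketch {
      static <T> T instantiate(Class<T> cls, String className) throws Exception {
        return cls.cast(Class.forName(className).newInstance());
      }

      public static void main(String[] args) throws Exception {
        @SuppressWarnings("unchecked") // since there is no way to parameterize an instance of Class
        List<String> strings = instantiate(List.class, "java.util.ArrayList");
        strings.add("compacted");
        System.out.println(strings); // [compacted]
      }
    }
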

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java Sun Aug 24 03:43:48 2014
@@ -179,6 +179,7 @@ public abstract class GenericUDAFStreami
 
       for (int i = 0; i < ss.numFollowing; i++) {
         ss.results.add(getNextResult(ss));
+        ss.numRows++; // count the rows emitted for the trailing window as well
       }
       return o;
     }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java Sun Aug 24 03:43:48 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -31,6 +32,23 @@ import org.apache.hadoop.hive.serde2.obj
  * thrown. 2. c and f should be compatible types, or an exception will be
  * thrown.
  */
+@Description(
+    name = "case",
+    value = "CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END - "
+        + "When a = b, returns c; when a = d, return e; else return f",
+    extended = "Example:\n "
+    + "SELECT\n"
+    + " CASE deptno\n"
+    + "   WHEN 1 THEN Engineering\n"
+    + "   WHEN 2 THEN Finance\n"
+    + "   ELSE admin\n"
+    + " END,\n"
+    + " CASE zone\n"
+    + "   WHEN 7 THEN Americas\n"
+    + "   ELSE Asia-Pac\n"
+    + " END\n"
+    + " FROM emp_details")
+
 public class GenericUDFCase extends GenericUDF {
   private transient ObjectInspector[] argumentOIs;
   private transient GenericUDFUtils.ReturnObjectInspectorResolver returnOIResolver;

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java Sun Aug 24 03:43:48 2014
@@ -18,8 +18,17 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.udf.UDFType;
+@Description(
+    name = "lag",
+    value = "LAG  (scalar_expression [,offset] [,default]) OVER ([query_partition_clause] order_by_clause); "
+        + "The LAG function is used to access data from a previous row.",
+    extended = "Example:\n "
+    + "select p1.p_mfgr, p1.p_name, p1.p_size,\n"
+    + " p1.p_size - lag(p1.p_size,1,p1.p_size) over( distribute by p1.p_mfgr sort by p1.p_name) as deltaSz\n"
+    + " from part p1 join part p2 on p1.p_partkey = p2.p_partkey")
 
 @UDFType(impliesOrder = true)
 public class GenericUDFLag extends GenericUDFLeadLag {

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java Sun Aug 24 03:43:48 2014
@@ -18,8 +18,19 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.udf.UDFType;
+@Description(
+    name = "lead",
+    value = "LEAD (scalar_expression [,offset] [,default]) OVER ([query_partition_clause] order_by_clause); "
+        + "The LEAD function is used to return data from the next row. ",
+    extended = "Example:\n "
+    + "select p_name, p_retailprice, lead(p_retailprice) over() as l1,\n"
+    + " lag(p_retailprice) over() as l2\n"
+    + " from part\n"
+    + " where p_retailprice = 1173.15")
+
 
 @UDFType(impliesOrder = true)
 public class GenericUDFLead extends GenericUDFLeadLag {

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java Sun Aug 24 03:43:48 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -31,6 +32,23 @@ import org.apache.hadoop.hive.serde2.obj
  * NOTES: 1. a and c should be boolean, or an exception will be thrown. 2. b, d
  * and f should be common types, or an exception will be thrown.
  */
+@Description(
+    name = "when",
+    value = "CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END - "
+        + "When a = true, returns b; when c = true, return d; else return e",
+    extended = "Example:\n "
+    + "SELECT\n"
+    + " CASE\n"
+    + "   WHEN deptno=1 THEN Engineering\n"
+    + "   WHEN deptno=2 THEN Finance\n"
+    + "   ELSE admin\n"
+    + " END,\n"
+    + " CASE\n"
+    + "   WHEN zone=7 THEN Americas\n"
+    + "   ELSE Asia-Pac\n"
+    + " END\n"
+    + " FROM emp_details")
+
 public class GenericUDFWhen extends GenericUDF {
   private transient ObjectInspector[] argumentOIs;
   private transient GenericUDFUtils.ReturnObjectInspectorResolver returnOIResolver;

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java Sun Aug 24 03:43:48 2014
@@ -332,7 +332,7 @@ public class TestVectorTypeCasts {
         StringExpr.compare(v, 0, v.length,
             r.vector[1], r.start[1], r.length[1]));
 
-    v = toBytes("9999999999999999");
+    v = toBytes("9999999999999999.00");
     Assert.assertEquals(0,
         StringExpr.compare(v, 0, v.length,
             r.vector[2], r.start[2], r.length[2]));

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Sun Aug 24 03:43:48 2014
@@ -973,7 +973,7 @@ public class TestInputOutputFormat {
     List<? extends StructField> fields =inspector.getAllStructFieldRefs();
     IntObjectInspector intInspector =
         (IntObjectInspector) fields.get(0).getFieldObjectInspector();
-    assertEquals(0.0, reader.getProgress(), 0.00001);
+    assertEquals(0.33, reader.getProgress(), 0.01);
     while (reader.next(key, value)) {
       assertEquals(++rowNum, intInspector.get(inspector.
           getStructFieldData(serde.deserialize(value), fields.get(0))));

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java Sun Aug 24 03:43:48 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
+import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.io.IntWritable;
@@ -95,6 +96,10 @@ public class TestOrcRecordUpdater {
     updater.insert(12, new MyRow("fourth"));
     updater.insert(12, new MyRow("fifth"));
     updater.flush();
+
+    // Check the stats
+    assertEquals(5L, updater.getStats().getRowCount());
+
     Path bucketPath = AcidUtils.createFilename(root, options);
     Path sidePath = OrcRecordUpdater.getSideFile(bucketPath);
     DataInputStream side = fs.open(sidePath);
@@ -158,6 +163,8 @@ public class TestOrcRecordUpdater {
     reader = OrcFile.createReader(bucketPath,
         new OrcFile.ReaderOptions(conf).filesystem(fs));
     assertEquals(6, reader.getNumberOfRows());
+    assertEquals(6L, updater.getStats().getRowCount());
+
     assertEquals(false, fs.exists(sidePath));
   }
 
@@ -182,6 +189,7 @@ public class TestOrcRecordUpdater {
     RecordUpdater updater = new OrcRecordUpdater(root, options);
     updater.update(100, 10, 30, new MyRow("update"));
     updater.delete(100, 40, 60);
+    assertEquals(-1L, updater.getStats().getRowCount());
     updater.close(false);
     Path bucketPath = AcidUtils.createFilename(root, options);
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java Sun Aug 24 03:43:48 2014
@@ -88,6 +88,26 @@ public class TestHiveSchemaConverter {
   }
 
   @Test
+  public void testCharType() throws Exception {
+    testConversion(
+        "a",
+        "char(5)",
+        "message hive_schema {\n"
+            + "  optional binary a (UTF8);\n"
+            + "}\n");
+  }
+
+  @Test
+  public void testVarcharType() throws Exception {
+    testConversion(
+        "a",
+        "varchar(10)",
+        "message hive_schema {\n"
+            + "  optional binary a (UTF8);\n"
+            + "}\n");
+  }
+
+  @Test
   public void testArray() throws Exception {
     testConversion("arrayCol",
             "array<int>",

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java Sun Aug 24 03:43:48 2014
@@ -18,15 +18,7 @@
 
 package org.apache.hadoop.hive.ql.io.sarg;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-
-import java.beans.XMLDecoder;
-import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.util.List;
-import java.util.Set;
-
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
@@ -37,7 +29,15 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.junit.Test;
 
-import com.google.common.collect.Sets;
+import java.beans.XMLDecoder;
+import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Set;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertTrue;
 
 /**
  * These test the SARG implementation.
@@ -2828,7 +2828,7 @@ public class TestSearchArgumentImpl {
             .build();
     assertEquals("leaf-0 = (LESS_THAN x 1970-01-11)\n" +
         "leaf-1 = (LESS_THAN_EQUALS y hi)\n" +
-        "leaf-2 = (EQUALS z 1)\n" +
+        "leaf-2 = (EQUALS z 1.0)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
 
     sarg = SearchArgument.FACTORY.newBuilder()
@@ -2847,4 +2847,36 @@ public class TestSearchArgumentImpl {
         "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" +
         "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString());
   }
+
+  @Test
+  public void testBuilderComplexTypes2() throws Exception {
+    SearchArgument sarg =
+        SearchArgument.FACTORY.newBuilder()
+            .startAnd()
+            .lessThan("x", new DateWritable(10))
+            .lessThanEquals("y", new HiveChar("hi", 10))
+            .equals("z", new BigDecimal("1.0"))
+            .end()
+            .build();
+    assertEquals("leaf-0 = (LESS_THAN x 1970-01-11)\n" +
+        "leaf-1 = (LESS_THAN_EQUALS y hi)\n" +
+        "leaf-2 = (EQUALS z 1.0)\n" +
+        "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
+
+    sarg = SearchArgument.FACTORY.newBuilder()
+        .startNot()
+        .startOr()
+        .isNull("x")
+        .between("y", new BigDecimal(10), 20.0)
+        .in("z", (byte)1, (short)2, (int)3)
+        .nullSafeEquals("a", new HiveVarchar("stinger", 100))
+        .end()
+        .end()
+        .build();
+    assertEquals("leaf-0 = (IS_NULL x)\n" +
+        "leaf-1 = (BETWEEN y 10 20.0)\n" +
+        "leaf-2 = (IN z 1 2 3)\n" +
+        "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" +
+        "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString());
+  }
 }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java Sun Aug 24 03:43:48 2014
@@ -21,12 +21,12 @@ import junit.framework.Assert;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.DummyPartition;
+import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.log4j.Level;
@@ -137,6 +137,43 @@ public class TestDbTxnManager {
     Assert.assertEquals(0, locks.size());
   }
 
+
+  @Test
+  public void testSingleWritePartition() throws Exception {
+    WriteEntity we = addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.commitTxn();
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testWriteDynamicPartition() throws Exception {
+    WriteEntity we = addDynamicPartitionedOutput(newTable(true), WriteEntity.WriteType.INSERT);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    /* Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId)); */
+    // Make sure we're locking the whole table, since this is dynamic partitioning
+    ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks();
+    List<ShowLocksResponseElement> elms = rsp.getLocks();
+    Assert.assertEquals(1, elms.size());
+    Assert.assertNotNull(elms.get(0).getTablename());
+    Assert.assertNull(elms.get(0).getPartname());
+    txnMgr.commitTxn();
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
   @Test
   public void testReadWrite() throws Exception {
     Table t = newTable(true);
@@ -252,6 +289,7 @@ public class TestDbTxnManager {
 
   @After
   public void tearDown() throws Exception {
+    if (txnMgr != null) txnMgr.closeTxnManager();
     TxnDbUtil.cleanDb();
   }
 
@@ -318,4 +356,12 @@ public class TestDbTxnManager {
     writeEntities.add(we);
     return we;
   }
+
+  private WriteEntity addDynamicPartitionedOutput(Table t, WriteEntity.WriteType writeType)
+      throws Exception {
+    DummyPartition dp = new DummyPartition(t, "no clue what I should call this");
+    WriteEntity we = new WriteEntity(dp, writeType, false);
+    writeEntities.add(we);
+    return we;
+  }
 }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java Sun Aug 24 03:43:48 2014
@@ -65,7 +65,6 @@ public class TestQBCompact {
   private AlterTableSimpleDesc parseAndAnalyze(String query) throws Exception {
     ParseDriver hd = new ParseDriver();
     ASTNode head = (ASTNode)hd.parse(query).getChild(0);
-    System.out.println("HERE " + head.dump());
     BaseSemanticAnalyzer a = SemanticAnalyzerFactory.get(conf, head);
     a.analyze(head, new Context(conf));
     List<Task<? extends Serializable>> roots = a.getRootTasks();

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Sun Aug 24 03:43:48 2014
@@ -36,9 +36,9 @@ import org.apache.hadoop.hive.ql.io.Reco
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.util.Progressable;
 import org.apache.thrift.TException;
@@ -276,7 +276,7 @@ public abstract class CompactorTest {
     }
   }
 
-  static class MockInputFormat implements AcidInputFormat<Text> {
+  static class MockInputFormat implements AcidInputFormat<WritableComparable,Text> {
 
     @Override
     public AcidInputFormat.RowReader<Text> getReader(InputSplit split,
@@ -315,7 +315,7 @@ public abstract class CompactorTest {
     }
 
     @Override
-    public RecordReader<NullWritable, Text> getRecordReader(InputSplit inputSplit, JobConf entries,
+    public RecordReader<WritableComparable, Text> getRecordReader(InputSplit inputSplit, JobConf entries,
                                                             Reporter reporter) throws IOException {
       return null;
     }
@@ -398,7 +398,7 @@ public abstract class CompactorTest {
   // This class isn't used and I suspect does totally the wrong thing.  It's only here so that I
   // can provide some output format to the tables and partitions I create.  I actually write to
  // those tables' directories directly.
-  static class MockOutputFormat implements AcidOutputFormat<Text> {
+  static class MockOutputFormat implements AcidOutputFormat<WritableComparable, Text> {
 
     @Override
     public RecordUpdater getRecordUpdater(Path path, Options options) throws
@@ -420,7 +420,7 @@ public abstract class CompactorTest {
     }
 
     @Override
-    public RecordWriter<NullWritable, Text> getRecordWriter(FileSystem fileSystem, JobConf entries,
+    public RecordWriter<WritableComparable, Text> getRecordWriter(FileSystem fileSystem, JobConf entries,
                                                             String s,
                                                             Progressable progressable) throws
         IOException {

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java Sun Aug 24 03:43:48 2014
@@ -187,7 +187,7 @@ public class TestGenericUDFOPDivide exte
     PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
     Assert.assertEquals(TypeInfoFactory.getDecimalTypeInfo(11, 7), oi.getTypeInfo());
     HiveDecimalWritable res = (HiveDecimalWritable) udf.evaluate(args);
-    Assert.assertEquals(HiveDecimal.create("0.06171"), res.getHiveDecimal());
+    Assert.assertEquals(HiveDecimal.create("0.0617100"), res.getHiveDecimal());
   }
 
   @Test
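
The updated expectation reflects decimal results now keeping the trailing zeros implied by the result type's scale (decimal(11,7) here) rather than trimming them. A hedged sketch of just that comparison, assuming the HiveDecimal semantics this commit's tests encode:

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    public class DecimalScaleDemo {
      public static void main(String[] args) {
        HiveDecimal trimmed = HiveDecimal.create("0.06171");
        HiveDecimal padded = HiveDecimal.create("0.0617100");
        // Under the new semantics the trailing zeros are significant, which
        // is why the assertion above had to change:
        System.out.println(trimmed.equals(padded)); // expected: false
      }
    }
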

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/alter_rename_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/alter_rename_table.q?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/alter_rename_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/alter_rename_table.q Sun Aug 24 03:43:48 2014
@@ -26,3 +26,10 @@ ALTER TABLE source.srcpart RENAME TO tar
 ALTER TABLE source.srcpart RENAME TO target.srcpart;
 
 select * from target.srcpart tablesample (10 rows);
+
+create table source.src like default.src;
+create table source.src1 like default.src;
+load data local inpath '../../data/files/kv1.txt' overwrite into table source.src;
+
+ALTER TABLE source.src RENAME TO target.src1;
+select * from target.src1 tablesample (10 rows);
\ No newline at end of file

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/literal_decimal.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/literal_decimal.q?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/literal_decimal.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/literal_decimal.q Sun Aug 24 03:43:48 2014
@@ -1,5 +1,5 @@
 set hive.fetch.task.conversion=more;
 
-EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1;
+EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1;
 
-SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1;
+SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_ppd_decimal.q?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_ppd_decimal.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_ppd_decimal.q Sun Aug 24 03:43:48 2014
@@ -85,6 +85,18 @@ set hive.optimize.index.filter=true;
 select sum(hash(*)) from newtypesorc where d<=cast('11.22' as float);
 
 set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<=11.22BD;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<=11.22BD;
+
+set hive.optimize.index.filter=false;
 select sum(hash(*)) from newtypesorc where d<=12;
 
 set hive.optimize.index.filter=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q Sun Aug 24 03:43:48 2014
@@ -8,7 +8,9 @@ CREATE TABLE parquet_types_staging (
   cfloat float,
   cdouble double,
   cstring1 string,
-  t timestamp
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10)
 ) ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|';
 
@@ -19,7 +21,9 @@ CREATE TABLE parquet_types (
   cfloat float,
   cdouble double,
   cstring1 string,
-  t timestamp
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10)
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q Sun Aug 24 03:43:48 2014
@@ -438,3 +438,9 @@ select p_mfgr, 
 from part 
 where p_mfgr = 'Manufacturer#6'
 ;
+
+-- 46. window sz is same as partition sz
+select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) 
+from part 
+where p_mfgr='Manufacturer#1';

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_create_func1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_create_func1.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_create_func1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_create_func1.q.out Sun Aug 24 03:43:48 2014
@@ -1 +1 @@
-FAILED: HiveAccessControlException Permission denied: Principal [name=hive_test_user, type=USER] does not have following privileges for operation CREATEFUNCTION [[ADMIN PRIVILEGE] on Object [type=DATABASE, name=default], [ADMIN PRIVILEGE] on Object [type=FUNCTION, name=perm_fn]]
+FAILED: HiveAccessControlException Permission denied: Principal [name=hive_test_user, type=USER] does not have following privileges for operation CREATEFUNCTION [[ADMIN PRIVILEGE] on Object [type=DATABASE, name=default], [ADMIN PRIVILEGE] on Object [type=FUNCTION, name=default.perm_fn]]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out Sun Aug 24 03:43:48 2014
@@ -133,10 +133,10 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey0} {VALUE._col0}
             1 {VALUE._col0}
-          outputColumnNames: _col0, _col1, _col6
+          outputColumnNames: _col0, _col1, _col7
           Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+            expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -209,10 +209,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: int)
                   1 key (type: int)
-                outputColumnNames: _col0, _col1, _col6
+                outputColumnNames: _col0, _col1, _col7
                 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out Sun Aug 24 03:43:48 2014
@@ -104,10 +104,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: int)
                   1 key (type: int)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string)
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true.q.out Sun Aug 24 03:43:48 2014
@@ -23,10 +23,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: assert_true((_col4 > 0)) (type: void)
+                    expressions: assert_true((_col5 > 0)) (type: void)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -47,10 +47,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: assert_true((_col4 > 0)) (type: void)
+                      expressions: assert_true((_col5 > 0)) (type: void)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit
@@ -100,10 +100,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: assert_true((_col4 < 2)) (type: void)
+                    expressions: assert_true((_col5 < 2)) (type: void)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -124,10 +124,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: assert_true((_col4 < 2)) (type: void)
+                      expressions: assert_true((_col5 < 2)) (type: void)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true2.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_assert_true2.q.out Sun Aug 24 03:43:48 2014
@@ -18,10 +18,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: (1 + assert_true((_col4 < 2))) (type: double)
+                    expressions: (1 + assert_true((_col5 < 2))) (type: double)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -42,10 +42,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: (1 + assert_true((_col4 < 2))) (type: double)
+                      expressions: (1 + assert_true((_col5 < 2))) (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_local_resource.q.out Sun Aug 24 03:43:48 2014
@@ -1,5 +1,5 @@
 PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '../../data/files/sales.txt'
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
-PREHOOK: Output: lookup
+PREHOOK: Output: default.lookup
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. Hive warehouse is non-local, but ../../data/files/sales.txt specifies file on local filesystem. Resources on non-local warehouse should specify a non-local scheme/path

Modified: hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out Sun Aug 24 03:43:48 2014
@@ -1,6 +1,6 @@
 PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'nonexistent_file.txt'
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
-PREHOOK: Output: lookup
+PREHOOK: Output: default.lookup
 nonexistent_file.txt does not exist
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. nonexistent_file.txt does not exist

Modified: hive/branches/spark/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/allcolref_in_udf.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/allcolref_in_udf.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/allcolref_in_udf.q.out Sun Aug 24 03:43:48 2014
@@ -112,10 +112,10 @@ STAGE PLANS:
           condition expressions:
             0 {VALUE._col0} {VALUE._col1}
             1 {VALUE._col0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col4, _col5
+          outputColumnNames: _col0, _col1, _col5, _col6
           Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: 2 (type: int), concat(_col0, _col1, _col4, _col5) (type: string), concat(_col0, _col1) (type: string), concat(_col4, _col5) (type: string), concat(_col0, _col1, _col4) (type: string), concat(_col0, _col4, _col5) (type: string)
+            expressions: 2 (type: int), concat(_col0, _col1, _col5, _col6) (type: string), concat(_col0, _col1) (type: string), concat(_col5, _col6) (type: string), concat(_col0, _col1, _col5) (type: string), concat(_col0, _col5, _col6) (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
             Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
             UDTF Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_rename_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_rename_table.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_rename_table.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_rename_table.q.out Sun Aug 24 03:43:48 2014
@@ -212,3 +212,56 @@ POSTHOOK: Input: target@srcpart@ds=2008-
 278	val_278	2008-04-08	11
 98	val_98	2008-04-08	11
 484	val_484	2008-04-08	11
+PREHOOK: query: create table source.src like default.src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:source
+PREHOOK: Output: source@source.src
+POSTHOOK: query: create table source.src like default.src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:source
+POSTHOOK: Output: source@source.src
+POSTHOOK: Output: source@src
+PREHOOK: query: create table source.src1 like default.src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:source
+PREHOOK: Output: source@source.src1
+POSTHOOK: query: create table source.src1 like default.src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:source
+POSTHOOK: Output: source@source.src1
+POSTHOOK: Output: source@src1
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: source@src
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: source@src
+PREHOOK: query: ALTER TABLE source.src RENAME TO target.src1
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: source@src
+PREHOOK: Output: source@src
+POSTHOOK: query: ALTER TABLE source.src RENAME TO target.src1
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: source@src
+POSTHOOK: Output: source@src
+POSTHOOK: Output: target@src1
+PREHOOK: query: select * from target.src1 tablesample (10 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: target@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from target.src1 tablesample (10 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: target@src1
+#### A masked pattern was here ####
+238	val_238
+86	val_86
+311	val_311
+27	val_27
+165	val_165
+409	val_409
+255	val_255
+278	val_278
+98	val_98
+484	val_484

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out Sun Aug 24 03:43:48 2014
@@ -193,10 +193,10 @@ STAGE PLANS:
           condition expressions:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -259,13 +259,13 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
-            predicate: ((_col1 = _col5) and (_col0 = _col6)) (type: boolean)
+            predicate: ((_col1 = _col6) and (_col0 = _col7)) (type: boolean)
             Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
               Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
@@ -324,10 +324,10 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -390,18 +390,18 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
-          Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
+          Statistics: Num rows: 11 Data size: 2134 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
-            predicate: (((_col1 = _col5) and (_col0 = _col6)) and (_col6 = _col0)) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+            predicate: (((_col1 = _col6) and (_col0 = _col7)) and (_col7 = _col0)) (type: boolean)
+            Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -474,10 +474,10 @@ STAGE PLANS:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13
           Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
             Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -553,10 +553,10 @@ STAGE PLANS:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
           Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
             Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -633,10 +633,10 @@ STAGE PLANS:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
             2 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
           Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
             Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out Sun Aug 24 03:43:48 2014
@@ -366,14 +366,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -403,14 +403,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), locid (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_create_func1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_create_func1.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_create_func1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_create_func1.q.out Sun Aug 24 03:43:48 2014
@@ -13,11 +13,11 @@ POSTHOOK: Output: temp_fn
 PREHOOK: query: create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
-PREHOOK: Output: perm_fn
+PREHOOK: Output: default.perm_fn
 POSTHOOK: query: create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: database:default
-POSTHOOK: Output: perm_fn
+POSTHOOK: Output: default.perm_fn
 PREHOOK: query: drop temporary function temp_fn
 PREHOOK: type: DROPFUNCTION
 PREHOOK: Output: temp_fn
@@ -27,8 +27,8 @@ POSTHOOK: Output: temp_fn
 PREHOOK: query: drop function perm_fn
 PREHOOK: type: DROPFUNCTION
 PREHOOK: Output: database:default
-PREHOOK: Output: perm_fn
+PREHOOK: Output: default.perm_fn
 POSTHOOK: query: drop function perm_fn
 POSTHOOK: type: DROPFUNCTION
 POSTHOOK: Output: database:default
-POSTHOOK: Output: perm_fn
+POSTHOOK: Output: default.perm_fn

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join1.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join1.q.out Sun Aug 24 03:43:48 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col5
+                outputColumnNames: _col0, _col6
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join14.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join14.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join14.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join14.q.out Sun Aug 24 03:43:48 2014
@@ -65,10 +65,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col5
+                outputColumnNames: _col0, _col6
                 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join15.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join15.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join15.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join15.q.out Sun Aug 24 03:43:48 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string)
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join17.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join17.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join17.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join17.q.out Sun Aug 24 03:43:48 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join19.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join19.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join19.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join19.q.out Sun Aug 24 03:43:48 2014
@@ -63,10 +63,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col7
+                outputColumnNames: _col0, _col8
                 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col7 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col8 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join2.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join2.q.out Sun Aug 24 03:43:48 2014
@@ -57,7 +57,7 @@ STAGE PLANS:
                   0 {_col0}
                   1 {value}
                 keys:
-                  0 (_col0 + _col4) (type: double)
+                  0 (_col0 + _col5) (type: double)
                   1 UDFToDouble(key) (type: double)
 
   Stage: Stage-6
@@ -78,10 +78,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col4
+                outputColumnNames: _col0, _col5
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: (_col0 + _col4) is not null (type: boolean)
+                  predicate: (_col0 + _col5) is not null (type: boolean)
                   Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
@@ -90,12 +90,12 @@ STAGE PLANS:
                       0 {_col0}
                       1 {value}
                     keys:
-                      0 (_col0 + _col4) (type: double)
+                      0 (_col0 + _col5) (type: double)
                       1 UDFToDouble(key) (type: double)
-                    outputColumnNames: _col0, _col9
+                    outputColumnNames: _col0, _col11
                     Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col9 (type: string)
+                      expressions: UDFToInteger(_col0) (type: int), _col11 (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join20.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join20.q.out?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join20.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join20.q.out Sun Aug 24 03:43:48 2014
@@ -94,10 +94,10 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
                 2 key (type: string)
-              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9
+              outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
               Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string)
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
@@ -270,10 +270,10 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
                 2 key (type: string)
-              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9
+              outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
               Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string)
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator