Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 06:00:54 UTC

svn commit: r1629563 [14/33] - in /hive/branches/spark: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ contr...

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java Mon Oct  6 04:00:39 2014
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.No
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -318,4 +320,17 @@ public abstract class GenericUDFBaseNume
   public void setAnsiSqlArithmetic(boolean ansiSqlArithmetic) {
     this.ansiSqlArithmetic = ansiSqlArithmetic;
   }
+
+  public PrimitiveTypeInfo deriveMinArgumentCast(
+      ExprNodeDesc childExpr, TypeInfo targetType) {
+    assert targetType instanceof PrimitiveTypeInfo : "Not a primitive type: " + targetType;
+    PrimitiveTypeInfo pti = (PrimitiveTypeInfo)targetType;
+    // We only do the minimum cast for decimals. Other types are assumed safe; fix if needed.
+    // We also don't do anything for non-primitive children (maybe we should assert).
+    if ((pti.getPrimitiveCategory() != PrimitiveCategory.DECIMAL)
+        || (!(childExpr.getTypeInfo() instanceof PrimitiveTypeInfo))) return pti;
+    PrimitiveTypeInfo childTi = (PrimitiveTypeInfo)childExpr.getTypeInfo();
+    // If the child is also decimal, no cast is needed (we hope - can target type be narrower?).
+    return HiveDecimalUtils.getDecimalTypeForPrimitiveCategory(childTi);
+  }
 }

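Note on the hunk above: the new deriveMinArgumentCast() derives the narrowest cast to apply to a child operand. A non-decimal target type, or a non-primitive child, is returned unchanged, while a decimal target is shrunk to the precision and scale implied by the child's own primitive category. A minimal standalone sketch of that last step, assuming a 0.14-era hive-exec jar on the classpath (the demo class is hypothetical, not part of this commit):

import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class MinArgumentCastDemo {
  public static void main(String[] args) {
    // An int operand never needs more than decimal(10,0), no matter how wide the
    // target decimal of the arithmetic expression is.
    DecimalTypeInfo narrow = (DecimalTypeInfo) HiveDecimalUtils
        .getDecimalTypeForPrimitiveCategory(TypeInfoFactory.intTypeInfo);
    System.out.println("precision=" + narrow.getPrecision() + " scale=" + narrow.getScale());
    // expected: precision=10 scale=0
  }
}
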
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java Mon Oct  6 04:00:39 2014
@@ -22,6 +22,7 @@ import java.util.TimeZone;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -33,7 +34,9 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.io.Text;
 
-
+@Description(name = "from_utc_timestamp",
+             value = "from_utc_timestamp(timestamp, string timezone) - "
+                     + "Assumes given timestamp ist UTC and converts to given timezone (as of Hive 0.8.0)")
 public class GenericUDFFromUtcTimestamp extends GenericUDF {
 
   static final Log LOG = LogFactory.getLog(GenericUDFFromUtcTimestamp.class);

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java Mon Oct  6 04:00:39 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
@@ -62,6 +63,11 @@ import org.apache.hadoop.hive.ql.exec.ve
  * otherwise it returns expr3. IF() returns a numeric or string value, depending
  * on the context in which it is used.
  */
+@Description(
+    name = "if",
+    value = "IF(expr1,expr2,expr3) - If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then"
+    + " IF() returns expr2; otherwise it returns expr3. IF() returns a numeric or string value,"
+    + " depending on the context in which it is used.")
 @VectorizedExpressions({
   IfExprLongColumnLongColumn.class, IfExprDoubleColumnDoubleColumn.class,
   IfExprLongColumnLongScalar.class, IfExprDoubleColumnDoubleScalar.class,

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java Mon Oct  6 04:00:39 2014
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
@@ -39,6 +40,8 @@ import org.apache.hadoop.hive.serde2.obj
  * Creates a TimestampWritable object using PrimitiveObjectInspectorConverter
  *
  */
+@Description(name = "timestamp",
+value = "cast(date as timestamp) - Returns timestamp")
 @VectorizedExpressions({CastLongToTimestampViaLongToLong.class,
   CastDoubleToTimestampViaDoubleToLong.class, CastDecimalToTimestamp.class})
 public class GenericUDFTimestamp extends GenericUDF {

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java Mon Oct  6 04:00:39 2014
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 
+@Description(name = "to_utc_timestamp",
+             value = "to_utc_timestamp(timestamp, string timezone) - "
+                     + "Assumes given timestamp is in given timezone and converts to UTC (as of Hive 0.8.0)")
 public class GenericUDFToUtcTimestamp extends
     GenericUDFFromUtcTimestamp {
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Mon Oct  6 04:00:39 2014
@@ -331,7 +331,8 @@ public class TestOperators extends TestC
       Configuration hconf = new JobConf(TestOperators.class);
       HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
           "hdfs:///testDir/testFile");
-      IOContext.get().setInputPath(new Path("hdfs:///testDir/testFile"));
+      IOContext.get(hconf.get(Utilities.INPUT_NAME)).setInputPath(
+          new Path("hdfs:///testDir/testFile"));
 
       // initialize pathToAliases
       ArrayList<String> aliases = new ArrayList<String>();

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java Mon Oct  6 04:00:39 2014
@@ -26,6 +26,7 @@ import java.util.Random;
 
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 public class TestTezSessionPool {
@@ -157,4 +158,29 @@ public class TestTezSessionPool {
         }
       }
     }
+
+  @Test
+  public void testCloseAndOpenDefault() throws Exception {
+    poolManager = new TestTezSessionPoolManager();
+    TezSessionState session = Mockito.mock(TezSessionState.class);
+    Mockito.when(session.isDefault()).thenReturn(false);
+
+    poolManager.closeAndOpen(session, conf, false);
+
+    Mockito.verify(session).close(false);
+    Mockito.verify(session).open(conf, null);
+  }
+
+  @Test
+  public void testCloseAndOpenWithResources() throws Exception {
+    poolManager = new TestTezSessionPoolManager();
+    TezSessionState session = Mockito.mock(TezSessionState.class);
+    Mockito.when(session.isDefault()).thenReturn(false);
+    String[] extraResources = new String[] { "file:///tmp/foo.jar" };
+
+    poolManager.closeAndOpen(session, conf, extraResources, false);
+
+    Mockito.verify(session).close(false);
+    Mockito.verify(session).open(conf, extraResources);
+  }
 }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java Mon Oct  6 04:00:39 2014
@@ -30,9 +30,11 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.LinkedHashMap;
-import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -48,6 +50,7 @@ import org.apache.hadoop.hive.ql.plan.Re
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.mapred.JobConf;
@@ -90,8 +93,11 @@ public class TestTezTask {
     path = mock(Path.class);
     when(path.getFileSystem(any(Configuration.class))).thenReturn(fs);
     when(utils.getTezDir(any(Path.class))).thenReturn(path);
-    when(utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), any(LocalResource.class),
-        any(List.class), any(FileSystem.class), any(Context.class), anyBoolean(), any(TezWork.class))).thenAnswer(new Answer<Vertex>() {
+    when(
+        utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class),
+            any(LocalResource.class), any(List.class), any(FileSystem.class), any(Context.class),
+            anyBoolean(), any(TezWork.class), any(VertexType.class))).thenAnswer(
+        new Answer<Vertex>() {
 
           @Override
           public Vertex answer(InvocationOnMock invocation) throws Throwable {
@@ -101,8 +107,8 @@ public class TestTezTask {
           }
         });
 
-    when(utils.createEdge(any(JobConf.class), any(Vertex.class),
-        any(Vertex.class), any(TezEdgeProperty.class))).thenAnswer(new Answer<Edge>() {
+    when(utils.createEdge(any(JobConf.class), any(Vertex.class), any(Vertex.class),
+            any(TezEdgeProperty.class), any(VertexType.class))).thenAnswer(new Answer<Edge>() {
 
           @Override
           public Edge answer(InvocationOnMock invocation) throws Throwable {
@@ -204,10 +210,11 @@ public class TestTezTask {
   @Test
   public void testSubmit() throws Exception {
     DAG dag = DAG.create("test");
-    task.submit(conf, dag, path, appLr, sessionState, new LinkedList());
+    task.submit(conf, dag, path, appLr, sessionState, Collections.<LocalResource> emptyList(),
+        new String[0], Collections.<String,LocalResource> emptyMap());
     // validate close/reopen
-    verify(sessionState, times(1)).open(any(HiveConf.class));
-    verify(sessionState, times(1)).close(eq(false));  // now uses pool after HIVE-7043
+    verify(sessionState, times(1)).open(any(HiveConf.class), any(String[].class));
+    verify(sessionState, times(1)).close(eq(true)); // now uses pool after HIVE-7043
     verify(session, times(2)).submitDAG(any(DAG.class));
   }
 
@@ -216,4 +223,54 @@ public class TestTezTask {
     task.close(work, 0);
     verify(op, times(4)).jobClose(any(Configuration.class), eq(true));
   }
+
+  @Test
+  public void testExistingSessionGetsStorageHandlerResources() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+    when(sessionState.isOpen()).thenReturn(true);
+    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
+    task.updateSession(sessionState, conf, path, inputOutputJars, resMap);
+    verify(session).addAppMasterLocalFiles(resMap);
+  }
+
+  @Test
+  public void testExtraResourcesAddedToDag() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+    DAG dag = mock(DAG.class);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+    when(sessionState.isOpen()).thenReturn(true);
+    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
+    task.addExtraResourcesToDag(sessionState, dag, inputOutputJars, resMap);
+    verify(dag).addTaskLocalFiles(resMap);
+  }
+
+  @Test
+  public void testGetExtraLocalResources() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+
+    assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars));
+  }
 }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java Mon Oct  6 04:00:39 2014
@@ -90,7 +90,9 @@ public class StorageFormats {
    * includes both native Hive storage formats as well as those enumerated in the
    * ADDITIONAL_STORAGE_FORMATS table.
    *
-   * @return List of storage format as paramters.
+   * @return List of storage formats as a Collection of Object arrays, each containing (in order):
+   *         Storage format name, SerDe class name, InputFormat class name, OutputFormat class name.
+   *         This list is used as the parameters to JUnit parameterized tests.
    */
   public static Collection<Object[]> asParameters() {
     List<Object[]> parameters = new ArrayList<Object[]>();
@@ -130,5 +132,21 @@ public class StorageFormats {
 
     return parameters;
   }
+
+  /**
+   * Returns a list of the names of storage formats.
+   *
+   * @return List of names of storage formats.
+   */
+  public static Collection<Object[]> names() {
+    List<Object[]> names = new ArrayList<Object[]>();
+    for (StorageFormatDescriptor descriptor : ServiceLoader.load(StorageFormatDescriptor.class)) {
+      String[] formatNames = new String[descriptor.getNames().size()];
+      formatNames = descriptor.getNames().toArray(formatNames);
+      String[] params = { formatNames[0] };
+      names.add(params);
+    }
+    return names;
+  }
 }
 

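Note on the hunks above: the expanded asParameters() javadoc spells out the layout of each Object[] row, and the new names() helper exposes a name-only variant. A hypothetical JUnit 4 consumer, assuming the four String elements appear in the order the javadoc lists (sketch only, not part of this commit):

import static org.junit.Assert.assertNotNull;

import java.util.Collection;

import org.apache.hadoop.hive.ql.io.StorageFormats;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class StorageFormatParametersDemo {

  @Parameters
  public static Collection<Object[]> formats() {
    return StorageFormats.asParameters();
  }

  private final String name;
  private final String serdeClass;
  private final String inputFormatClass;
  private final String outputFormatClass;

  // One constructor parameter per element of each Object[] row, in the order the javadoc lists.
  public StorageFormatParametersDemo(String name, String serdeClass,
      String inputFormatClass, String outputFormatClass) {
    this.name = name;
    this.serdeClass = serdeClass;
    this.inputFormatClass = inputFormatClass;
    this.outputFormatClass = outputFormatClass;
  }

  @Test
  public void formatIsDescribed() {
    assertNotNull(name);
    assertNotNull(inputFormatClass);
    assertNotNull(outputFormatClass);
  }
}
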
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java Mon Oct  6 04:00:39 2014
@@ -115,7 +115,8 @@ public class TestHiveBinarySearchRecordR
   }
 
   private void resetIOContext() {
-    ioContext = IOContext.get();
+    conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader");
+    ioContext = IOContext.get(conf.get(Utilities.INPUT_NAME));
     ioContext.setUseSorted(false);
     ioContext.setIsBinarySearching(false);
     ioContext.setEndBinarySearch(false);
@@ -124,6 +125,7 @@ public class TestHiveBinarySearchRecordR
   }
 
   private void init() throws IOException {
+    conf = new JobConf();
     resetIOContext();
     rcfReader = mock(RCFileRecordReader.class);
     when(rcfReader.next((LongWritable)anyObject(),
@@ -131,7 +133,6 @@ public class TestHiveBinarySearchRecordR
     // Since the start is 0, and the length is 100, the first call to sync should be with the value
     // 50 so return that for getPos()
     when(rcfReader.getPos()).thenReturn(50L);
-    conf = new JobConf();
     conf.setBoolean("hive.input.format.sorted", true);
 
     TableDesc tblDesc = Utilities.defaultTd;

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Mon Oct  6 04:00:39 2014
@@ -165,7 +165,7 @@ public class TestSymlinkTextInputFormat 
             + " failed with exit code= " + ecode);
       }
 
-      String cmd = "select key from " + tblName;
+      String cmd = "select key*1 from " + tblName;
       drv.compile(cmd);
 
       //create scratch dir

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Mon Oct  6 04:00:39 2014
@@ -1633,7 +1633,7 @@ public class TestInputOutputFormat {
       assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
           combineSplit.getPath(bucket).toString());
       assertEquals(0, combineSplit.getOffset(bucket));
-      assertEquals(227, combineSplit.getLength(bucket));
+      assertEquals(225, combineSplit.getLength(bucket));
     }
     String[] hosts = combineSplit.getLocations();
     assertEquals(2, hosts.length);

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java Mon Oct  6 04:00:39 2014
@@ -335,6 +335,104 @@ public class TestNewIntegerEncoding {
   }
 
   @Test
+  public void testDeltaOverflow() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{4513343538618202719L, 4513343538618202711L,
+        2911390882471569739L,
+        -9181829309989854913L};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
+  public void testDeltaOverflow2() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{Long.MAX_VALUE, 4513343538618202711L,
+        2911390882471569739L,
+        Long.MIN_VALUE};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
+  public void testDeltaOverflow3() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{-4513343538618202711L, -2911390882471569739L, -2,
+        Long.MAX_VALUE};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
   public void testIntegerMin() throws Exception {
     ObjectInspector inspector;
     synchronized (TestOrcFile.class) {

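Note on the hunk above: the three testDeltaOverflow cases feed ORC's integer writer runs of longs whose pairwise differences no longer fit in a signed 64-bit delta, then read them back. A small plain-Java check of that claim, using two adjacent values from the first test:

import java.math.BigInteger;

public class DeltaOverflowCheck {
  public static void main(String[] args) {
    long a = 2911390882471569739L;   // third value in the first test's input
    long b = -9181829309989854913L;  // fourth value in the first test's input
    BigInteger delta = BigInteger.valueOf(a).subtract(BigInteger.valueOf(b));
    System.out.println(delta);              // 12093220192461424652
    System.out.println(delta.bitLength());  // 64 -> too wide for a signed long
    System.out.println(a - b);              // wraps around to a negative value
  }
}
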
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Mon Oct  6 04:00:39 2014
@@ -1754,9 +1754,9 @@ public class TestOrcFile {
           stripe.getDataLength() < 5000);
     }
     // with HIVE-7832, the dictionaries will be disabled after writing the first
-    // stripe as there are too many distinct values. Hence only 3 stripes as
+    // stripe as there are too many distinct values. Hence only 4 stripes as
     // compared to 25 stripes in version 0.11 (above test case)
-    assertEquals(3, i);
+    assertEquals(4, i);
     assertEquals(2500, reader.getNumberOfRows());
   }
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java Mon Oct  6 04:00:39 2014
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hive.ql.io.orc;
 
-import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
 import java.math.BigInteger;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import org.junit.Test;
+
+import com.google.common.math.LongMath;
 
 public class TestSerializationUtils {
 
@@ -112,6 +115,47 @@ public class TestSerializationUtils {
         SerializationUtils.readBigInteger(fromBuffer(buffer)));
   }
 
+  @Test
+  public void testSubtractionOverflow() {
+    // cross check results with Guava results below
+    SerializationUtils utils = new SerializationUtils();
+    assertEquals(false, utils.isSafeSubtract(22222222222L, Long.MIN_VALUE));
+    assertEquals(false, utils.isSafeSubtract(-22222222222L, Long.MAX_VALUE));
+    assertEquals(false, utils.isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE));
+    assertEquals(true, utils.isSafeSubtract(-1553103058346370095L, 6553103058346370095L));
+    assertEquals(true, utils.isSafeSubtract(0, Long.MAX_VALUE));
+    assertEquals(true, utils.isSafeSubtract(Long.MIN_VALUE, 0));
+  }
+
+  @Test
+  public void testSubtractionOverflowGuava() {
+    try {
+      LongMath.checkedSubtract(22222222222L, Long.MIN_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    try {
+      LongMath.checkedSubtract(-22222222222L, Long.MAX_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    try {
+      LongMath.checkedSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    assertEquals(-8106206116692740190L,
+        LongMath.checkedSubtract(-1553103058346370095L, 6553103058346370095L));
+    assertEquals(-Long.MAX_VALUE, LongMath.checkedSubtract(0, Long.MAX_VALUE));
+    assertEquals(Long.MIN_VALUE, LongMath.checkedSubtract(Long.MIN_VALUE, 0));
+  }
+
   public static void main(String[] args) throws Exception {
     TestSerializationUtils test = new TestSerializationUtils();
     test.testDoubles();

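Note on the hunk above: testSubtractionOverflow pins down the expected behaviour of SerializationUtils.isSafeSubtract, and testSubtractionOverflowGuava cross-checks the same six cases against Guava's LongMath.checkedSubtract. The isSafeSubtract implementation itself is not part of this hunk; the sketch below shows one standard sign-bit check that satisfies all six expectations (not necessarily the code Hive ships):

public class SafeSubtractSketch {
  // a - b overflows exactly when a and b have different signs and the result's sign
  // differs from a's; both conditions show up in the sign bit of the XORs below.
  static boolean isSafeSubtract(long a, long b) {
    return ((a ^ b) & (a ^ (a - b))) >= 0;
  }

  public static void main(String[] args) {
    // The same six cases the test above asserts.
    System.out.println(isSafeSubtract(22222222222L, Long.MIN_VALUE));   // false
    System.out.println(isSafeSubtract(-22222222222L, Long.MAX_VALUE));  // false
    System.out.println(isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE)); // false
    System.out.println(isSafeSubtract(-1553103058346370095L, 6553103058346370095L)); // true
    System.out.println(isSafeSubtract(0, Long.MAX_VALUE));              // true
    System.out.println(isSafeSubtract(Long.MIN_VALUE, 0));              // true
  }
}
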
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Mon Oct  6 04:00:39 2014
@@ -21,14 +21,18 @@ package org.apache.hadoop.hive.ql.metada
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Pattern;
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -45,6 +49,7 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer;
 import org.apache.hadoop.hive.serde2.thrift.test.Complex;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -63,6 +68,9 @@ public class TestHive extends TestCase {
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
+    // enable trash so it can be tested
+    hiveConf.setFloat("fs.trash.checkpoint.interval", 30);  // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
+    hiveConf.setFloat("fs.trash.interval", 30);             // FS_TRASH_INTERVAL_KEY (hadoop-2)
     SessionState.start(hiveConf);
     try {
       hm = Hive.get(hiveConf);
@@ -79,6 +87,9 @@ public class TestHive extends TestCase {
   protected void tearDown() throws Exception {
     try {
       super.tearDown();
+      // disable trash
+      hiveConf.setFloat("fs.trash.checkpoint.interval", 30);  // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
+      hiveConf.setFloat("fs.trash.interval", 30);             // FS_TRASH_INTERVAL_KEY (hadoop-2)
       Hive.closeCurrent();
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
@@ -294,7 +305,7 @@ public class TestHive extends TestCase {
     try {
       String dbName = "db_for_testgettables";
       String table1Name = "table1";
-      hm.dropDatabase(dbName, true, true);
+      hm.dropDatabase(dbName, true, true, true);
 
       Database db = new Database();
       db.setName(dbName);
@@ -330,16 +341,92 @@ public class TestHive extends TestCase {
 
       // Drop all tables
       for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
         hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
       }
       hm.dropDatabase(dbName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testGetTables() failed");
+      System.err.println("testGetAndDropTables() failed");
       throw e;
     }
   }
 
+  public void testDropTableTrash() throws Throwable {
+    if (!ShimLoader.getHadoopShims().supportTrashFeature()) {
+      return; // it's hadoop-1
+    }
+    try {
+      String dbName = "db_for_testdroptable";
+      hm.dropDatabase(dbName, true, true, true);
+
+      Database db = new Database();
+      db.setName(dbName);
+      hm.createDatabase(db);
+
+      List<String> ts = new ArrayList<String>(2);
+      String tableBaseName = "droptable";
+      ts.add(tableBaseName + "1");
+      ts.add(tableBaseName + "2");
+      Table tbl1 = createTestTable(dbName, ts.get(0));
+      hm.createTable(tbl1);
+      Table tbl2 = createTestTable(dbName, ts.get(1));
+      hm.createTable(tbl2);
+      // test dropping tables and trash behavior
+      Table table1 = hm.getTable(dbName, ts.get(0));
+      assertNotNull(table1);
+      assertEquals(ts.get(0), table1.getTableName());
+      Path path1 = table1.getPath();
+      FileSystem fs = path1.getFileSystem(hiveConf);
+      assertTrue(fs.exists(path1));
+      // drop table and check that trash works
+      Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(hiveConf, fs);
+      assertNotNull("trash directory should not be null", trashDir);
+      Path trash1 = mergePaths(trashDir, path1);
+      Path pathglob = trash1.suffix("*");
+      FileStatus before[] = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(0));
+      assertFalse(fs.exists(path1));
+      FileStatus after[] = fs.globStatus(pathglob);
+      assertTrue("trash dir before and after DROP TABLE noPURGE are not different",
+                 before.length != after.length);
+
+      // drop a table without saving to trash by setting the purge option
+      Table table2 = hm.getTable(dbName, ts.get(1));
+      assertNotNull(table2);
+      assertEquals(ts.get(1), table2.getTableName());
+      Path path2 = table2.getPath();
+      assertTrue(fs.exists(path2));
+      Path trash2 = mergePaths(trashDir, path2);
+      System.out.println("trashDir2 is " + trash2);
+      pathglob = trash2.suffix("*");
+      before = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(1), true, true, true); // deleteData, ignoreUnknownTable, ifPurge
+      assertFalse(fs.exists(path2));
+      after = fs.globStatus(pathglob);
+      Arrays.sort(before);
+      Arrays.sort(after);
+      assertEquals("trash dir before and after DROP TABLE PURGE are different",
+                   before.length, after.length);
+      assertTrue("trash dir before and after DROP TABLE PURGE are different",
+                 Arrays.equals(before, after));
+
+      // Drop all tables
+      for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
+        hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
+      }
+      hm.dropDatabase(dbName);
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDropTableTrash() failed");
+      throw e;
+    }
+  }
+
+
   public void testPartition() throws Throwable {
     try {
       String tableName = "table_for_testpartition";
@@ -533,4 +620,39 @@ public class TestHive extends TestCase {
     newHiveObj = Hive.get(newHconf);
     assertTrue(prevHiveObj != newHiveObj);
   }
+
+  // shamelessly copied from Path in hadoop-2
+  private static final String SEPARATOR = "/";
+  private static final char SEPARATOR_CHAR = '/';
+
+  private static final String CUR_DIR = ".";
+
+  private static final boolean WINDOWS
+      = System.getProperty("os.name").startsWith("Windows");
+
+  private static final Pattern hasDriveLetterSpecifier =
+      Pattern.compile("^/?[a-zA-Z]:");
+
+  private static Path mergePaths(Path path1, Path path2) {
+    String path2Str = path2.toUri().getPath();
+    path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str));
+    // Add path components explicitly, because simply concatenating two path
+    // string is not safe, for example:
+    // "/" + "/foo" yields "//foo", which will be parsed as authority in Path
+    return new Path(path1.toUri().getScheme(),
+        path1.toUri().getAuthority(),
+        path1.toUri().getPath() + path2Str);
+  }
+
+  private static int startPositionWithoutWindowsDrive(String path) {
+    if (hasWindowsDrive(path)) {
+      return path.charAt(0) ==  SEPARATOR_CHAR ? 3 : 2;
+    } else {
+      return 0;
+    }
+  }
+
+  private static boolean hasWindowsDrive(String path) {
+    return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
+  }
 }

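Note on the hunk above: the mergePaths()/startPositionWithoutWindowsDrive() helpers copied into the test exist so testDropTableTrash can predict where the trash copy of a dropped table lands: the trash root keeps its scheme and authority, and the table path is appended verbatim. A small illustration with made-up paths (the trash root and warehouse locations are hypothetical, and the Windows drive-letter handling is skipped for brevity):

import org.apache.hadoop.fs.Path;

public class MergePathsDemo {
  public static void main(String[] args) {
    Path trashRoot = new Path("hdfs://nn:8020/user/hive/.Trash/Current");
    Path table = new Path("hdfs://nn:8020/warehouse/db_for_testdroptable.db/droptable1");
    // Same concatenation the test helper performs: keep path1's scheme/authority,
    // append path2's path component.
    Path merged = new Path(trashRoot.toUri().getScheme(), trashRoot.toUri().getAuthority(),
        trashRoot.toUri().getPath() + table.toUri().getPath());
    System.out.println(merged);
    // hdfs://nn:8020/user/hive/.Trash/Current/warehouse/db_for_testdroptable.db/droptable1
  }
}
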
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java Mon Oct  6 04:00:39 2014
@@ -84,6 +84,13 @@ public class TestHiveRemote extends Test
   }
 
   /**
+   * Cannot control trash in remote metastore, so skip this test
+   */
+  @Override
+  public void testDropTableTrash() {
+  }
+
+  /**
    * Finds a free port.
    *
    * @return a free port

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java Mon Oct  6 04:00:39 2014
@@ -24,6 +24,9 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+/**
+ * various Parser tests for INSERT/UPDATE/DELETE
+ */
 public class TestIUD {
   private static HiveConf conf;
 
@@ -102,6 +105,18 @@ public class TestIUD {
       ast.toStringTree());
   }
   @Test
+  public void testUpdateWithWhereSingleSetExpr() throws ParseException {
+    ASTNode ast = parse("UPDATE src SET key = -3+(5*9)%8, val = cast(6.1 + c as INT), d = d - 1 WHERE value IS NULL");
+    Assert.assertEquals("AST doesn't match",
+      "(TOK_UPDATE_TABLE (TOK_TABNAME src) " +
+        "(TOK_SET_COLUMNS_CLAUSE " +
+        "(= (TOK_TABLE_OR_COL key) (+ (- 3) (% (* 5 9) 8))) " +
+        "(= (TOK_TABLE_OR_COL val) (TOK_FUNCTION TOK_INT (+ 6.1 (TOK_TABLE_OR_COL c)))) " +
+        "(= (TOK_TABLE_OR_COL d) (- (TOK_TABLE_OR_COL d) 1))) " +
+        "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (TOK_TABLE_OR_COL value))))",
+      ast.toStringTree());
+  }
+  @Test
   public void testUpdateWithWhereMultiSet() throws ParseException {
     ASTNode ast = parse("UPDATE src SET key = 3, value = 8 WHERE VALUE = 1230997");
     Assert.assertEquals("AST doesn't match", 
@@ -207,13 +222,13 @@ public class TestIUD {
   }
   @Test
   public void testInsertIntoTableFromAnonymousTable() throws ParseException {
-    ASTNode ast = parse("insert into table page_view values(1,2),(3,4)");
+    ASTNode ast = parse("insert into table page_view values(-1,2),(3,+4)");
     Assert.assertEquals("AST doesn't match",
       "(TOK_QUERY " +
         "(TOK_FROM " +
           "(TOK_VIRTUAL_TABLE " +
           "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " +
-          "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " +
+          "(TOK_VALUES_TABLE (TOK_VALUE_ROW (- 1) 2) (TOK_VALUE_ROW 3 (+ 4))))) " +
         "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " +
           "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))",
       ast.toStringTree());

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java Mon Oct  6 04:00:39 2014
@@ -266,9 +266,12 @@ public class TestUpdateDeleteSemanticAna
 
     // I have to create the tables here (rather than in setup()) because I need the Hive
     // connection, which is conveniently created by the semantic analyzer.
-    db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, OrcOutputFormat.class);
+    Map<String, String> params = new HashMap<String, String>(1);
+    params.put(SemanticAnalyzer.ACID_TABLE_PROPERTY, "true");
+    db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class,
+        OrcOutputFormat.class, 2, Arrays.asList("a"), params);
     db.createTable("U", Arrays.asList("a", "b"), Arrays.asList("ds"), OrcInputFormat.class,
-        OrcOutputFormat.class);
+        OrcOutputFormat.class, 2, Arrays.asList("a"), params);
     Table u = db.getTable("U");
     Map<String, String> partVals = new HashMap<String, String>(2);
     partVals.put("ds", "yesterday");
@@ -280,7 +283,7 @@ public class TestUpdateDeleteSemanticAna
     // validate the plan
     sem.validate();
 
-    QueryPlan plan = new QueryPlan(query, sem, 0L, testName);
+    QueryPlan plan = new QueryPlan(query, sem, 0L, testName, null);
 
     return new ReturnInfo(tree, sem, plan);
   }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java Mon Oct  6 04:00:39 2014
@@ -23,11 +23,16 @@ import java.util.List;
 import junit.framework.Assert;
 
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
+import org.apache.hadoop.mapred.JobConf;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 public class TestTezWork {
 
+  private static final String MR_JAR_PROPERTY = "tmpjars";
   private List<BaseWork> nodes;
   private TezWork work;
 
@@ -156,4 +161,75 @@ public class TestTezWork {
       Assert.assertEquals(sorted.get(i), nodes.get(4-i));
     }
   }
+
+  @Test
+  public void testConfigureJars() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.set(MR_JAR_PROPERTY, "file:///tmp/foo2.jar");
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsNoExtraJars() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsWithNull() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.unset(MR_JAR_PROPERTY);
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsStartingWithNull() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.unset(MR_JAR_PROPERTY);
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.setStrings(MR_JAR_PROPERTY, "file:///tmp/foo1.jar", "file:///tmp/foo2.jar");
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
+  }
 }

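Note on the hunk above: taken together, the four testConfigureJars* cases describe the contract of TezWork.configureJobConfAndExtractJars: union the "tmpjars" values seen before and after each BaseWork configures the JobConf, preserve order, drop duplicates, and tolerate the property being unset. The sketch below is one way to satisfy those assertions; it is not TezWork's actual implementation:

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.mapred.JobConf;

public class JarAccumulationSketch {

  /** Anything that can contribute settings to a JobConf; stands in for BaseWork here. */
  interface ConfiguresJobConf {
    void configureJobConf(JobConf conf);
  }

  static void configureAndCollectJars(JobConf conf, Iterable<ConfiguresJobConf> works) {
    Set<String> jars = new LinkedHashSet<String>();
    addAll(jars, conf.getStrings("tmpjars"));   // jars present before any work runs
    for (ConfiguresJobConf work : works) {
      work.configureJobConf(conf);              // a work may add, replace or unset tmpjars
      addAll(jars, conf.getStrings("tmpjars"));
    }
    if (!jars.isEmpty()) {
      conf.setStrings("tmpjars", jars.toArray(new String[jars.size()]));
    }
  }

  private static void addAll(Set<String> jars, String[] values) {
    if (values == null) {
      return;
    }
    for (String value : values) {
      jars.add(value);
    }
  }

  public static void main(String[] args) {
    // Mirrors the first test case: foo1.jar set up front, foo2.jar added by the work.
    JobConf conf = new JobConf(false);
    conf.set("tmpjars", "file:///tmp/foo1.jar");
    ConfiguresJobConf work = new ConfiguresJobConf() {
      public void configureJobConf(JobConf c) {
        c.set("tmpjars", "file:///tmp/foo2.jar");
      }
    };
    configureAndCollectJars(conf, Collections.singletonList(work));
    System.out.println(conf.get("tmpjars"));    // file:///tmp/foo1.jar,file:///tmp/foo2.jar
  }
}
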
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java Mon Oct  6 04:00:39 2014
@@ -98,14 +98,6 @@ public class TestUDFMath {
     input = createDecimal("7.38905609893065");
     DoubleWritable res = udf.evaluate(input);
     Assert.assertEquals(2.0, res.get(), 0.000001);
-    
-    DoubleWritable input = new DoubleWritable(9.0);
-    res = udf.evaluate(createDecimal("3.0"), input);
-    Assert.assertEquals(2.0, res.get(), 0.000001);
-
-    DoubleWritable base = new DoubleWritable(3.0);
-    res = udf.evaluate(base, createDecimal("9.0"));
-    Assert.assertEquals(2.0, res.get(), 0.000001);
 
     res = udf.evaluate(createDecimal("3.0"), createDecimal("9.0"));
     Assert.assertEquals(2.0, res.get(), 0.000001);

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
 insert overwrite table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc;

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q Mon Oct  6 04:00:39 2014
@@ -5,12 +5,11 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 
 -- check update without update priv
-create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc;;
+create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 set user.name=user1;
 delete from auth_nodel where i > 0;

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q Mon Oct  6 04:00:39 2014
@@ -5,12 +5,11 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 
 -- check update without update priv
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;;
+create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 set user.name=user1;
 update auth_noupd set i = 0 where i > 0;

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q Mon Oct  6 04:00:39 2014
@@ -7,6 +7,6 @@ dfs ${system:test.dfs.mkdir} ${system:te
 dfs -touchz ${system:test.tmp.dir}/a_uri_crtab1/1.txt;
 dfs -chmod 555 ${system:test.tmp.dir}/a_uri_crtab1/1.txt;
 
-create table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab_ext';
+create table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab1';
 
 -- Attempt to create table with dir that does not have write permission should fail

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q Mon Oct  6 04:00:39 2014
@@ -1,8 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 update foo set ds = 'fred';

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q Mon Oct  6 04:00:39 2014
@@ -1,11 +1,10 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
-CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC;
+CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
 insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
 set hive.vectorized.execution.enabled=true;
 insert into table acid_vectorized values (1, 'bar');

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q Mon Oct  6 04:00:39 2014
@@ -1,4 +1,25 @@
 set hive.stats.fetch.column.stats=true;
+set hive.map.aggr.hash.percentmemory=0.0f;
+
+-- hash aggregation is disabled
+
+-- There are different cases for Group By depending on map/reduce side, hash aggregation,
+-- grouping sets and column stats. If we don't have column stats, we just assume hash
+-- aggregation is disabled. Following are the possible cases and the rules for cardinality
+-- estimation:
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 
 create table if not exists loc_staging (
   state string,
@@ -29,71 +50,91 @@ from ( select state as a, locid as b, co
      ) sq1
 group by a,c;
 
-analyze table loc_orc compute statistics for columns state,locid,zip,year;
+analyze table loc_orc compute statistics for columns state,locid,year;
 
--- only one distinct value in year column + 1 NULL value
--- map-side GBY: numRows: 8 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
 explain select year from loc_orc group by year;
 
--- map-side GBY: numRows: 8
--- reduce-side GBY: numRows: 4
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 8
 explain select state,locid from loc_orc group by state,locid;
 
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
 explain select state,locid from loc_orc group by state,locid with cube;
 
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
 explain select state,locid from loc_orc group by state,locid with rollup;
 
--- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 8: column stats, grouping sets - cardinality = 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
 
--- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
 
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
 
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
 
-set hive.stats.map.parallelism=10;
+set hive.map.aggr.hash.percentmemory=0.5f;
+set mapred.max.split.size=80;
+-- map-side parallelism will be 10
 
--- map-side GBY: numRows: 80 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
 explain select year from loc_orc group by year;
 
--- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
 
+-- ndvProduct becomes 0 as zip does not have column stats
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state,zip from loc_orc group by state,zip;
+
+set mapred.max.split.size=1000;
 set hive.stats.fetch.column.stats=false;
-set hive.stats.map.parallelism=1;
 
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
 
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
 explain select state,locid from loc_orc group by state,locid with rollup;
 
--- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
 
--- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 7: NO column stats - cardinality = 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
 
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
 
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
 
-set hive.stats.map.parallelism=10;
+set mapred.max.split.size=80;
 
--- map-side GBY: numRows: 80 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
 explain select year from loc_orc group by year;
 
--- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
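
Purely as an illustration of the rules listed at the top of annotate_stats_groupby.q, and using the figures the test's own comments rely on (8 source rows, 2 distinct values of year counting the NULL, 4 grouping sets for a CUBE over two columns, map-side parallelism of 10 once mapred.max.split.size=80), the expected cardinalities work out as follows. This is a reading aid that assumes the loc_orc table and settings from the file above, not additional test queries:

-- Case 3 (map side: column stats, hash aggregation, no grouping sets)
--   min(numRows / 2, ndvProduct * parallelism) = min(8 / 2, 2 * 10) = 4
-- Case 9 (reduce side: column stats, no grouping sets), fed the 4 rows from above
--   min(numRows, ndvProduct) = min(4, 2) = 2
explain select year from loc_orc group by year;

-- Case 2 (map side: no column stats, no hash aggregation, grouping sets)
--   numRows * sizeOfGroupingSet = 8 * 4 = 32
-- Case 7 (reduce side: no column stats), fed the 32 rows from above
--   numRows / 2 = 32 / 2 = 16
explain select state,locid from loc_orc group by state,locid with cube;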
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q Mon Oct  6 04:00:39 2014
@@ -65,6 +65,9 @@ explain select zip from loc_orc;
 -- basicStatState: COMPLETE colStatState: PARTIAL
 explain select state from loc_orc;
 
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain select year from loc_orc;
+
 -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
 explain select state,locid from loc_orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q Mon Oct  6 04:00:39 2014
@@ -4,13 +4,12 @@ set hive.security.authenticator.manager=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)
 
-CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc;
+CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 -- grant update privilege to another user
 GRANT DELETE ON t_auth_del TO USER userWIns;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q Mon Oct  6 04:00:39 2014
@@ -5,12 +5,11 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 
 set user.name=user1;
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;;
+create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 delete from auth_noupd where i > 0;
 
 set user.name=hive_admin_user;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q Mon Oct  6 04:00:39 2014
@@ -4,13 +4,12 @@ set hive.security.authenticator.manager=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)
 
-CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc;
+CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 CREATE TABLE t_select(i int);
 GRANT ALL ON TABLE t_select TO ROLE public;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q Mon Oct  6 04:00:39 2014
@@ -5,12 +5,11 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 
 set user.name=user1;
-create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;;
+create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 update auth_noupd set i = 0 where i > 0;
 
 set user.name=hive_admin_user;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q Mon Oct  6 04:00:39 2014
@@ -2,11 +2,16 @@
 -- qtest_get_java_boolean should already be created during test initialization
 select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1;
 
+describe function extended qtest_get_java_boolean;
+
 create database mydb;
 create function mydb.func1 as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper';
 
 show functions mydb.func1;
 
+describe function extended mydb.func1;
+
+
 select mydb.func1('abc') from src limit 1;
 
 drop function mydb.func1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q Mon Oct  6 04:00:39 2014
@@ -39,6 +39,9 @@ SELECT key - '1.0' FROM DECIMAL_UDF;
 EXPLAIN SELECT key * key FROM DECIMAL_UDF;
 SELECT key * key FROM DECIMAL_UDF;
 
+EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0;
+SELECT key, value FROM DECIMAL_UDF where key * value > 0;
+
 EXPLAIN SELECT key * value FROM DECIMAL_UDF;
 SELECT key * value FROM DECIMAL_UDF;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_danp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_dap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 1000 order by cint limit 10;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q Mon Oct  6 04:00:39 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table;
@@ -18,7 +17,7 @@ create table acid_dot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table';
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table' TBLPROPERTIES ('transactional'='true');
 
 select count(*) from acid_dot;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dwnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_dwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q Mon Oct  6 04:00:39 2014
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_dwhp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_dwhp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q Mon Oct  6 04:00:39 2014
@@ -19,6 +19,9 @@ load data local inpath '../../data/files
 load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2);
 load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3);
 
+analyze table dim_shops compute statistics;
+analyze table agg_01 partition (dim_shops_id) compute statistics;
+
 select * from dim_shops;
 select * from agg_01;
 
@@ -40,6 +43,73 @@ d1.label in ('foo', 'bar')
 GROUP BY d1.label
 ORDER BY d1.label;
 
+set hive.tez.dynamic.partition.pruning.max.event.size=1000000;
+set hive.tez.dynamic.partition.pruning.max.data.size=1;
+
+EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label;
+
+SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label;
+
+EXPLAIN SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id;
+
+SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id;
+
+EXPLAIN SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1;
+
+SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1;
+
+set hive.tez.dynamic.partition.pruning.max.event.size=1;
+set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
+
+EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label;
+
+SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label;
+
+set hive.tez.dynamic.partition.pruning.max.event.size=100000;
+set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
+
 EXPLAIN 
 SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
 UNION ALL
@@ -47,4 +117,4 @@ SELECT amount FROM agg_01, dim_shops WHE
 
 SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
 UNION ALL
-SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar';
\ No newline at end of file
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar';
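
The two thresholds toggled in this file decide whether Tez dynamic partition pruning is actually applied: if a single pruning event is larger than hive.tez.dynamic.partition.pruning.max.event.size, or the combined events exceed hive.tez.dynamic.partition.pruning.max.data.size, the optimizer is expected to give up on pruning and scan every partition of agg_01. A rough way to compare the two behaviors on one join, assuming the tables loaded earlier in this file (the tiny value is only there to force the fallback):

-- deliberately too small: pruning should be abandoned and all partitions scanned
set hive.tez.dynamic.partition.pruning.max.data.size=1;
EXPLAIN SELECT agg.amount
FROM agg_01 agg, dim_shops d1
WHERE agg.dim_shops_id = d1.id AND d1.label = 'foo';

-- permissive again: the plan should keep the dynamic partition pruning event
set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
EXPLAIN SELECT agg.amount
FROM agg_01 agg, dim_shops d1
WHERE agg.dim_shops_id = d1.id AND d1.label = 'foo';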

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q Mon Oct  6 04:00:39 2014
@@ -108,6 +108,13 @@ set hive.optimize.sort.dynamic.partition
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
+set hive.optimize.sort.dynamic.partition=true;
+-- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q Mon Oct  6 04:00:39 2014
@@ -102,6 +102,13 @@ set hive.optimize.sort.dynamic.partition
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
+set hive.optimize.sort.dynamic.partition=true;
+-- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_into1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_into1.q?rev=1629563&r1=1629562&r2=1629563&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_into1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_into1.q Mon Oct  6 04:00:39 2014
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=true;
 DROP TABLE insert_into1;
 
 CREATE TABLE insert_into1 (key int, value string);
@@ -7,14 +8,18 @@ INSERT INTO TABLE insert_into1 SELECT * 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
-
+explain 
+select count(*) from insert_into1;
+select count(*) from insert_into1;
 EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
 INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
 
+explain
 SELECT COUNT(*) FROM insert_into1;
+select count(*) from insert_into1;
 
 EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
 INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
@@ -22,5 +27,10 @@ SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
 
+explain
+SELECT COUNT(*) FROM insert_into1;
+select count(*) from insert_into1;
 
 DROP TABLE insert_into1;
+
+set hive.compute.query.using.stats=false;
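
The explain/count pairs added to insert_into1.q lean on hive.compute.query.using.stats: when it is enabled, a bare count(*) can be answered from the table's basic statistics rather than by running a job, so the test checks both the plan shape and the returned value after each insert. A quick way to see the same figure the optimizer would use is the numRows entry under Table Parameters (a sketch, assuming the table has not yet been dropped):

set hive.compute.query.using.stats=true;
-- numRows in the Table Parameters section is the statistic a stats-backed count(*) reads
describe formatted insert_into1;
explain select count(*) from insert_into1;
select count(*) from insert_into1;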