Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 05:44:26 UTC

svn commit: r1629562 [11/38] - in /hive/branches/spark: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ contrib/sr...

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Mon Oct  6 03:44:13 2014
@@ -331,8 +331,7 @@ public class TestOperators extends TestC
       Configuration hconf = new JobConf(TestOperators.class);
       HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
           "hdfs:///testDir/testFile");
-      IOContext.get(hconf.get(Utilities.INPUT_NAME)).setInputPath(
-          new Path("hdfs:///testDir/testFile"));
+      IOContext.get().setInputPath(new Path("hdfs:///testDir/testFile"));
 
       // initialize pathToAliases
       ArrayList<String> aliases = new ArrayList<String>();
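
The hunk above swaps the per-input lookup IOContext.get(hconf.get(Utilities.INPUT_NAME)) for the no-arg IOContext.get(), i.e. back to a single thread-local context per reader thread on this branch. A minimal sketch of that accessor shape, with invented class and field names (Hive's IOContext holds an org.apache.hadoop.fs.Path; a String is used here to keep the sketch dependency-free):

    // Illustrative only: a ThreadLocal-backed context with a no-arg get(),
    // mirroring the call shape in the hunk above.
    public final class ThreadLocalContext {
      private static final ThreadLocal<ThreadLocalContext> CURRENT =
          ThreadLocal.withInitial(ThreadLocalContext::new);

      private String inputPath;

      public static ThreadLocalContext get() {
        return CURRENT.get(); // one context instance per reader thread
      }

      public void setInputPath(String path) { this.inputPath = path; }
      public String getInputPath() { return inputPath; }
    }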

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java Mon Oct  6 03:44:13 2014
@@ -26,7 +26,6 @@ import java.util.Random;
 
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 public class TestTezSessionPool {
@@ -158,29 +157,4 @@ public class TestTezSessionPool {
         }
       }
     }
-
-  @Test
-  public void testCloseAndOpenDefault() throws Exception {
-    poolManager = new TestTezSessionPoolManager();
-    TezSessionState session = Mockito.mock(TezSessionState.class);
-    Mockito.when(session.isDefault()).thenReturn(false);
-
-    poolManager.closeAndOpen(session, conf, false);
-
-    Mockito.verify(session).close(false);
-    Mockito.verify(session).open(conf, null);
-  }
-
-  @Test
-  public void testCloseAndOpenWithResources() throws Exception {
-    poolManager = new TestTezSessionPoolManager();
-    TezSessionState session = Mockito.mock(TezSessionState.class);
-    Mockito.when(session.isDefault()).thenReturn(false);
-    String[] extraResources = new String[] { "file:///tmp/foo.jar" };
-
-    poolManager.closeAndOpen(session, conf, extraResources, false);
-
-    Mockito.verify(session).close(false);
-    Mockito.verify(session).open(conf, extraResources);
-  }
 }
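
The two tests deleted above encoded the closeAndOpen contract purely through Mockito verifications: close(false) first, then open(conf, extraResources), with null resources in the no-resource variant. A compilable sketch of that contract, inferred only from those verifications (the stand-in types below are not Hive's):

    // Stand-in type so the sketch compiles on its own; TezSessionState's
    // real API is richer.
    interface Session {
      void close(boolean keepTmpDir);
      void open(Object conf, String[] extraResources);
    }

    final class PoolSketch {
      // close(false), then reopen with whatever extra resources were
      // supplied (null when none), per the removed verifications.
      static void closeAndOpen(Session session, Object conf, String[] extra) {
        session.close(false);
        session.open(conf, extra);
      }
    }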

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java Mon Oct  6 03:44:13 2014
@@ -30,11 +30,9 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,7 +48,6 @@ import org.apache.hadoop.hive.ql.plan.Re
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
-import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.mapred.JobConf;
@@ -93,11 +90,8 @@ public class TestTezTask {
     path = mock(Path.class);
     when(path.getFileSystem(any(Configuration.class))).thenReturn(fs);
     when(utils.getTezDir(any(Path.class))).thenReturn(path);
-    when(
-        utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class),
-            any(LocalResource.class), any(List.class), any(FileSystem.class), any(Context.class),
-            anyBoolean(), any(TezWork.class), any(VertexType.class))).thenAnswer(
-        new Answer<Vertex>() {
+    when(utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), any(LocalResource.class),
+        any(List.class), any(FileSystem.class), any(Context.class), anyBoolean(), any(TezWork.class))).thenAnswer(new Answer<Vertex>() {
 
           @Override
           public Vertex answer(InvocationOnMock invocation) throws Throwable {
@@ -107,8 +101,8 @@ public class TestTezTask {
           }
         });
 
-    when(utils.createEdge(any(JobConf.class), any(Vertex.class), any(Vertex.class),
-            any(TezEdgeProperty.class), any(VertexType.class))).thenAnswer(new Answer<Edge>() {
+    when(utils.createEdge(any(JobConf.class), any(Vertex.class),
+        any(Vertex.class), any(TezEdgeProperty.class))).thenAnswer(new Answer<Edge>() {
 
           @Override
           public Edge answer(InvocationOnMock invocation) throws Throwable {
@@ -210,11 +204,10 @@ public class TestTezTask {
   @Test
   public void testSubmit() throws Exception {
     DAG dag = DAG.create("test");
-    task.submit(conf, dag, path, appLr, sessionState, Collections.<LocalResource> emptyList(),
-        new String[0], Collections.<String,LocalResource> emptyMap());
+    task.submit(conf, dag, path, appLr, sessionState, new LinkedList());
     // validate close/reopen
-    verify(sessionState, times(1)).open(any(HiveConf.class), any(String[].class));
-    verify(sessionState, times(1)).close(eq(true)); // now uses pool after HIVE-7043
+    verify(sessionState, times(1)).open(any(HiveConf.class));
+    verify(sessionState, times(1)).close(eq(false));  // now uses pool after HIVE-7043
     verify(session, times(2)).submitDAG(any(DAG.class));
   }
 
@@ -223,54 +216,4 @@ public class TestTezTask {
     task.close(work, 0);
     verify(op, times(4)).jobClose(any(Configuration.class), eq(true));
   }
-
-  @Test
-  public void testExistingSessionGetsStorageHandlerResources() throws Exception {
-    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
-    LocalResource res = mock(LocalResource.class);
-    final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-
-    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
-        .thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
-    when(sessionState.isOpen()).thenReturn(true);
-    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
-    task.updateSession(sessionState, conf, path, inputOutputJars, resMap);
-    verify(session).addAppMasterLocalFiles(resMap);
-  }
-
-  @Test
-  public void testExtraResourcesAddedToDag() throws Exception {
-    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
-    LocalResource res = mock(LocalResource.class);
-    final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-    DAG dag = mock(DAG.class);
-
-    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
-        .thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
-    when(sessionState.isOpen()).thenReturn(true);
-    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
-    task.addExtraResourcesToDag(sessionState, dag, inputOutputJars, resMap);
-    verify(dag).addTaskLocalFiles(resMap);
-  }
-
-  @Test
-  public void testGetExtraLocalResources() throws Exception {
-    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
-    LocalResource res = mock(LocalResource.class);
-    final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-
-    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
-        .thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
-
-    assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars));
-  }
 }
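
The three tests deleted above shared one fixture: utils.localizeTempFiles returns a list of LocalResources and utils.getBaseName maps each back to "foo.jar", so getExtraLocalResources was expected to return the map {"foo.jar" -> res}. That keying step, reconstructed as a generic helper (names here are stand-ins, not TezTask's internals):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    final class ResourceMapSketch {
      // Key each localized resource by its base name, as the removed
      // testGetExtraLocalResources assertion expected.
      static <R> Map<String, R> byBaseName(List<R> localized,
                                           Function<R, String> baseName) {
        Map<String, R> out = new HashMap<>();
        for (R res : localized) {
          out.put(baseName.apply(res), res);
        }
        return out;
      }
    }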

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java Mon Oct  6 03:44:13 2014
@@ -115,8 +115,7 @@ public class TestHiveBinarySearchRecordR
   }
 
   private void resetIOContext() {
-    conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader");
-    ioContext = IOContext.get(conf.get(Utilities.INPUT_NAME));
+    ioContext = IOContext.get();
     ioContext.setUseSorted(false);
     ioContext.setIsBinarySearching(false);
     ioContext.setEndBinarySearch(false);
@@ -125,7 +124,6 @@ public class TestHiveBinarySearchRecordR
   }
 
   private void init() throws IOException {
-    conf = new JobConf();
     resetIOContext();
     rcfReader = mock(RCFileRecordReader.class);
     when(rcfReader.next((LongWritable)anyObject(),
@@ -133,6 +131,7 @@ public class TestHiveBinarySearchRecordR
     // Since the start is 0, and the length is 100, the first call to sync should be with the value
     // 50 so return that for getPos()
     when(rcfReader.getPos()).thenReturn(50L);
+    conf = new JobConf();
     conf.setBoolean("hive.input.format.sorted", true);
 
     TableDesc tblDesc = Utilities.defaultTd;
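
The getPos() stub above returns 50 because, for a sorted split with start 0 and length 100, the first probe of the binary search lands on the midpoint. In miniature:

    final class MidpointSketch {
      public static void main(String[] args) {
        long start = 0, length = 100;
        long firstSync = start + (length - start) / 2; // 50, matching the stub
        System.out.println("first sync target: " + firstSync);
      }
    }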

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Mon Oct  6 03:44:13 2014
@@ -165,7 +165,7 @@ public class TestSymlinkTextInputFormat 
             + " failed with exit code= " + ecode);
       }
 
-      String cmd = "select key*1 from " + tblName;
+      String cmd = "select key from " + tblName;
       drv.compile(cmd);
 
       //create scratch dir

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Mon Oct  6 03:44:13 2014
@@ -1633,7 +1633,7 @@ public class TestInputOutputFormat {
       assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
           combineSplit.getPath(bucket).toString());
       assertEquals(0, combineSplit.getOffset(bucket));
-      assertEquals(225, combineSplit.getLength(bucket));
+      assertEquals(227, combineSplit.getLength(bucket));
     }
     String[] hosts = combineSplit.getLocations();
     assertEquals(2, hosts.length);

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java Mon Oct  6 03:44:13 2014
@@ -335,104 +335,6 @@ public class TestNewIntegerEncoding {
   }
 
   @Test
-  public void testDeltaOverflow() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(Long.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
-
-    long[] inp = new long[]{4513343538618202719l, 4513343538618202711l,
-        2911390882471569739l,
-        -9181829309989854913l};
-    List<Long> input = Lists.newArrayList(Longs.asList(inp));
-
-    Writer writer = OrcFile.createWriter(
-        testFilePath,
-        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
-            .compress(CompressionKind.NONE).bufferSize(10000));
-    for (Long l : input) {
-      writer.addRow(l);
-    }
-    writer.close();
-
-    Reader reader = OrcFile
-        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
-    RecordReader rows = reader.rows();
-    int idx = 0;
-    while (rows.hasNext()) {
-      Object row = rows.next(null);
-      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
-    }
-  }
-
-  @Test
-  public void testDeltaOverflow2() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(Long.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
-
-    long[] inp = new long[]{Long.MAX_VALUE, 4513343538618202711l,
-        2911390882471569739l,
-        Long.MIN_VALUE};
-    List<Long> input = Lists.newArrayList(Longs.asList(inp));
-
-    Writer writer = OrcFile.createWriter(
-        testFilePath,
-        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
-            .compress(CompressionKind.NONE).bufferSize(10000));
-    for (Long l : input) {
-      writer.addRow(l);
-    }
-    writer.close();
-
-    Reader reader = OrcFile
-        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
-    RecordReader rows = reader.rows();
-    int idx = 0;
-    while (rows.hasNext()) {
-      Object row = rows.next(null);
-      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
-    }
-  }
-
-  @Test
-  public void testDeltaOverflow3() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(Long.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
-
-    long[] inp = new long[]{-4513343538618202711l, -2911390882471569739l, -2,
-        Long.MAX_VALUE};
-    List<Long> input = Lists.newArrayList(Longs.asList(inp));
-
-    Writer writer = OrcFile.createWriter(
-        testFilePath,
-        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
-            .compress(CompressionKind.NONE).bufferSize(10000));
-    for (Long l : input) {
-      writer.addRow(l);
-    }
-    writer.close();
-
-    Reader reader = OrcFile
-        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
-    RecordReader rows = reader.rows();
-    int idx = 0;
-    while (rows.hasNext()) {
-      Object row = rows.next(null);
-      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
-    }
-  }
-
-  @Test
   public void testIntegerMin() throws Exception {
     ObjectInspector inspector;
     synchronized (TestOrcFile.class) {
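
The three testDeltaOverflow cases removed above all write runs of longs whose consecutive differences do not fit in a signed 64-bit value, which is exactly where a naive delta encoder goes wrong. The hazard is silent two's-complement wraparound:

    final class DeltaWrapSketch {
      public static void main(String[] args) {
        long prev = Long.MAX_VALUE;
        long next = Long.MIN_VALUE;
        long delta = next - prev; // wraps to 1; the true difference is -(2^64 - 1)
        System.out.println(delta); // prints 1
      }
    }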

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Mon Oct  6 03:44:13 2014
@@ -1754,9 +1754,9 @@ public class TestOrcFile {
           stripe.getDataLength() < 5000);
     }
     // with HIVE-7832, the dictionaries will be disabled after writing the first
-    // stripe as there are too many distinct values. Hence only 4 stripes as
+    // stripe as there are too many distinct values. Hence only 3 stripes as
     // compared to 25 stripes in version 0.11 (above test case)
-    assertEquals(4, i);
+    assertEquals(3, i);
     assertEquals(2500, reader.getNumberOfRows());
   }
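
The updated comment says the writer abandons dictionary encoding after the first stripe once a column shows too many distinct values (HIVE-7832), which is why the stripe count in the assertion changed. A plausible shape for such a cutoff check, purely illustrative (the threshold and names below are not taken from the ORC writer):

    final class DictionaryCutoffSketch {
      // Keep the dictionary only while distinct keys stay a small enough
      // fraction of the rows seen so far in the stripe.
      static boolean keepDictionary(int distinctKeys, int rowsInStripe,
                                    double threshold) {
        return rowsInStripe > 0
            && (double) distinctKeys / rowsInStripe <= threshold;
      }
    }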
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java Mon Oct  6 03:44:13 2014
@@ -17,18 +17,15 @@
  */
 package org.apache.hadoop.hive.ql.io.orc;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import org.junit.Test;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
 import java.math.BigInteger;
 
-import org.junit.Test;
-
-import com.google.common.math.LongMath;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
 public class TestSerializationUtils {
 
@@ -115,47 +112,6 @@ public class TestSerializationUtils {
         SerializationUtils.readBigInteger(fromBuffer(buffer)));
   }
 
-  @Test
-  public void testSubtractionOverflow() {
-    // cross check results with Guava results below
-    SerializationUtils utils = new SerializationUtils();
-    assertEquals(false, utils.isSafeSubtract(22222222222L, Long.MIN_VALUE));
-    assertEquals(false, utils.isSafeSubtract(-22222222222L, Long.MAX_VALUE));
-    assertEquals(false, utils.isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE));
-    assertEquals(true, utils.isSafeSubtract(-1553103058346370095L, 6553103058346370095L));
-    assertEquals(true, utils.isSafeSubtract(0, Long.MAX_VALUE));
-    assertEquals(true, utils.isSafeSubtract(Long.MIN_VALUE, 0));
-  }
-
-  @Test
-  public void testSubtractionOverflowGuava() {
-    try {
-      LongMath.checkedSubtract(22222222222L, Long.MIN_VALUE);
-      fail("expected ArithmeticException for overflow");
-    } catch (ArithmeticException ex) {
-      assertEquals(ex.getMessage(), "overflow");
-    }
-
-    try {
-      LongMath.checkedSubtract(-22222222222L, Long.MAX_VALUE);
-      fail("expected ArithmeticException for overflow");
-    } catch (ArithmeticException ex) {
-      assertEquals(ex.getMessage(), "overflow");
-    }
-
-    try {
-      LongMath.checkedSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
-      fail("expected ArithmeticException for overflow");
-    } catch (ArithmeticException ex) {
-      assertEquals(ex.getMessage(), "overflow");
-    }
-
-    assertEquals(-8106206116692740190L,
-        LongMath.checkedSubtract(-1553103058346370095L, 6553103058346370095L));
-    assertEquals(-Long.MAX_VALUE, LongMath.checkedSubtract(0, Long.MAX_VALUE));
-    assertEquals(Long.MIN_VALUE, LongMath.checkedSubtract(Long.MIN_VALUE, 0));
-  }
-
   public static void main(String[] args) throws Exception {
     TestSerializationUtils test = new TestSerializationUtils();
     test.testDoubles();
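
The removed testSubtractionOverflow cross-checked SerializationUtils.isSafeSubtract against Guava's LongMath.checkedSubtract on the same inputs. The usual branch-free form of that check is below; it reproduces every removed assertion, though it is the textbook idiom rather than necessarily Hive's exact code:

    final class SafeSubtractSketch {
      // left - right overflows iff the operands differ in sign and the
      // result differs in sign from left.
      static boolean isSafeSubtract(long left, long right) {
        long diff = left - right;
        return ((left ^ right) & (left ^ diff)) >= 0;
      }

      public static void main(String[] args) {
        System.out.println(isSafeSubtract(22222222222L, Long.MIN_VALUE));  // false
        System.out.println(isSafeSubtract(-22222222222L, Long.MAX_VALUE)); // false
        System.out.println(isSafeSubtract(0, Long.MAX_VALUE));             // true
        System.out.println(isSafeSubtract(Long.MIN_VALUE, 0));             // true
      }
    }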

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Mon Oct  6 03:44:13 2014
@@ -18,12 +18,9 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -31,10 +28,7 @@ import java.util.Map;
 
 import junit.framework.TestCase;
 
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -69,9 +63,6 @@ public class TestHive extends TestCase {
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
-    // enable trash so it can be tested
-    hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
-    hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
     SessionState.start(hiveConf);
     try {
       hm = Hive.get(hiveConf);
@@ -88,9 +79,6 @@ public class TestHive extends TestCase {
   protected void tearDown() throws Exception {
     try {
       super.tearDown();
-      // disable trash
-      hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
-      hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
       Hive.closeCurrent();
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
@@ -306,7 +294,7 @@ public class TestHive extends TestCase {
     try {
       String dbName = "db_for_testgettables";
       String table1Name = "table1";
-      hm.dropDatabase(dbName, true, true, true);
+      hm.dropDatabase(dbName, true, true);
 
       Database db = new Database();
       db.setName(dbName);
@@ -342,92 +330,16 @@ public class TestHive extends TestCase {
 
       // Drop all tables
       for (String tableName : hm.getAllTables(dbName)) {
-        Table table = hm.getTable(dbName, tableName);
         hm.dropTable(dbName, tableName);
-        assertFalse(fs.exists(table.getPath()));
       }
       hm.dropDatabase(dbName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testGetAndDropTables() failed");
+      System.err.println("testGetTables() failed");
       throw e;
     }
   }
 
-  public void testDropTableTrash() throws Throwable {
-    try {
-      String dbName = "db_for_testdroptable";
-      hm.dropDatabase(dbName, true, true, true);
-
-      Database db = new Database();
-      db.setName(dbName);
-      hm.createDatabase(db);
-
-      List<String> ts = new ArrayList<String>(2);
-      String tableBaseName = "droptable";
-      ts.add(tableBaseName + "1");
-      ts.add(tableBaseName + "2");
-      Table tbl1 = createTestTable(dbName, ts.get(0));
-      hm.createTable(tbl1);
-      Table tbl2 = createTestTable(dbName, ts.get(1));
-      hm.createTable(tbl2);
-      // test dropping tables and trash behavior
-      Table table1 = hm.getTable(dbName, ts.get(0));
-      assertNotNull(table1);
-      assertEquals(ts.get(0), table1.getTableName());
-      Path path1 = table1.getPath();
-      FileSystem fs = path1.getFileSystem(hiveConf);
-      assertTrue(fs.exists(path1));
-      // drop table and check that trash works
-      TrashPolicy tp = TrashPolicy.getInstance(hiveConf, fs, fs.getHomeDirectory());
-      assertNotNull("TrashPolicy instance should not be null", tp);
-      assertTrue("TrashPolicy is not enabled for filesystem: " + fs.getUri(), tp.isEnabled());
-      Path trashDir = tp.getCurrentTrashDir();
-      assertNotNull("trash directory should not be null", trashDir);
-      Path trash1 = Path.mergePaths(trashDir, path1);
-      Path pathglob = trash1.suffix("*");
-      FileStatus before[] = fs.globStatus(pathglob);
-      hm.dropTable(dbName, ts.get(0));
-      assertFalse(fs.exists(path1));
-      FileStatus after[] = fs.globStatus(pathglob);
-      assertTrue("trash dir before and after DROP TABLE noPURGE are not different",
-                 before.length != after.length);
-
-      // drop a table without saving to trash by setting the purge option
-      Table table2 = hm.getTable(dbName, ts.get(1));
-      assertNotNull(table2);
-      assertEquals(ts.get(1), table2.getTableName());
-      Path path2 = table2.getPath();
-      assertTrue(fs.exists(path2));
-      Path trash2 = Path.mergePaths(trashDir, path2);
-      System.out.println("trashDir2 is " + trash2);
-      pathglob = trash2.suffix("*");
-      before = fs.globStatus(pathglob);
-      hm.dropTable(dbName, ts.get(1), true, true, true); // deleteData, ignoreUnknownTable, ifPurge
-      assertFalse(fs.exists(path2));
-      after = fs.globStatus(pathglob);
-      Arrays.sort(before);
-      Arrays.sort(after);
-      assertEquals("trash dir before and after DROP TABLE PURGE are different",
-                   before.length, after.length);
-      assertTrue("trash dir before and after DROP TABLE PURGE are different",
-                 Arrays.equals(before, after));
-
-      // Drop all tables
-      for (String tableName : hm.getAllTables(dbName)) {
-        Table table = hm.getTable(dbName, tableName);
-        hm.dropTable(dbName, tableName);
-        assertFalse(fs.exists(table.getPath()));
-      }
-      hm.dropDatabase(dbName);
-    } catch (Throwable e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testDropTableTrash() failed");
-      throw e;
-    }
-  }
-
-
   public void testPartition() throws Throwable {
     try {
       String tableName = "table_for_testpartition";
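
The removed testDropTableTrash worked by snapshotting the expected trash location before and after each drop. Condensed to its core (the variables and calls below all come from the removed hunk; this is a digest, not new behavior):

    // trashDir = tp.getCurrentTrashDir(); path1 = table1.getPath();
    Path pathglob = Path.mergePaths(trashDir, path1).suffix("*");
    FileStatus[] before = fs.globStatus(pathglob);
    hm.dropTable(dbName, ts.get(0));           // plain drop: data moves to trash
    FileStatus[] after = fs.globStatus(pathglob);
    // expected: before.length != after.length, whereas a drop with
    // (deleteData=true, ignoreUnknownTable=true, ifPurge=true) leaves the
    // trash snapshot unchanged.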

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java Mon Oct  6 03:44:13 2014
@@ -84,13 +84,6 @@ public class TestHiveRemote extends Test
   }
 
   /**
-   * Cannot control trash in remote metastore, so skip this test
-   */
-  @Override
-  public void testDropTableTrash() {
-  }
-
-  /**
    * Finds a free port.
    *
    * @return a free port

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java Mon Oct  6 03:44:13 2014
@@ -24,9 +24,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-/**
- * various Parser tests for INSERT/UPDATE/DELETE
- */
 public class TestIUD {
   private static HiveConf conf;
 
@@ -105,18 +102,6 @@ public class TestIUD {
       ast.toStringTree());
   }
   @Test
-  public void testUpdateWithWhereSingleSetExpr() throws ParseException {
-    ASTNode ast = parse("UPDATE src SET key = -3+(5*9)%8, val = cast(6.1 + c as INT), d = d - 1 WHERE value IS NULL");
-    Assert.assertEquals("AST doesn't match",
-      "(TOK_UPDATE_TABLE (TOK_TABNAME src) " +
-        "(TOK_SET_COLUMNS_CLAUSE " +
-        "(= (TOK_TABLE_OR_COL key) (+ (- 3) (% (* 5 9) 8))) " +
-        "(= (TOK_TABLE_OR_COL val) (TOK_FUNCTION TOK_INT (+ 6.1 (TOK_TABLE_OR_COL c)))) " +
-        "(= (TOK_TABLE_OR_COL d) (- (TOK_TABLE_OR_COL d) 1))) " +
-        "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (TOK_TABLE_OR_COL value))))",
-      ast.toStringTree());
-  }
-  @Test
   public void testUpdateWithWhereMultiSet() throws ParseException {
     ASTNode ast = parse("UPDATE src SET key = 3, value = 8 WHERE VALUE = 1230997");
     Assert.assertEquals("AST doesn't match", 
@@ -222,13 +207,13 @@ public class TestIUD {
   }
   @Test
   public void testInsertIntoTableFromAnonymousTable() throws ParseException {
-    ASTNode ast = parse("insert into table page_view values(-1,2),(3,+4)");
+    ASTNode ast = parse("insert into table page_view values(1,2),(3,4)");
     Assert.assertEquals("AST doesn't match",
       "(TOK_QUERY " +
         "(TOK_FROM " +
           "(TOK_VIRTUAL_TABLE " +
           "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " +
-          "(TOK_VALUES_TABLE (TOK_VALUE_ROW (- 1) 2) (TOK_VALUE_ROW 3 (+ 4))))) " +
+          "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " +
         "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " +
           "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))",
       ast.toStringTree());

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java Mon Oct  6 03:44:13 2014
@@ -23,16 +23,11 @@ import java.util.List;
 import junit.framework.Assert;
 
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
-import org.apache.hadoop.mapred.JobConf;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 public class TestTezWork {
 
-  private static final String MR_JAR_PROPERTY = "tmpjars";
   private List<BaseWork> nodes;
   private TezWork work;
 
@@ -161,75 +156,4 @@ public class TestTezWork {
       Assert.assertEquals(sorted.get(i), nodes.get(4-i));
     }
   }
-
-  @Test
-  public void testConfigureJars() throws Exception {
-    final JobConf conf = new JobConf();
-    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
-    BaseWork baseWork = Mockito.mock(BaseWork.class);
-    Mockito.doAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        conf.set(MR_JAR_PROPERTY, "file:///tmp/foo2.jar");
-        return null;
-      }
-
-    }).when(baseWork).configureJobConf(conf);
-
-    work.add(baseWork);
-    work.configureJobConfAndExtractJars(conf);
-    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
-  }
-
-  @Test
-  public void testConfigureJarsNoExtraJars() throws Exception {
-    final JobConf conf = new JobConf();
-    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
-    BaseWork baseWork = Mockito.mock(BaseWork.class);
-
-    work.add(baseWork);
-    work.configureJobConfAndExtractJars(conf);
-    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
-  }
-
-  @Test
-  public void testConfigureJarsWithNull() throws Exception {
-    final JobConf conf = new JobConf();
-    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
-    BaseWork baseWork = Mockito.mock(BaseWork.class);
-    Mockito.doAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        conf.unset(MR_JAR_PROPERTY);
-        return null;
-      }
-
-    }).when(baseWork).configureJobConf(conf);
-
-    work.add(baseWork);
-    work.configureJobConfAndExtractJars(conf);
-    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
-  }
-
-  @Test
-  public void testConfigureJarsStartingWithNull() throws Exception {
-    final JobConf conf = new JobConf();
-    conf.unset(MR_JAR_PROPERTY);
-    BaseWork baseWork = Mockito.mock(BaseWork.class);
-    Mockito.doAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        conf.setStrings(MR_JAR_PROPERTY, "file:///tmp/foo1.jar", "file:///tmp/foo2.jar");
-        return null;
-      }
-
-    }).when(baseWork).configureJobConf(conf);
-
-    work.add(baseWork);
-    work.configureJobConfAndExtractJars(conf);
-    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
-  }
 }
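
The four configureJars tests removed above pinned the merge semantics of configureJobConfAndExtractJars for the MR "tmpjars" property: the value present beforehand is unioned with whatever the BaseWork objects set, null on either side is tolerated, and first-seen order wins. A dependency-free sketch of that union (the merge helper is illustrative, not TezWork's code):

    import java.util.LinkedHashSet;
    import java.util.Set;

    final class JarMergeSketch {
      // Union of two comma-separated jar lists, preserving first-seen order
      // and tolerating null/empty on either side, per the removed assertions.
      static String mergeJars(String before, String after) {
        Set<String> jars = new LinkedHashSet<>();
        for (String list : new String[] { before, after }) {
          if (list == null || list.isEmpty()) {
            continue;
          }
          for (String jar : list.split(",")) {
            jars.add(jar);
          }
        }
        return String.join(",", jars);
      }

      public static void main(String[] args) {
        // Mirrors testConfigureJars: foo1 was set before, foo2 added by the work.
        System.out.println(mergeJars("file:///tmp/foo1.jar", "file:///tmp/foo2.jar"));
        // -> file:///tmp/foo1.jar,file:///tmp/foo2.jar
      }
    }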

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java Mon Oct  6 03:44:13 2014
@@ -98,6 +98,14 @@ public class TestUDFMath {
     input = createDecimal("7.38905609893065");
     DoubleWritable res = udf.evaluate(input);
     Assert.assertEquals(2.0, res.get(), 0.000001);
+    
+    DoubleWritable input = new DoubleWritable(9.0);
+    res = udf.evaluate(createDecimal("3.0"), input);
+    Assert.assertEquals(2.0, res.get(), 0.000001);
+
+    DoubleWritable base = new DoubleWritable(3.0);
+    res = udf.evaluate(base, createDecimal("9.0"));
+    Assert.assertEquals(2.0, res.get(), 0.000001);
 
     res = udf.evaluate(createDecimal("3.0"), createDecimal("9.0"));
     Assert.assertEquals(2.0, res.get(), 0.000001);
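
Every assertion in this hunk expects 2.0: the one-argument call computes ln(7.38905609893065) = ln(e^2) = 2, and each two-argument combination computes log base 3 of 9 = 2. The identity being exercised:

    final class LogBaseSketch {
      public static void main(String[] args) {
        System.out.println(Math.log(7.38905609893065));    // ~2.0 (natural log)
        System.out.println(Math.log(9.0) / Math.log(3.0)); // 2.0: log_3(9)
      }
    }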

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/acid_overwrite.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q Mon Oct  6 03:44:13 2014
@@ -5,6 +5,7 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q Mon Oct  6 03:44:13 2014
@@ -5,6 +5,7 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/update_no_such_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/update_partition_col.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/acid_vectorization.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q Mon Oct  6 03:44:13 2014
@@ -1,25 +1,4 @@
 set hive.stats.fetch.column.stats=true;
-set hive.map.aggr.hash.percentmemory=0.0f;
-
--- hash aggregation is disabled
-
--- There are different cases for Group By depending on map/reduce side, hash aggregation,
--- grouping sets and column stats. If we don't have column stats, we just assume hash
--- aggregation is disabled. Following are the possible cases and rule for cardinality
--- estimation
-
--- MAP SIDE:
--- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows
--- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet
--- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism)
--- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
--- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows
--- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet
-
--- REDUCE SIDE:
--- Case 7: NO column stats — numRows / 2
--- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet)
--- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 
 create table if not exists loc_staging (
   state string,
@@ -50,91 +29,71 @@ from ( select state as a, locid as b, co
      ) sq1
 group by a,c;
 
-analyze table loc_orc compute statistics for columns state,locid,year;
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
 
--- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
--- Case 9: column stats, NO grouping sets - cardinality = 2
+-- only one distinct value in year column + 1 NULL value
+-- map-side GBY: numRows: 8 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2
 explain select year from loc_orc group by year;
 
--- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
--- Case 9: column stats, NO grouping sets - cardinality = 8
+-- map-side GBY: numRows: 8
+-- reduce-side GBY: numRows: 4
 explain select state,locid from loc_orc group by state,locid;
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
--- Case 8: column stats, grouping sets - cardinality = 32
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
 explain select state,locid from loc_orc group by state,locid with cube;
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
--- Case 8: column stats, grouping sets - cardinality = 24
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
 explain select state,locid from loc_orc group by state,locid with rollup;
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
--- Case 8: column stats, grouping sets - cardinality = 8
+-- map-side GBY numRows: 8 reduce-side GBY numRows: 4
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
--- Case 8: column stats, grouping sets - cardinality = 16
+-- map-side GBY numRows: 16 reduce-side GBY numRows: 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
--- Case 8: column stats, grouping sets - cardinality = 24
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
 
--- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
--- Case 8: column stats, grouping sets - cardinality = 32
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
 
-set hive.map.aggr.hash.percentmemory=0.5f;
-set mapred.max.split.size=80;
--- map-side parallelism will be 10
+set hive.stats.map.parallelism=10;
 
--- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
--- Case 9: column stats, NO grouping sets - cardinality = 2
+-- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
 explain select year from loc_orc group by year;
 
--- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
--- Case 8: column stats, grouping sets - cardinality = 16
+-- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
 explain select state,locid from loc_orc group by state,locid with cube;
 
--- ndvProduct becomes 0 as zip does not have column stats
--- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
--- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select state,zip from loc_orc group by state,zip;
-
-set mapred.max.split.size=1000;
 set hive.stats.fetch.column.stats=false;
+set hive.stats.map.parallelism=1;
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
--- Case 7: NO column stats - cardinality = 16
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
 explain select state,locid from loc_orc group by state,locid with cube;
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
--- Case 7: NO column stats - cardinality = 12
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
 explain select state,locid from loc_orc group by state,locid with rollup;
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
--- Case 7: NO column stats - cardinality = 4
+-- map-side GBY numRows: 8 reduce-side GBY numRows: 4
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16
--- Case 7: NO column stats - cardinality = 8
+-- map-side GBY numRows: 16 reduce-side GBY numRows: 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
--- Case 7: NO column stats - cardinality = 12
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
--- Case 7: NO column stats - cardinality = 16
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
 
-set mapred.max.split.size=80;
+set hive.stats.map.parallelism=10;
 
--- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
--- Case 7: NO column stats - cardinality = 4
+-- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
 explain select year from loc_orc group by year;
 
--- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
--- Case 7: NO column stats - cardinality = 16
+-- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
 explain select state,locid from loc_orc group by state,locid with cube;
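
The Case 1-9 comments removed at the top of this file are the group-by cardinality rules that the new inline comments paraphrase. The map-side rules restated in code form (a transcription of those comments, not Hive's actual estimator):

    final class GroupByCardinalitySketch {
      // Cases 1/2/5/6: without column stats or without hash aggregation there
      // is no reduction; grouping sets multiply the row count. Cases 3/4:
      // hash aggregation halves the rows, capped by ndvProduct * parallelism.
      static long mapSideRows(long numRows, long ndvProduct, int parallelism,
                              int groupingSetSize, boolean colStats,
                              boolean hashAggr) {
        if (!colStats || !hashAggr) {
          return numRows * groupingSetSize;
        }
        long halved = (numRows * groupingSetSize) / 2;
        return Math.min(halved, ndvProduct * parallelism * (long) groupingSetSize);
      }
    }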
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q Mon Oct  6 03:44:13 2014
@@ -65,9 +65,6 @@ explain select zip from loc_orc;
 -- basicStatState: COMPLETE colStatState: PARTIAL
 explain select state from loc_orc;
 
--- basicStatState: COMPLETE colStatState: COMPLETE
-explain select year from loc_orc;
-
 -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
 explain select state,locid from loc_orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete.q Mon Oct  6 03:44:13 2014
@@ -4,6 +4,7 @@ set hive.security.authenticator.manager=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 set user.name=user1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_delete_own_table.q Mon Oct  6 03:44:13 2014
@@ -5,6 +5,7 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update.q Mon Oct  6 03:44:13 2014
@@ -4,6 +4,7 @@ set hive.security.authenticator.manager=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 set user.name=user1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_update_own_table.q Mon Oct  6 03:44:13 2014
@@ -5,6 +5,7 @@ set hive.security.authorization.enabled=
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/create_func1.q Mon Oct  6 03:44:13 2014
@@ -2,16 +2,11 @@
 -- qtest_get_java_boolean should already be created during test initialization
 select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1;
 
-describe function extended qtest_get_java_boolean;
-
 create database mydb;
 create function mydb.func1 as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper';
 
 show functions mydb.func1;
 
-describe function extended mydb.func1;
-
-
 select mydb.func1('abc') from src limit 1;
 
 drop function mydb.func1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/decimal_udf.q Mon Oct  6 03:44:13 2014
@@ -39,9 +39,6 @@ SELECT key - '1.0' FROM DECIMAL_UDF;
 EXPLAIN SELECT key * key FROM DECIMAL_UDF;
 SELECT key * key FROM DECIMAL_UDF;
 
-EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0;
-SELECT key, value FROM DECIMAL_UDF where key * value > 0;
-
 EXPLAIN SELECT key * value FROM DECIMAL_UDF;
 SELECT key * value FROM DECIMAL_UDF;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_orig_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_tmp_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_no_match.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q Mon Oct  6 03:44:13 2014
@@ -19,9 +19,6 @@ load data local inpath '../../data/files
 load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2);
 load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3);
 
-analyze table dim_shops compute statistics;
-analyze table agg_01 partition (dim_shops_id) compute statistics;
-
 select * from dim_shops;
 select * from agg_01;
 
@@ -43,73 +40,6 @@ d1.label in ('foo', 'bar')
 GROUP BY d1.label
 ORDER BY d1.label;
 
-set hive.tez.dynamic.partition.pruning.max.event.size=1000000;
-set hive.tez.dynamic.partition.pruning.max.data.size=1;
-
-EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and
-d1.label in ('foo', 'bar')
-GROUP BY d1.label
-ORDER BY d1.label;
-
-SELECT d1.label, count(*), sum(agg.amount)
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and
-d1.label in ('foo', 'bar')
-GROUP BY d1.label
-ORDER BY d1.label;
-
-EXPLAIN SELECT d1.label
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id;
-
-SELECT d1.label
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id;
-
-EXPLAIN SELECT agg.amount
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and agg.dim_shops_id = 1;
-
-SELECT agg.amount
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and agg.dim_shops_id = 1;
-
-set hive.tez.dynamic.partition.pruning.max.event.size=1;
-set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
-
-EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and
-d1.label in ('foo', 'bar')
-GROUP BY d1.label
-ORDER BY d1.label;
-
-SELECT d1.label, count(*), sum(agg.amount)
-FROM agg_01 agg,
-dim_shops d1
-WHERE agg.dim_shops_id = d1.id
-and
-d1.label in ('foo', 'bar')
-GROUP BY d1.label
-ORDER BY d1.label;
-
-set hive.tez.dynamic.partition.pruning.max.event.size=100000;
-set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
-
 EXPLAIN 
 SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
 UNION ALL
@@ -117,4 +47,4 @@ SELECT amount FROM agg_01, dim_shops WHE
 
 SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
 UNION ALL
-SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar';
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar';
\ No newline at end of file
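
The removed hunks above flipped the two Tez-only pruning thresholds to force the optimizer down each path, which has no Tez equivalent to exercise on the spark branch. In sketch form, the two regimes they covered; the interpretation of the knobs is a presumption, not stated in the commit:

    -- Presumably: if the runtime pruning events exceed either cap, Hive falls
    -- back to scanning all partitions, which should be visible in the plan.
    set hive.tez.dynamic.partition.pruning.max.event.size=1000000;
    set hive.tez.dynamic.partition.pruning.max.data.size=1;
    explain select count(*) from agg_01, dim_shops where dim_shops_id = id and label = 'foo';

    set hive.tez.dynamic.partition.pruning.max.event.size=1;
    set hive.tez.dynamic.partition.pruning.max.data.size=1000000;
    explain select count(*) from agg_01, dim_shops where dim_shops_id = id and label = 'foo';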

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q Mon Oct  6 03:44:13 2014
@@ -108,13 +108,6 @@ set hive.optimize.sort.dynamic.partition
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
-explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27;
-
-set hive.optimize.sort.dynamic.partition=false;
-explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
-set hive.optimize.sort.dynamic.partition=true;
--- tests for HIVE-8162, only partition column 't' should be in last RS operator
-explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q Mon Oct  6 03:44:13 2014
@@ -102,13 +102,6 @@ set hive.optimize.sort.dynamic.partition
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27;
-
-set hive.optimize.sort.dynamic.partition=false;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
-set hive.optimize.sort.dynamic.partition=true;
--- tests for HIVE-8162, only partition column 't' should be in last RS operator
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
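
The hunks removed here and from the vectorized over1k_part2_orc variant above are the same: they covered HIVE-8162, which asserts that with the optimization enabled the final ReduceSink keys only on the dynamic partition column. Reassembled from the removed lines:

    -- With the optimization on, the last ReduceSinkOperator in the EXPLAIN
    -- output should carry only the dynamic partition column t, not the full
    -- group-by key si,i,b,f,t.
    set hive.optimize.sort.dynamic.partition=true;
    explain insert overwrite table over1k_part2 partition(ds="foo",t)
        select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;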

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_orig_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_iot(

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,6 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table ivdp(i int,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivnp(ti tinyint,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_orig_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivot(

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivp(ti tinyint,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_tmp_table.q Mon Oct  6 03:44:13 2014
@@ -1,12 +1,12 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc;
 
 insert into table acid_ivtt values 
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow'),
-    (-29496729, -0.14, 'negative values test');
+    (429496729, 0.14, 'its fleece was white as snow');
 
 select i, de, vc from acid_ivtt order by i;
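
The hunk drops the negative-value row, presumably pending support on this branch. As a reminder of how the multi-row form behaves here, a sketch under the assumption (from Hive's documented VALUES rewrite) that the tuple list is materialized through a temporary table, so each field must be a plain literal:

    -- Sketch: multi-row INSERT ... VALUES on the same ACID temp table;
    -- expressions such as (1 + 1) in place of a literal are not accepted.
    insert into table acid_ivtt values
        (2, 11.50, 'a literal row'),
        (3, 0.99, 'another literal row');
    select i, de, vc from acid_ivtt order by i;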

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_types.q Mon Oct  6 03:44:13 2014
@@ -10,14 +10,9 @@ CREATE TABLE parquet_types_staging (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10),
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>
+  cvarchar varchar(10)
 ) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-COLLECTION ITEMS TERMINATED BY ','
-MAP KEYS TERMINATED BY ':';
+FIELDS TERMINATED BY '|';
 
 CREATE TABLE parquet_types (
   cint int,
@@ -28,10 +23,7 @@ CREATE TABLE parquet_types (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10),
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>
+  cvarchar varchar(10)
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
@@ -40,8 +32,6 @@ INSERT OVERWRITE TABLE parquet_types SEL
 
 SELECT * FROM parquet_types;
 
-SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types;
-
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),
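
This hunk strips the map, array, and struct columns (and their collection delimiters) from both tables, presumably because the branch's Parquet support does not yet cover complex types. What remains is the standard text-staging-to-Parquet conversion pattern, sketched here with hypothetical table and file names:

    -- Sketch only: load delimited text into a staging table, then convert
    -- into Parquet with INSERT ... SELECT.
    create table demo_staging (id int, name string)
        row format delimited fields terminated by '|';
    load data local inpath '../../data/files/demo.txt'   -- hypothetical file
        overwrite into table demo_staging;
    create table demo_parquet (id int, name string) stored as parquet;
    insert overwrite table demo_parquet select id, name from demo_staging;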

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q Mon Oct  6 03:44:13 2014
@@ -1,4 +1,4 @@
--- SORT_BEFORE_DIFF
+
 
 create table partition_test_partitioned(key string, value string) partitioned by (dt string);
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/quote2.q Mon Oct  6 03:44:13 2014
@@ -10,7 +10,6 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
-    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -25,7 +24,6 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
-    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
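
The removed column pair tested backslash escapes of the kind used in LIKE patterns. For orientation, the escape semantics the surviving literals rely on, illustrated outside the commit: in HiveQL string literals a doubled backslash encodes one backslash and \t encodes a tab, in both single- and double-quoted forms:

    SELECT
        'abc\\',                        -- abc followed by one literal backslash
        'tab\ttab',                     -- embedded tab character
        "awk '{print NR\"\\t\"$0}'"     -- single quotes are literal inside double quotes
    FROM src LIMIT 1;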

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Mon Oct  6 03:44:13 2014
@@ -1,6 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uami(i int,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_non_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_types.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uat(ti tinyint,
@@ -52,11 +53,4 @@ update acid_uat set
 
 select * from acid_uat order by i;
 
-update acid_uat set
-  ti = ti * 2,
-  si = cast(f as int),
-  d = floor(de)
-  where s = 'aw724t8c5558x2xneC624';
-
 
-select * from acid_uat order by i;
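
The removed second UPDATE exercised expressions and casts on the right-hand side of SET. Reassembled from the removed lines, with the caveat (true of Hive UPDATE generally) that the column the table is clustered by may not itself be assigned:

    -- SET accepts arithmetic, casts, and UDF calls on ordinary columns.
    update acid_uat set
        ti = ti * 2,
        si = cast(f as int),
        d  = floor(de)
        where s = 'aw724t8c5558x2xneC624';
    select * from acid_uat order by i;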

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_orig_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_tmp_table.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_two_cols.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_no_match.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_non_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q Mon Oct  6 03:44:13 2014
@@ -1,5 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_char_simple.q Mon Oct  6 03:44:13 2014
@@ -41,16 +41,3 @@ order by key desc
 limit 5;
 
 drop table char_2;
-
-
--- Implicit conversion.  Occurs in reduce-side under Tez.
-create table char_3 (
-  field char(12)
-) stored as orc;
-
-explain
-insert into table char_3 select cint from alltypesorc limit 10;
-
-insert into table char_3 select cint from alltypesorc limit 10;
-
-drop table char_3;
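
The removed tail covered implicit conversion of an int column into a char(12) column, which under Tez happens on the reduce side of a vectorized insert; there is no Tez path to exercise on this branch. The scenario, reassembled with an illustrative table name:

    -- Inserting cint into a char(12) column forces an implicit cast; the
    -- EXPLAIN shows where the conversion lands in the vectorized plan.
    set hive.vectorized.execution.enabled=true;
    create table char_demo (field char(12)) stored as orc;
    explain insert into table char_demo select cint from alltypesorc limit 10;
    insert into table char_demo select cint from alltypesorc limit 10;
    drop table char_demo;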

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_varchar_simple.q Mon Oct  6 03:44:13 2014
@@ -1,12 +1,12 @@
 SET hive.vectorized.execution.enabled=true;
-drop table varchar_2;
+drop table char_2;
 
-create table varchar_2 (
+create table char_2 (
   key varchar(10),
   value varchar(20)
 ) stored as orc;
 
-insert overwrite table varchar_2 select * from src;
+insert overwrite table char_2 select * from src;
 
 select key, value
 from src
@@ -14,13 +14,13 @@ order by key asc
 limit 5;
 
 explain select key, value
-from varchar_2
+from char_2
 order by key asc
 limit 5;
 
 -- should match the query from src
 select key, value
-from varchar_2
+from char_2
 order by key asc
 limit 5;
 
@@ -30,26 +30,14 @@ order by key desc
 limit 5;
 
 explain select key, value
-from varchar_2
+from char_2
 order by key desc
 limit 5;
 
 -- should match the query from src
 select key, value
-from varchar_2
+from char_2
 order by key desc
 limit 5;
 
-drop table varchar_2;
-
--- Implicit conversion.  Occurs in reduce-side under Tez.
-create table varchar_3 (
-  field varchar(25)
-) stored as orc;
-
-explain
-insert into table varchar_3 select cint from alltypesorc limit 10;
-
-insert into table varchar_3 select cint from alltypesorc limit 10;
-
-drop table varchar_3;
+drop table char_2;
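
After the rename, this file keeps the verification pattern used throughout: each ordered, limited query against the ORC-backed table is paired with the identical query against the text-backed src table, and the two five-row results should match under vectorized execution. In sketch form:

    set hive.vectorized.execution.enabled=true;
    select key, value from src order by key asc limit 5;
    select key, value from char_2 order by key asc limit 5;   -- expect the same rows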