You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by br...@apache.org on 2014/11/18 01:48:46 UTC

svn commit: r1640263 [8/12] - in /hive/branches/spark: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ accu...

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Tue Nov 18 00:48:40 2014
@@ -52,6 +52,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Stack;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * Super class for all of the compactor test modules.
@@ -65,7 +66,7 @@ public abstract class CompactorTest {
   protected long sleepTime = 1000;
   protected HiveConf conf;
 
-  private final MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer();
+  private final AtomicBoolean stop = new AtomicBoolean();
   private final File tmpdir;
 
   protected CompactorTest() throws Exception {
@@ -92,7 +93,7 @@ public abstract class CompactorTest {
     startThread('c', true);
   }
 
-  protected void startCleaner(MetaStoreThread.BooleanPointer looped) throws Exception {
+  protected void startCleaner(AtomicBoolean looped) throws Exception {
     startThread('c', false, looped);
   }
 
@@ -190,7 +191,7 @@ public abstract class CompactorTest {
   }
 
   protected void stopThread() {
-    stop.boolVal = true;
+    stop.set(true);
   }
 
   private StorageDescriptor newStorageDescriptor(String location, List<Order> sortCols) {
@@ -218,10 +219,10 @@ public abstract class CompactorTest {
 
  // I can't do this with @Before because I want to be able to control when the thread starts
   private void startThread(char type, boolean stopAfterOne) throws Exception {
-    startThread(type, stopAfterOne, new MetaStoreThread.BooleanPointer());
+    startThread(type, stopAfterOne, new AtomicBoolean());
   }
 
-  private void startThread(char type, boolean stopAfterOne, MetaStoreThread.BooleanPointer looped)
+  private void startThread(char type, boolean stopAfterOne, AtomicBoolean looped)
     throws Exception {
     TxnDbUtil.setConfValues(conf);
     CompactorThread t = null;
@@ -233,7 +234,7 @@ public abstract class CompactorTest {
     }
     t.setThreadId((int) t.getId());
     t.setHiveConf(conf);
-    stop.boolVal = stopAfterOne;
+    stop.set(stopAfterOne);
     t.init(stop, looped);
     if (stopAfterOne) t.run();
     else t.start();

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java Tue Nov 18 00:48:40 2014
@@ -30,6 +30,7 @@ import org.junit.Test;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * Tests for the compactor Cleaner thread
@@ -285,12 +286,12 @@ public class TestCleaner extends Compact
     LockRequest req = new LockRequest(components, "me", "localhost");
     LockResponse res = txnHandler.lock(req);
 
-    MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer();
-    looped.boolVal = false;
+    AtomicBoolean looped = new AtomicBoolean();
+    looped.set(false);
     startCleaner(looped);
 
     // Make sure the compactor has a chance to run once
-    while (!looped.boolVal) {
+    while (!looped.get()) {
       Thread.currentThread().sleep(100);
     }
 
@@ -310,9 +311,9 @@ public class TestCleaner extends Compact
 
     // Unlock the previous lock
     txnHandler.unlock(new UnlockRequest(res.getLockid()));
-    looped.boolVal = false;
+    looped.set(false);
 
-    while (!looped.boolVal) {
+    while (!looped.get()) {
       Thread.currentThread().sleep(100);
     }
     stopThread();
@@ -356,12 +357,12 @@ public class TestCleaner extends Compact
     LockRequest req = new LockRequest(components, "me", "localhost");
     LockResponse res = txnHandler.lock(req);
 
-    MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer();
-    looped.boolVal = false;
+    AtomicBoolean looped = new AtomicBoolean();
+    looped.set(false);
     startCleaner(looped);
 
     // Make sure the compactor has a chance to run once
-    while (!looped.boolVal) {
+    while (!looped.get()) {
       Thread.currentThread().sleep(100);
     }
 
@@ -383,9 +384,9 @@ public class TestCleaner extends Compact
 
     // Unlock the previous lock
     txnHandler.unlock(new UnlockRequest(res.getLockid()));
-    looped.boolVal = false;
+    looped.set(false);
 
-    while (!looped.boolVal) {
+    while (!looped.get()) {
       Thread.currentThread().sleep(100);
     }
     stopThread();

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java Tue Nov 18 00:48:40 2014
@@ -243,6 +243,32 @@ public class TestInitiator extends Compa
   }
 
   @Test
+  public void noCompactWhenNoCompactSetLowerCase() throws Exception {
+    Map<String, String> parameters = new HashMap<String, String>(1);
+    parameters.put("no_auto_compaction", "true");
+    Table t = newTable("default", "ncwncs", false, parameters);
+
+    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
+
+    for (int i = 0; i < 11; i++) {
+      long txnid = openTxn();
+      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
+      comp.setTablename("ncwncs");
+      List<LockComponent> components = new ArrayList<LockComponent>(1);
+      components.add(comp);
+      LockRequest req = new LockRequest(components, "me", "localhost");
+      req.setTxnid(txnid);
+      LockResponse res = txnHandler.lock(req);
+      txnHandler.abortTxn(new AbortTxnRequest(txnid));
+    }
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(0, rsp.getCompactsSize());
+  }
+
+  @Test
   public void noCompactWhenCompactAlreadyScheduled() throws Exception {
     Table t = newTable("default", "ncwcas", false);
 

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java Tue Nov 18 00:48:40 2014
@@ -187,7 +187,7 @@ public class TestGenericUDFOPDivide exte
     PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
     Assert.assertEquals(TypeInfoFactory.getDecimalTypeInfo(11, 7), oi.getTypeInfo());
     HiveDecimalWritable res = (HiveDecimalWritable) udf.evaluate(args);
-    Assert.assertEquals(HiveDecimal.create("0.0617100"), res.getHiveDecimal());
+    Assert.assertEquals(HiveDecimal.create("0.06171"), res.getHiveDecimal());
   }
 
   @Test

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java Tue Nov 18 00:48:40 2014
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hive.ql.udf.generic;
 
 import org.apache.hadoop.hive.conf.HiveConf;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/create_like.q Tue Nov 18 00:48:40 2014
@@ -64,3 +64,9 @@ DESCRIBE FORMATTED doctors;
 
 CREATE TABLE doctors2 like doctors;
 DESCRIBE FORMATTED doctors2;
+
+CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO");
+CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable;
+
+DESCRIBE FORMATTED LikePropertiedParquetTable;
+

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ctas_colname.q Tue Nov 18 00:48:40 2014
@@ -26,20 +26,20 @@ select * from x5;
 
 -- sub queries
 explain
-create table x6 as select * from (select *, max(key) from src1) a;
-create table x6 as select * from (select *, max(key) from src1) a;
+create table x6 as select * from (select *, key + 1 from src1) a;
+create table x6 as select * from (select *, key + 1 from src1) a;
 describe formatted x6;
 select * from x6;
 
 explain
-create table x7 as select * from (select * from src group by key) a;
-create table x7 as select * from (select * from src group by key) a;
+create table x7 as select * from (select *, count(value) from src group by key, value) a;
+create table x7 as select * from (select *, count(value) from src group by key, value) a;
 describe formatted x7;
 select * from x7;
 
 explain
-create table x8 as select * from (select * from src group by key having key < 9) a;
-create table x8 as select * from (select * from src group by key having key < 9) a;
+create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a;
+create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a;
 describe formatted x8;
 select * from x8;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/database_drop.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/database_drop.q?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/database_drop.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/database_drop.q Tue Nov 18 00:48:40 2014
@@ -75,6 +75,12 @@ CREATE EXTERNAL TABLE extab1(id INT, nam
               STORED AS TEXTFILE
               LOCATION 'file:${system:test.tmp.dir}/dbcascade/extab1';
 
+-- add a table, create index (give a name for index table)
+CREATE TABLE temp_tbl3 (id INT, name STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl3;
+CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl;
+ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD;
+
 -- drop the database with cascade
 DROP DATABASE db5 CASCADE;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q Tue Nov 18 00:48:40 2014
@@ -19,6 +19,7 @@ insert into table orc_merge5b select use
 set hive.exec.orc.write.format=0.11;
 insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
 insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
 
 -- 5 files total
 analyze table orc_merge5b compute statistics noscan;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_create.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_create.q?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_create.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_create.q Tue Nov 18 00:48:40 2014
@@ -28,7 +28,6 @@ SELECT * FROM parquet_create_staging;
 
 INSERT OVERWRITE TABLE parquet_create SELECT * FROM parquet_create_staging;
 
-SELECT * FROM parquet_create group by id;
 SELECT id, count(0) FROM parquet_create group by id;
 SELECT str from parquet_create;
 SELECT mp from parquet_create;

Modified: hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out (original)
+++ hive/branches/spark/ql/src/test/resources/orc-file-dump-dictionary-threshold.out Tue Nov 18 00:48:40 2014
@@ -1,4 +1,5 @@
 Structure for TestFileDump.testDump.orc
+File Version: 0.12 with HIVE_8732
 Rows: 21000
 Compression: ZLIB
 Compression size: 10000
@@ -182,6 +183,6 @@ Stripes:
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-
 7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 positions: 0,0,0,0,0
 
-File length: 2033557 bytes
+File length: 2033559 bytes
 Padding length: 0 bytes
 Padding ratio: 0%

Modified: hive/branches/spark/ql/src/test/resources/orc-file-dump.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/resources/orc-file-dump.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/resources/orc-file-dump.out (original)
+++ hive/branches/spark/ql/src/test/resources/orc-file-dump.out Tue Nov 18 00:48:40 2014
@@ -1,4 +1,5 @@
 Structure for TestFileDump.testDump.orc
+File Version: 0.12 with HIVE_8732
 Rows: 21000
 Compression: ZLIB
 Compression size: 10000
@@ -187,6 +188,6 @@ Stripes:
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0
 
-File length: 270760 bytes
+File length: 270762 bytes
 Padding length: 0 bytes
 Padding ratio: 0%

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out Tue Nov 18 00:48:40 2014
@@ -13,7 +13,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 testrole
-
 PREHOOK: query: drop role TESTROLE
 PREHOOK: type: DROPROLE
 POSTHOOK: query: drop role TESTROLE
@@ -24,7 +23,6 @@ POSTHOOK: query: show roles
 POSTHOOK: type: SHOW_ROLES
 admin
 public
-
 PREHOOK: query: create role TESTROLE
 PREHOOK: type: CREATEROLE
 POSTHOOK: query: create role TESTROLE
@@ -36,7 +34,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 testrole
-
 PREHOOK: query: grant role testROLE to user hive_admin_user
 PREHOOK: type: GRANT_ROLE
 POSTHOOK: query: grant role testROLE to user hive_admin_user
@@ -56,7 +53,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 testrole
-
 PREHOOK: query: create role TESTRoLE
 PREHOOK: type: CREATEROLE
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Role testrole already exists.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out Tue Nov 18 00:48:40 2014
@@ -55,5 +55,4 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPDATABASE [[OBJECT OWNERSHIP] on Object [type=TABLE_OR_VIEW, name=dba2.tab2]]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out Tue Nov 18 00:48:40 2014
@@ -21,7 +21,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: drop database dba1
 PREHOOK: type: DROPDATABASE
 PREHOOK: Input: database:dba1
@@ -37,7 +36,6 @@ POSTHOOK: query: -- check if dropping db
 show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: create database dba2
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:dba2
@@ -49,5 +47,4 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPDATABASE [[OBJECT OWNERSHIP] on Object [type=DATABASE, name=dba2]]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out Tue Nov 18 00:48:40 2014
@@ -7,7 +7,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 admin
-
 PREHOOK: query: create role r1
 PREHOOK: type: CREATEROLE
 POSTHOOK: query: create role r1
@@ -21,7 +20,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: drop role r1
 PREHOOK: type: DROPROLE
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out Tue Nov 18 00:48:40 2014
@@ -42,7 +42,6 @@ show current roles
 POSTHOOK: type: SHOW_ROLES
 public
 role2
-
 PREHOOK: query: grant all on table tpriv_current_role to user user3
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@tpriv_current_role

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_case.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_case.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_case.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_case.q.out Tue Nov 18 00:48:40 2014
@@ -14,7 +14,6 @@ admin
 mixCaseRole1
 mixCaseRole2
 public
-
 PREHOOK: query: create table t1(i int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out Tue Nov 18 00:48:40 2014
@@ -3,7 +3,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
@@ -64,7 +63,6 @@ public
 role1
 role2
 role3
-
 PREHOOK: query: select * from t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -78,7 +76,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: grant select on t1 to role role2
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@t1
@@ -93,7 +90,6 @@ public
 role1
 role2
 role3
-
 PREHOOK: query: select * from t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -111,7 +107,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 admin
-
 PREHOOK: query: revoke select on table t1 from role role2
 PREHOOK: type: REVOKE_PRIVILEGE
 PREHOOK: Output: default@t1
@@ -139,7 +134,6 @@ role1
 role2
 role3
 role4
-
 PREHOOK: query: select * from t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -153,7 +147,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
@@ -175,7 +168,6 @@ role1
 role2
 role3
 role4
-
 PREHOOK: query: select * from t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -189,7 +181,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
@@ -206,5 +197,4 @@ public
 role1
 role2
 role4
-
 FAILED: HiveAccessControlException Permission denied: Principal [name=user1, type=USER] does not have following privileges for operation QUERY [[SELECT] on Object [type=TABLE_OR_VIEW, name=default.t1]]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/date_literal2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/date_literal2.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/date_literal2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/date_literal2.q.out Tue Nov 18 00:48:40 2014
@@ -1 +1 @@
-FAILED: SemanticException Unable to convert date literal string to date value.
+FAILED: SemanticException Unable to convert time literal '2001/01/01' to time value.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/date_literal3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/date_literal3.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/date_literal3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/date_literal3.q.out Tue Nov 18 00:48:40 2014
@@ -1 +1 @@
-FAILED: SemanticException Unable to convert date literal string to date value.
+FAILED: SemanticException Unable to convert time literal '2001-01-32' to time value.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/illegal_partition_type4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/illegal_partition_type4.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/illegal_partition_type4.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/illegal_partition_type4.q.out Tue Nov 18 00:48:40 2014
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tab1(s str
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tab1
-FAILED: SemanticException Unable to convert date literal string to date value.
+FAILED: SemanticException Unable to convert time literal 'foo' to time value.

Copied: hive/branches/spark/ql/src/test/results/clientpositive/acid_join.q.out (from r1637419, hive/trunk/ql/src/test/results/clientpositive/acid_join.q.out)
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/acid_join.q.out?p2=hive/branches/spark/ql/src/test/results/clientpositive/acid_join.q.out&p1=hive/trunk/ql/src/test/results/clientpositive/acid_join.q.out&r1=1637419&r2=1640263&rev=1640263&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/acid_join.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/acid_join.q.out Tue Nov 18 00:48:40 2014
@@ -65,7 +65,7 @@ POSTHOOK: query: select * from acidjoin3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acidjoin3
 #### A masked pattern was here ####
-aaa	35	3.00
+aaa	35	3
 bbb	32	3.01
 ccc	32	3.02
 ddd	35	3.03

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_orc.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_orc.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_orc.q.out Tue Nov 18 00:48:40 2014
@@ -48,9 +48,9 @@ columns:struct columns { i32 key, string
 partitioned:false
 partitionColumns:
 totalNumberFiles:3
-totalFileSize:7488
-maxFileSize:2496
-minFileSize:2496
+totalFileSize:7494
+maxFileSize:2498
+minFileSize:2498
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test
@@ -91,9 +91,9 @@ columns:struct columns { i32 key, string
 partitioned:false
 partitionColumns:
 totalNumberFiles:1
-totalFileSize:7167
-maxFileSize:7167
-minFileSize:7167
+totalFileSize:7169
+maxFileSize:7169
+minFileSize:7169
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test
@@ -171,9 +171,9 @@ columns:struct columns { i32 key, string
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:3
-totalFileSize:7488
-maxFileSize:2496
-minFileSize:2496
+totalFileSize:7494
+maxFileSize:2498
+minFileSize:2498
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test_part
@@ -218,9 +218,9 @@ columns:struct columns { i32 key, string
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:1
-totalFileSize:7167
-maxFileSize:7167
-minFileSize:7167
+totalFileSize:7169
+maxFileSize:7169
+minFileSize:7169
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test_part

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out Tue Nov 18 00:48:40 2014
@@ -48,9 +48,9 @@ columns:struct columns { i32 key, string
 partitioned:false
 partitionColumns:
 totalNumberFiles:3
-totalFileSize:7488
-maxFileSize:2496
-minFileSize:2496
+totalFileSize:7494
+maxFileSize:2498
+minFileSize:2498
 #### A masked pattern was here ####
 
 PREHOOK: query: desc extended src_orc_merge_test_stat
@@ -94,7 +94,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	1500                
 	rawDataSize         	141000              
-	totalSize           	7488                
+	totalSize           	7494                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -146,7 +146,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1500                
 	rawDataSize         	141000              
-	totalSize           	7167                
+	totalSize           	7169                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -216,9 +216,9 @@ columns:struct columns { i32 key, string
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:3
-totalFileSize:7488
-maxFileSize:2496
-minFileSize:2496
+totalFileSize:7494
+maxFileSize:2498
+minFileSize:2498
 #### A masked pattern was here ####
 
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
@@ -249,7 +249,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	1500                
 	rawDataSize         	141000              
-	totalSize           	7488                
+	totalSize           	7494                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -300,7 +300,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	1500                
 	rawDataSize         	141000              
-	totalSize           	7488                
+	totalSize           	7494                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -359,7 +359,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1500                
 	rawDataSize         	141000              
-	totalSize           	7167                
+	totalSize           	7169                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_change_col.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_change_col.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/alter_partition_change_col.q.out Tue Nov 18 00:48:40 2014
@@ -280,7 +280,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0.0	abc	123
+Beck	0	abc	123
 Beck	77.341	abc	123
 Beck	79.9	abc	123
 Cluck	5.96	abc	123
@@ -288,7 +288,7 @@ Mary	33.33	abc	123
 Mary	4.329	abc	123
 Snow	55.71	abc	123
 Tom	-12.25	abc	123
-Tom	19.00	abc	123
+Tom	19	abc	123
 Tom	234.79	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
@@ -347,7 +347,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0.0	abc	123
+Beck	0	abc	123
 Beck	77.341	abc	123
 Beck	79.9	abc	123
 Cluck	5.96	abc	123
@@ -355,7 +355,7 @@ Mary	33.33	abc	123
 Mary	4.329	abc	123
 Snow	55.71	abc	123
 Tom	-12.25	abc	123
-Tom	19.00	abc	123
+Tom	19	abc	123
 Tom	234.79	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
@@ -367,7 +367,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0.0	__HIVE_DEFAULT_PARTITION__	123
+Beck	0	__HIVE_DEFAULT_PARTITION__	123
 Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
 Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
 Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
@@ -375,7 +375,7 @@ Mary	33.33	__HIVE_DEFAULT_PARTITION__	12
 Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
 Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
 Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19.00	__HIVE_DEFAULT_PARTITION__	123
+Tom	19	__HIVE_DEFAULT_PARTITION__	123
 Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try out replace columns
 alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string)
@@ -449,7 +449,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0.0	__HIVE_DEFAULT_PARTITION__	123
+Beck	0	__HIVE_DEFAULT_PARTITION__	123
 Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
 Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
 Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
@@ -457,7 +457,7 @@ Mary	33.33	__HIVE_DEFAULT_PARTITION__	12
 Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
 Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
 Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19.00	__HIVE_DEFAULT_PARTITION__	123
+Tom	19	__HIVE_DEFAULT_PARTITION__	123
 Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
@@ -593,7 +593,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0.0	__HIVE_DEFAULT_PARTITION__	123
+Beck	0	__HIVE_DEFAULT_PARTITION__	123
 Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
 Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
 Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
@@ -601,7 +601,7 @@ Mary	33.33	__HIVE_DEFAULT_PARTITION__	12
 Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
 Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
 Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19.00	__HIVE_DEFAULT_PARTITION__	123
+Tom	19	__HIVE_DEFAULT_PARTITION__	123
 Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') add columns (c2 decimal(14,4))
 PREHOOK: type: ALTERTABLE_ADDCOLS
@@ -638,7 +638,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0.0	abc	123
+Beck	0	abc	123
 Beck	77.341	abc	123
 Beck	79.9	abc	123
 Cluck	5.96	abc	123
@@ -646,7 +646,7 @@ Mary	33.33	abc	123
 Mary	4.329	abc	123
 Snow	55.71	abc	123
 Tom	-12.25	abc	123
-Tom	19.00	abc	123
+Tom	19	abc	123
 Tom	234.79	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
@@ -658,7 +658,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0.0	__HIVE_DEFAULT_PARTITION__	123
+Beck	0	__HIVE_DEFAULT_PARTITION__	123
 Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
 Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
 Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
@@ -666,7 +666,7 @@ Mary	33.33	__HIVE_DEFAULT_PARTITION__	12
 Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
 Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
 Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19.00	__HIVE_DEFAULT_PARTITION__	123
+Tom	19	__HIVE_DEFAULT_PARTITION__	123
 Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try changing column for all partitions at once
 alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0)

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out Tue Nov 18 00:48:40 2014
@@ -98,11 +98,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 5 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 5 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- partition level analyze statistics for specific parition
@@ -135,11 +135,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- basicStatState: PARTIAL colStatState: NONE
@@ -158,11 +158,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 9 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 9 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
@@ -181,11 +181,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), '2001' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- partition level analyze statistics for all partitions
@@ -222,11 +222,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
@@ -245,11 +245,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
@@ -268,11 +268,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- both partitions will be pruned
@@ -331,11 +331,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: zip (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL
@@ -354,7 +354,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string)
             outputColumnNames: _col0
@@ -377,7 +377,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: year (type: string)
             outputColumnNames: _col0
@@ -402,7 +402,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int)
             outputColumnNames: _col0, _col1
@@ -425,7 +425,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int)
             outputColumnNames: _col0, _col1
@@ -448,11 +448,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: state (type: string), locid (type: int)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL
@@ -471,11 +471,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: loc_orc
-          Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- This is to test filter expression evaluation on partition column
@@ -496,7 +496,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid > 0) (type: boolean)
               Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -532,7 +532,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid > 0) (type: boolean)
               Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -568,7 +568,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: loc_orc
-            Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid > 0) (type: boolean)
               Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out Tue Nov 18 00:48:40 2014
@@ -89,11 +89,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 3 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 366 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 3 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 366 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- table level analyze statistics
@@ -122,11 +122,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: -- column level partial statistics
@@ -155,11 +155,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: -- all selected columns have statistics
@@ -180,7 +180,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: deptid (type: int)
             outputColumnNames: _col0
@@ -213,11 +213,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE
@@ -236,7 +236,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string)
             outputColumnNames: _col0
@@ -259,7 +259,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: deptid (type: int)
             outputColumnNames: _col0
@@ -282,7 +282,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: emp_orc
-          Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: lastname (type: string), deptid (type: int)
             outputColumnNames: _col0, _col1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out Tue Nov 18 00:48:40 2014
@@ -15,7 +15,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
@@ -25,7 +24,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 admin
-
 PREHOOK: query: select * from t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out Tue Nov 18 00:48:40 2014
@@ -41,7 +41,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: SHOW CURRENT ROLES
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: -- user2 should be able to do a describe table, as pubic is in the current roles
 DESC t_gpr1
 PREHOOK: type: DESCTABLE

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_index.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_index.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_index.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_index.q.out Tue Nov 18 00:48:40 2014
@@ -43,8 +43,6 @@ Compressed:         	No                 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[Order(col:a, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: alter index t1_index on t1 rebuild
 PREHOOK: type: ALTERINDEX_REBUILD
 PREHOOK: Input: default@t1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_role_grant1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_role_grant1.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_role_grant1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_role_grant1.q.out Tue Nov 18 00:48:40 2014
@@ -29,7 +29,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 src_role2
-
 PREHOOK: query: -- revoke role without role keyword
 revoke src_rolE2 from user user2
 PREHOOK: type: REVOKE_ROLE
@@ -48,7 +47,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 src_role2
-
 PREHOOK: query: ----------------------------------------
 -- role granting without role keyword, with admin option (syntax check)
 ----------------------------------------
@@ -104,7 +102,6 @@ admin
 public
 src_role2
 src_role_wadmin
-
 PREHOOK: query: drop role Src_role2
 PREHOOK: type: DROPROLE
 POSTHOOK: query: drop role Src_role2
@@ -116,7 +113,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 src_role_wadmin
-
 PREHOOK: query: drop role sRc_role_wadmin
 PREHOOK: type: DROPROLE
 POSTHOOK: query: drop role sRc_role_wadmin
@@ -127,4 +123,3 @@ POSTHOOK: query: show roles
 POSTHOOK: type: SHOW_ROLES
 admin
 public
-

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out Tue Nov 18 00:48:40 2014
@@ -7,7 +7,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 admin
-
 PREHOOK: query: create role r1
 PREHOOK: type: CREATEROLE
 POSTHOOK: query: create role r1
@@ -25,7 +24,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 r1
-
 PREHOOK: query: set role PUBLIC
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role PUBLIC
@@ -35,7 +33,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ALL
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ALL
@@ -46,7 +43,6 @@ POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
 r1
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN

Modified: hive/branches/spark/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out Tue Nov 18 00:48:40 2014
@@ -180,7 +180,6 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
-
 PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
@@ -206,7 +205,6 @@ POSTHOOK: type: SHOW_ROLES
 admin
 public
 role_v
-
 PREHOOK: query: grant all on table vt2 to role role_v
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@vt2

Modified: hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal.q.out Tue Nov 18 00:48:40 2014
@@ -106,9 +106,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19.00
-Beck	0.00
-Beck	79.90
+Tom	19
+Beck	0
+Beck	79.9
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -175,10 +175,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6.0
+6
 12.3
 33.3
-19.0
+19
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

Modified: hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal_native.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal_native.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal_native.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/avro_decimal_native.q.out Tue Nov 18 00:48:40 2014
@@ -92,9 +92,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19.00
-Beck	0.00
-Beck	79.90
+Tom	19
+Beck	0
+Beck	79.9
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6.0
+6
 12.3
 33.3
-19.0
+19
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

Modified: hive/branches/spark/ql/src/test/results/clientpositive/char_pad_convert.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/char_pad_convert.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/char_pad_convert.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/char_pad_convert.q.out Tue Nov 18 00:48:40 2014
@@ -144,7 +144,7 @@ select lpad(f, 4, ' '),
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
 #### A masked pattern was here ####
-74.7	42	zzzzzTRUE	20	dd45.40	yard du
+74.7	42	zzzzzTRUE	20	ddd45.4	yard du
 26.4	37	zzzzzTRUE	20	dd29.62	history
 96.9	18	zzzzFALSE	20	dd27.32	history
 13.0	34	zzzzFALSE	20	dd23.91	topolog
@@ -190,7 +190,7 @@ POSTHOOK: query: select rpad(f, 4, ' '),
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
 #### A masked pattern was here ####
-74.7	42	TRUEzzzzz	20	45.40dd	yard du
+74.7	42	TRUEzzzzz	20	45.4ddd	yard du
 26.4	37	TRUEzzzzz	20	29.62dd	history
 96.9	18	FALSEzzzz	20	27.32dd	history
 13.0	34	FALSEzzzz	20	23.91dd	topolog

Modified: hive/branches/spark/ql/src/test/results/clientpositive/create_like.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/create_like.q.out?rev=1640263&r1=1640262&r2=1640263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/create_like.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/create_like.q.out Tue Nov 18 00:48:40 2014
@@ -405,3 +405,51 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
+PREHOOK: query: CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@PropertiedParquetTable
+POSTHOOK: query: CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@PropertiedParquetTable
+PREHOOK: query: CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@LikePropertiedParquetTable
+POSTHOOK: query: CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@LikePropertiedParquetTable
+PREHOOK: query: DESCRIBE FORMATTED LikePropertiedParquetTable
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@likepropertiedparquettable
+POSTHOOK: query: DESCRIBE FORMATTED LikePropertiedParquetTable
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@likepropertiedparquettable
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+b                   	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	parquet.compression 	LZO                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1