Posted to commits@hive.apache.org by br...@apache.org on 2014/11/07 21:41:45 UTC

svn commit: r1637444 [3/20] - in /hive/branches/spark: ./ cli/src/test/org/apache/hadoop/hive/cli/ common/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/conf/ com...

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql Fri Nov  7 20:41:34 2014
@@ -1 +1,32 @@
+--
+-- Create the table if it doesn't exist.
+--
+if not exists (SELECT 1 FROM INFORMATION_SCHEMA.TABLES 
+           WHERE TABLE_NAME='PART_COL_STATS')
+CREATE TABLE PART_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" varchar(128) NOT NULL,
+    COLUMN_TYPE varchar(128) NOT NULL,
+    DB_NAME varchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    PART_ID bigint NULL,
+    PARTITION_NAME varchar(767) NOT NULL,
+    "TABLE_NAME" varchar(128) NOT NULL
+)
+go
+
+
 CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql Fri Nov  7 20:41:34 2014
@@ -140,7 +140,7 @@ CREATE TABLE PARTITIONS
     PART_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     LAST_ACCESS_TIME int NOT NULL,
-    PART_NAME varchar(767) NULL,
+    PART_NAME nvarchar(767) NULL,
     SD_ID bigint NULL,
     TBL_ID bigint NULL
 );
@@ -371,7 +371,7 @@ CREATE TABLE SDS
     INPUT_FORMAT varchar(4000) NULL,
     IS_COMPRESSED bit NOT NULL,
     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
-    LOCATION varchar(4000) NULL,
+    LOCATION nvarchar(4000) NULL,
     NUM_BUCKETS int NOT NULL,
     OUTPUT_FORMAT varchar(4000) NULL,
     SERDE_ID bigint NULL
@@ -437,7 +437,7 @@ ALTER TABLE SKEWED_STRING_LIST_VALUES AD
 CREATE TABLE PARTITION_KEY_VALS
 (
     PART_ID bigint NOT NULL,
-    PART_KEY_VAL varchar(255) NULL,
+    PART_KEY_VAL nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql Fri Nov  7 20:41:34 2014
@@ -140,7 +140,7 @@ CREATE TABLE PARTITIONS
     PART_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     LAST_ACCESS_TIME int NOT NULL,
-    PART_NAME varchar(767) NULL,
+    PART_NAME nvarchar(767) NULL,
     SD_ID bigint NULL,
     TBL_ID bigint NULL
 );
@@ -371,7 +371,7 @@ CREATE TABLE SDS
     INPUT_FORMAT varchar(4000) NULL,
     IS_COMPRESSED bit NOT NULL,
     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
-    LOCATION varchar(4000) NULL,
+    LOCATION nvarchar(4000) NULL,
     NUM_BUCKETS int NOT NULL,
     OUTPUT_FORMAT varchar(4000) NULL,
     SERDE_ID bigint NULL
@@ -437,7 +437,7 @@ ALTER TABLE SKEWED_STRING_LIST_VALUES AD
 CREATE TABLE PARTITION_KEY_VALS
 (
     PART_ID bigint NOT NULL,
-    PART_KEY_VAL varchar(255) NULL,
+    PART_KEY_VAL nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql Fri Nov  7 20:41:34 2014
@@ -1,101 +1 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE COMPACTION_QUEUE(
-	CQ_ID int NOT NULL,
-	CQ_DATABASE varchar(128) NOT NULL,
-	CQ_TABLE varchar(128) NOT NULL,
-	CQ_PARTITION varchar(767) NULL,
-	CQ_STATE char(1) NOT NULL,
-	CQ_TYPE char(1) NOT NULL,
-	CQ_WORKER_ID varchar(128) NULL,
-	CQ_START int NULL,
-	CQ_RUN_AS varchar(128) NULL,
-PRIMARY KEY CLUSTERED 
-(
-	CQ_ID ASC
-)
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS(
-	CTC_TXNID int NULL,
-	CTC_DATABASE varchar(128) NOT NULL,
-	CTC_TABLE varchar(128) NULL,
-	CTC_PARTITION varchar(767) NULL
-);
-
-CREATE TABLE HIVE_LOCKS(
-	HL_LOCK_EXT_ID int NOT NULL,
-	HL_LOCK_INT_ID int NOT NULL,
-	HL_TXNID int NULL,
-	HL_DB varchar(128) NOT NULL,
-	HL_TABLE varchar(128) NULL,
-	HL_PARTITION varchar(767) NULL,
-	HL_LOCK_STATE char(1) NOT NULL,
-	HL_LOCK_TYPE char(1) NOT NULL,
-	HL_LAST_HEARTBEAT int NOT NULL,
-	HL_ACQUIRED_AT int NULL,
-	HL_USER varchar(128) NOT NULL,
-	HL_HOST varchar(128) NOT NULL,
-PRIMARY KEY CLUSTERED 
-(
-	HL_LOCK_EXT_ID ASC,
-	HL_LOCK_INT_ID ASC
-)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
-	NCQ_NEXT int NOT NULL
-);
-
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE NEXT_LOCK_ID(
-	NL_NEXT int NOT NULL
-);
-
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE NEXT_TXN_ID(
-	NTXN_NEXT int NOT NULL
-);
-
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE TXNS(
-	TXN_ID int NOT NULL,
-	TXN_STATE char(1) NOT NULL,
-	TXN_STARTED int NOT NULL,
-	TXN_LAST_HEARTBEAT int NOT NULL,
-	TXN_USER varchar(128) NOT NULL,
-	TXN_HOST varchar(128) NOT NULL,
-PRIMARY KEY CLUSTERED 
-(
-	TXN_ID ASC
-)
-);
-
-CREATE TABLE TXN_COMPONENTS(
-	TC_TXNID int NULL,
-	TC_DATABASE varchar(128) NOT NULL,
-	TC_TABLE varchar(128) NULL,
-	TC_PARTITION varchar(767) NULL
-);
-
-ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the License); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an AS IS BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.

--
-- Tables for transaction management
-- 

CREATE TABLE COMPACTION_QUEUE(
	CQ_ID int NOT NULL,
	CQ_DATABASE varchar(128) NOT NULL,
	CQ_TABLE varchar(128) NOT NULL,
	CQ_PARTITION varchar(767) NULL,
	CQ_STATE char(1) NOT NULL,
	CQ_TYPE char(1) NOT NULL,
	CQ_WORKER_ID varchar(128) NULL,
	CQ_START int NULL,
	CQ_RUN_AS varchar(128) NULL,
PRIMARY KEY CLUSTERED 
(
	CQ_ID ASC
)
);

CREATE TABLE COMPLETED_TXN_COMPONENTS(
	CTC_TXNID int NULL,
	CTC_DATABASE varchar(128) NOT NULL,
	CTC_TABLE varchar(128) NULL,
	CTC_PARTITION varchar(767) NULL
);

CREATE TABLE HIVE_LOCKS(
	HL_LOCK_EXT_ID int NOT NULL,
	HL_LOCK_INT_ID int NOT NULL,
	HL_TXNID int NULL,
	HL_DB varchar(128) NOT NULL,
	HL_TABLE varchar(128) NULL,
	HL_PARTITION varchar(767) NULL,
	HL_LOCK_STATE char(1) NOT NULL,
	HL_LOCK_TYPE char(1) NOT NULL,
	HL_LAST_HEARTBEAT int NOT NULL,
	HL_ACQUIRED_AT int NULL,
	HL_USER varchar(128) NOT NULL,
	HL_HOST varchar(128) NOT NULL,
PRIMARY KEY CLUSTERED 
(
	HL_LOCK_EXT_ID ASC,
	HL_LOCK_INT_ID ASC
)
);

CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
	NCQ_NEXT int NOT NULL
);

INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);

CREATE TABLE NEXT_LOCK_ID(
	NL_NEXT int NOT NULL
);

INSERT INTO NEXT_LOCK_ID VALUES(1);

CREATE TABLE NEXT_TXN_ID(
	NTXN_NEXT int NOT NULL
);

INSERT INTO NEXT_TXN_ID VALUES(1);

CREATE TABLE TXNS(
	TXN_ID int NOT NULL,
	TXN_STATE char(1) NOT NULL,
	TXN_STARTED int NOT NULL,
	TXN_LAST_HEARTBEAT int NOT NULL,
	TXN_USER varchar(128) NOT NULL,
	TXN_HOST varchar(128) NOT NULL,
PRIMARY KEY CLUSTERED 
(
	TXN_ID ASC
)
);

CREATE TABLE TXN_COMPONENTS(
	TC_TXNID int NULL,
	TC_DATABASE varchar(128) NOT NULL,
	TC_TABLE varchar(128) NULL,
	TC_PARTITION varchar(767) NULL
);

ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);

\ No newline at end of file

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql Fri Nov  7 20:41:34 2014
@@ -2,6 +2,7 @@ SELECT 'Upgrading MetaStore schema from 
 
 :r 002-HIVE-7784.mssql.sql;
 :r 003-HIVE-8239.mssql.sql;
+:r 004-HIVE-8550.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE;

Modified: hive/branches/spark/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql Fri Nov  7 20:41:34 2014
@@ -1 +1,31 @@
+--
+-- Create the table if it doesn't exist.
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+
 CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql Fri Nov  7 20:41:34 2014
@@ -1 +1,27 @@
+--
+-- Create the table if it doesn't exist.
+--
+CREATE TABLE IF NOT EXISTS PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
 CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);

Modified: hive/branches/spark/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql Fri Nov  7 20:41:34 2014
@@ -1 +1,29 @@
+--
+-- Create the table if it doesn't exist.
+--
+
+CREATE TABLE IF NOT EXISTS "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+
 CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Fri Nov  7 20:41:34 2014
@@ -5773,6 +5773,7 @@ public class HiveMetaStore extends Thrif
       // Server will create new threads up to max as necessary. After an idle
       // period, it will destroy threads to keep the number of threads in the
       // pool to min.
+      int maxMessageSize = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
       int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
       int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
       boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
@@ -5824,6 +5825,7 @@ public class HiveMetaStore extends Thrif
           .processor(processor)
           .transportFactory(transFactory)
           .protocolFactory(new TBinaryProtocol.Factory())
+          .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize))
           .minWorkerThreads(minWorkerThreads)
           .maxWorkerThreads(maxWorkerThreads);
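
Note on the hunk above: the new inputProtocolFactory decodes incoming requests with a TBinaryProtocol whose read length is capped at maxMessageSize, so an oversized or corrupt frame fails fast instead of driving the server into unbounded allocation; responses still use the unlimited factory. A condensed sketch of the wiring, assuming the Thrift 0.9.x API Hive builds against (transport, processor, and thread-pool values as in the surrounding code):

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.server.TThreadPoolServer;

    // maxMessageSize is read from HiveConf above (METASTORESERVERMAXMESSAGESIZE).
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .transportFactory(transFactory)
        // protocol for writing responses: no read limit needed
        .protocolFactory(new TBinaryProtocol.Factory())
        // protocol for reading requests: strict read/write plus a byte cap
        .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize))
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);
    TThreadPoolServer server = new TThreadPoolServer(args);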
 

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Fri Nov  7 20:41:34 2014
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -151,18 +150,17 @@ class MetaStoreDirectSql {
    * here - for eg., for MySQL, we signal that we want to use ANSI SQL quoting behaviour
    */
   private void doDbSpecificInitializationsBeforeQuery() throws MetaException {
-    if (isMySql){
-      try {
-        assert pm.currentTransaction().isActive(); // must be inside tx together with queries
-        trySetAnsiQuotesForMysql();
-      } catch (SQLException sqlEx) {
-        throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage());
-      }
+    if (!isMySql) return;
+    try {
+      assert pm.currentTransaction().isActive(); // must be inside tx together with queries
+      trySetAnsiQuotesForMysql();
+    } catch (SQLException sqlEx) {
+      throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage());
     }
   }
 
   /**
-   * MySQL, by default, doesn't recognize ANSI quotes which need to have for Postgres.
+   * MySQL, by default, doesn't recognize ANSI quotes which we need to have for Postgres.
    * Try to set the ANSI quotes mode on for the session. Due to connection pooling, needs
    * to be called in the same transaction as the actual queries.
    */
@@ -194,18 +192,20 @@ class MetaStoreDirectSql {
       Object[] params = new Object[] { dbName };
       queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector);
 
-      LOG.debug("getDatabase:query instantiated : " + queryTextDbSelector + " with param ["+params[0]+"]");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("getDatabase:query instantiated : " + queryTextDbSelector
+            + " with param [" + params[0] + "]");
+      }
 
+      @SuppressWarnings("unchecked")
       List<Object[]> sqlResult = (List<Object[]>)queryDbSelector.executeWithArray(params);
       if ((sqlResult == null) || sqlResult.isEmpty()) {
-        LOG.debug("getDatabase:queryDbSelector ran, returned no/empty results, returning NoSuchObjectException");
-        throw new MetaException("There is no database named " + dbName);
+        return null;
       }
 
       assert(sqlResult.size() == 1);
-      if (sqlResult.get(0) == null){
-        LOG.debug("getDatabase:queryDbSelector ran, returned results, but the result entry was null, returning NoSuchObjectException");
-        throw new MetaException("There is no database named " + dbName);
+      if (sqlResult.get(0) == null) {
+        return null;
       }
 
       Object[] dbline = sqlResult.get(0);
@@ -215,25 +215,28 @@ class MetaStoreDirectSql {
           + " FROM \"DATABASE_PARAMS\" "
           + " WHERE \"DB_ID\" = ? "
           + " AND \"PARAM_KEY\" IS NOT NULL";
-      Object[] params2 = new Object[] { dbid };
-      queryDbParams = pm.newQuery("javax.jdo.query.SQL",queryTextDbParams);
-      LOG.debug("getDatabase:query2 instantiated : " + queryTextDbParams + " with param ["+params2[0]+"]");
+      params[0] = dbid;
+      queryDbParams = pm.newQuery("javax.jdo.query.SQL", queryTextDbParams);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("getDatabase:query2 instantiated : " + queryTextDbParams
+            + " with param [" + params[0] + "]");
+      }
 
       Map<String,String> dbParams = new HashMap<String,String>();
-      List<Object[]> sqlResult2 = ensureList(queryDbParams.executeWithArray(params2));
-      if (!sqlResult2.isEmpty()){
-        for (Object[] line : sqlResult2){
+      List<Object[]> sqlResult2 = ensureList(queryDbParams.executeWithArray(params));
+      if (!sqlResult2.isEmpty()) {
+        for (Object[] line : sqlResult2) {
           dbParams.put(extractSqlString(line[0]),extractSqlString(line[1]));
         }
       }
-      LOG.debug("getDatabase: instantiating db object to return");
       Database db = new Database();
       db.setName(extractSqlString(dbline[1]));
       db.setLocationUri(extractSqlString(dbline[2]));
       db.setDescription(extractSqlString(dbline[3]));
       db.setOwnerName(extractSqlString(dbline[4]));
       String type = extractSqlString(dbline[5]);
-      db.setOwnerType((null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
+      db.setOwnerType(
+          (null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
       db.setParameters(dbParams);
       if (LOG.isDebugEnabled()){
         LOG.debug("getDatabase: directsql returning db " + db.getName()
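
Background for the hunks above: the direct-SQL path writes identifiers with ANSI double quotes, which Postgres requires but which MySQL by default parses as string literals, so for MySQL the session has to be switched into ANSI_QUOTES mode, and because connections are pooled this must happen inside the same transaction as the queries themselves. The body of trySetAnsiQuotesForMysql() is outside this diff; a hedged JDBC illustration of the idea (the exact statement Hive issues may differ):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    // Illustrative only: after this, MySQL treats "TBLS" as an
    // identifier rather than a string literal for the session.
    static void setAnsiQuotesForMysql(Connection conn) throws SQLException {
      try (Statement stmt = conn.createStatement()) {
        stmt.execute("SET @@session.sql_mode=ANSI_QUOTES");
      }
    }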

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Fri Nov  7 20:41:34 2014
@@ -521,17 +521,22 @@ public class ObjectStore implements RawS
 
   @Override
   public Database getDatabase(String name) throws NoSuchObjectException {
+    MetaException ex = null;
+    Database db = null;
     try {
-      return getDatabaseInternal(name);
+      db = getDatabaseInternal(name);
     } catch (MetaException e) {
       // Signature restriction to NSOE, and NSOE being a flat exception prevents us from
       // setting the cause of the NSOE as the MetaException. We should not lose the info
       // we got here, but it's very likely that the MetaException is irrelevant and is
       // actually an NSOE message, so we should log it and throw an NSOE with the msg.
-      LOG.warn("Got a MetaException trying to call getDatabase("
-          +name+"), returning NoSuchObjectException", e);
-      throw new NoSuchObjectException(e.getMessage());
+      ex = e;
     }
+    if (db == null) {
+      LOG.warn("Failed to get database " + name +", returning NoSuchObjectException", ex);
+      throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
+    }
+    return db;
   }
 
   public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException {
@@ -2375,7 +2380,7 @@ public class ObjectStore implements RawS
     }
 
     private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException {
-      LOG.error("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
+      LOG.warn("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
       if (!allowJdo) {
         if (ex instanceof MetaException) {
           throw (MetaException)ex;
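
Distilled, the getDatabase() change above folds both failure modes (a MetaException from the internal path and a null result) into one NoSuchObjectException. Since NSOE is a flat, Thrift-generated exception with no constructor that takes a cause, the original exception is logged and only its message travels onward; the reassembled method body reads:

    Database db = null;
    MetaException ex = null;
    try {
      db = getDatabaseInternal(name);
    } catch (MetaException e) {
      ex = e;   // very likely an NSOE-style "not found"; keep the message
    }
    if (db == null) {
      LOG.warn("Failed to get database " + name + ", returning NoSuchObjectException", ex);
      throw new NoSuchObjectException(name + (ex == null ? "" : ": " + ex.getMessage()));
    }
    return db;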

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java Fri Nov  7 20:41:34 2014
@@ -127,7 +127,7 @@ public class CompactionTxnHandler extend
          dbConn.rollback();
        } catch (SQLException e1) {
        }
-       detectDeadlock(e, "setRunAs");
+       detectDeadlock(dbConn, e, "setRunAs");
      } finally {
        closeDbConn(dbConn);
        closeStmt(stmt);
@@ -192,7 +192,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "findNextToCompact");
+        detectDeadlock(dbConn, e, "findNextToCompact");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -234,7 +234,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "markCompacted");
+        detectDeadlock(dbConn, e, "markCompacted");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -377,7 +377,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "markCleaned");
+        detectDeadlock(dbConn, e, "markCleaned");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -429,7 +429,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "cleanEmptyAbortedTxns");
+        detectDeadlock(dbConn, e, "cleanEmptyAbortedTxns");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -475,7 +475,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "revokeFromLocalWorkers");
+        detectDeadlock(dbConn, e, "revokeFromLocalWorkers");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -522,7 +522,7 @@ public class CompactionTxnHandler extend
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "revokeTimedoutWorkers");
+        detectDeadlock(dbConn, e, "revokeTimedoutWorkers");
         throw new MetaException("Unable to connect to transaction database " +
             StringUtils.stringifyException(e));
       } finally {

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Fri Nov  7 20:41:34 2014
@@ -65,13 +65,13 @@ public class TxnHandler {
   static final protected char TXN_OPEN = 'o';
 
   // Lock states
-  static final private char LOCK_ACQUIRED = 'a';
-  static final private char LOCK_WAITING = 'w';
+  static final protected char LOCK_ACQUIRED = 'a';
+  static final protected char LOCK_WAITING = 'w';
 
   // Lock types
-  static final private char LOCK_EXCLUSIVE = 'e';
-  static final private char LOCK_SHARED = 'r';
-  static final private char LOCK_SEMI_SHARED = 'w';
+  static final protected char LOCK_EXCLUSIVE = 'e';
+  static final protected char LOCK_SHARED = 'r';
+  static final protected char LOCK_SEMI_SHARED = 'w';
 
   static final private int ALLOWED_REPEATED_DEADLOCKS = 5;
   static final private Log LOG = LogFactory.getLog(TxnHandler.class.getName());
@@ -301,7 +301,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "openTxns");
+        detectDeadlock(dbConn, e, "openTxns");
         throw new MetaException("Unable to select from transaction database "
           + StringUtils.stringifyException(e));
       } finally {
@@ -336,7 +336,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "abortTxn");
+        detectDeadlock(dbConn, e, "abortTxn");
         throw new MetaException("Unable to update transaction database "
           + StringUtils.stringifyException(e));
       } finally {
@@ -393,7 +393,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "commitTxn");
+        detectDeadlock(dbConn, e, "commitTxn");
         throw new MetaException("Unable to update transaction database "
           + StringUtils.stringifyException(e));
       } finally {
@@ -419,7 +419,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "lock");
+        detectDeadlock(dbConn, e, "lock");
         throw new MetaException("Unable to update transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -444,7 +444,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "lockNoWait");
+        detectDeadlock(dbConn, e, "lockNoWait");
         throw new MetaException("Unable to update transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -479,7 +479,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "checkLock");
+        detectDeadlock(dbConn, e, "checkLock");
         throw new MetaException("Unable to update transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -534,7 +534,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "unlock");
+        detectDeadlock(dbConn, e, "unlock");
         throw new MetaException("Unable to update transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -613,7 +613,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "heartbeat");
+        detectDeadlock(dbConn, e, "heartbeat");
         throw new MetaException("Unable to select from transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -652,7 +652,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "heartbeatTxnRange");
+        detectDeadlock(dbConn, e, "heartbeatTxnRange");
         throw new MetaException("Unable to select from transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -735,7 +735,7 @@ public class TxnHandler {
           dbConn.rollback();
         } catch (SQLException e1) {
         }
-        detectDeadlock(e, "compact");
+        detectDeadlock(dbConn, e, "compact");
         throw new MetaException("Unable to select from transaction database " +
             StringUtils.stringifyException(e));
       } finally {
@@ -898,15 +898,30 @@ public class TxnHandler {
    * Determine if an exception was a deadlock.  Unfortunately there is no standard way to do
    * this, so we have to inspect the error messages and catch the telltale signs for each
    * different database.
+   * @param conn database connection
    * @param e exception that was thrown.
    * @param caller name of the method calling this
    * @throws org.apache.hadoop.hive.metastore.txn.TxnHandler.DeadlockException when deadlock
    * detected and retry count has not been exceeded.
    */
-  protected void detectDeadlock(SQLException e, String caller) throws DeadlockException {
-    final String mysqlDeadlock =
-        "Deadlock found when trying to get lock; try restarting transaction";
-    if (e.getMessage().contains(mysqlDeadlock) || e instanceof SQLTransactionRollbackException) {
+  protected void detectDeadlock(Connection conn,
+                                SQLException e,
+                                String caller) throws DeadlockException, MetaException {
+
+    // If you change this function, remove the @Ignore from TestTxnHandler.deadlockIsDetected()
+    // to test these changes.
+    // MySQL and MSSQL use 40001 as the state code for rollback.  Postgres uses 40001 and 40P01.
+    // Oracle seems to return different SQLStates each time, but the message always contains
+    // "deadlock detected", so I've used that instead.
+    // Derby and newer MySQL driver use the new SQLTransactionRollbackException
+    if (dbProduct == null) {
+      determineDatabaseProduct(conn);
+    }
+    if (e instanceof SQLTransactionRollbackException ||
+        ((dbProduct == DatabaseProduct.MYSQL || dbProduct == DatabaseProduct.POSTGRES ||
+            dbProduct == DatabaseProduct.SQLSERVER) && e.getSQLState().equals("40001")) ||
+        (dbProduct == DatabaseProduct.POSTGRES && e.getSQLState().equals("40P01")) ||
+        (dbProduct == DatabaseProduct.ORACLE && (e.getMessage().contains("deadlock detected")))) {
       if (deadlockCnt++ < ALLOWED_REPEATED_DEADLOCKS) {
         LOG.warn("Deadlock detected in " + caller + ", trying again.");
         throw new DeadlockException();
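
Restated as a predicate, the classification added above treats a SQLException as a deadlock when the driver throws the dedicated subclass, or when the vendor's SQLState (or, for Oracle, the message text) says so. A standalone sketch; the string product tags stand in for the DatabaseProduct value resolved from the live connection:

    import java.sql.SQLException;
    import java.sql.SQLTransactionRollbackException;

    static boolean isDeadlock(String product, SQLException e) {
      if (e instanceof SQLTransactionRollbackException) {
        return true;                                  // Derby, newer MySQL drivers
      }
      String state = e.getSQLState();
      if (("mysql".equals(product) || "postgres".equals(product)
          || "sqlserver".equals(product)) && "40001".equals(state)) {
        return true;                                  // shared serialization-failure state
      }
      if ("postgres".equals(product) && "40P01".equals(state)) {
        return true;                                  // Postgres deadlock_detected
      }
      if ("oracle".equals(product) && e.getMessage() != null
          && e.getMessage().contains("deadlock detected")) {
        return true;                                  // Oracle's SQLState varies; match text
      }
      return false;
    }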

Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java Fri Nov  7 20:41:34 2014
@@ -20,13 +20,18 @@ package org.apache.hadoop.hive.metastore
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreThread;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -1082,6 +1087,115 @@ public class TestTxnHandler {
     for (int i = 0; i < saw.length; i++) assertTrue("Didn't see lock id " + i, saw[i]);
   }
 
+  @Test
+  @Ignore
+  public void deadlockDetected() throws Exception {
+    Connection conn = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+    Statement stmt = conn.createStatement();
+    long now = txnHandler.getDbTime(conn);
+    stmt.executeUpdate("insert into TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, " +
+        "txn_user, txn_host) values (1, 'o', " + now + ", " + now + ", 'shagy', " +
+        "'scooby.com')");
+    stmt.executeUpdate("insert into HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, " +
+        "hl_db, hl_table, hl_partition, hl_lock_state, hl_lock_type, hl_last_heartbeat, " +
+        "hl_user, hl_host) values (1, 1, 1, 'mydb', 'mytable', 'mypartition', '" +
+        txnHandler.LOCK_WAITING + "', '" + txnHandler.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " +
+        "'scooby.com')");
+    conn.commit();
+    txnHandler.closeDbConn(conn);
+
+    final MetaStoreThread.BooleanPointer sawDeadlock = new MetaStoreThread.BooleanPointer();
+
+    final Connection conn1 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+    final Connection conn2 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+    try {
+
+      for (int i = 0; i < 5; i++) {
+        Thread t1 = new Thread() {
+          @Override
+          public void run() {
+            try {
+              try {
+                updateTxns(conn1);
+                updateLocks(conn1);
+                Thread.sleep(1000);
+                conn1.commit();
+                LOG.debug("no exception, no deadlock");
+              } catch (SQLException e) {
+                try {
+                  txnHandler.detectDeadlock(conn1, e, "thread t1");
+                  LOG.debug("Got an exception, but not a deadlock, SQLState is " +
+                      e.getSQLState() + " class of exception is " + e.getClass().getName() +
+                      " msg is <" + e.getMessage() + ">");
+                } catch (TxnHandler.DeadlockException de) {
+                  LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " +
+                      "exception is " + e.getClass().getName() + " msg is <" + e
+                      .getMessage() + ">");
+                  sawDeadlock.boolVal = true;
+                }
+              }
+              conn1.rollback();
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+
+        Thread t2 = new Thread() {
+          @Override
+          public void run() {
+            try {
+              try {
+                updateLocks(conn2);
+                updateTxns(conn2);
+                Thread.sleep(1000);
+                conn2.commit();
+                LOG.debug("no exception, no deadlock");
+              } catch (SQLException e) {
+                try {
+                  txnHandler.detectDeadlock(conn2, e, "thread t2");
+                  LOG.debug("Got an exception, but not a deadlock, SQLState is " +
+                      e.getSQLState() + " class of exception is " + e.getClass().getName() +
+                      " msg is <" + e.getMessage() + ">");
+                } catch (TxnHandler.DeadlockException de) {
+                  LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " +
+                      "exception is " + e.getClass().getName() + " msg is <" + e
+                      .getMessage() + ">");
+                  sawDeadlock.boolVal = true;
+                }
+              }
+              conn2.rollback();
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+
+        t1.start();
+        t2.start();
+        t1.join();
+        t2.join();
+        if (sawDeadlock.boolVal) break;
+      }
+      assertTrue(sawDeadlock.boolVal);
+    } finally {
+      conn1.rollback();
+      txnHandler.closeDbConn(conn1);
+      conn2.rollback();
+      txnHandler.closeDbConn(conn2);
+    }
+  }
+
+  private void updateTxns(Connection conn) throws SQLException {
+    Statement stmt = conn.createStatement();
+    stmt.executeUpdate("update TXNS set txn_last_heartbeat = txn_last_heartbeat + 1");
+  }
+
+  private void updateLocks(Connection conn) throws SQLException {
+    Statement stmt = conn.createStatement();
+    stmt.executeUpdate("update HIVE_LOCKS set hl_last_heartbeat = hl_last_heartbeat + 1");
+  }
+
   @Before
   public void setUp() throws Exception {
     TxnDbUtil.prepDb();
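
The (currently @Ignore'd) test forces its deadlock deterministically: under SERIALIZABLE isolation, t1 updates TXNS then HIVE_LOCKS while t2 updates the same rows in the opposite order, so each transaction ends up waiting on a row lock the other already holds and the database must abort one of them. The recipe in miniature (the column name x is illustrative):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    static void touchInOrder(Connection c, String first, String second)
        throws SQLException, InterruptedException {
      c.setAutoCommit(false);
      c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
      try (Statement s = c.createStatement()) {
        s.executeUpdate("update " + first + " set x = x + 1");
        Thread.sleep(1000);  // widen the window so the two orders interleave
        s.executeUpdate("update " + second + " set x = x + 1");
      }
      c.commit();
    }
    // thread 1: touchInOrder(conn1, "TXNS", "HIVE_LOCKS");
    // thread 2: touchInOrder(conn2, "HIVE_LOCKS", "TXNS");
    // Whichever transaction loses receives the vendor's deadlock error,
    // which detectDeadlock() must classify and rethrow as DeadlockException.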

Modified: hive/branches/spark/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/pom.xml?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/pom.xml (original)
+++ hive/branches/spark/pom.xml Fri Nov  7 20:41:34 2014
@@ -130,6 +130,7 @@
     <jetty.version>7.6.0.v20120127</jetty.version>
     <jersey.version>1.14</jersey.version>
     <jline.version>0.9.94</jline.version>
+    <jansi.version>1.11</jansi.version>
     <jms.version>1.1</jms.version>
     <jodd.version>3.5.2</jodd.version>
     <json.version>20090211</json.version>
@@ -350,6 +351,11 @@
         <version>${jline.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.fusesource.jansi</groupId>
+        <artifactId>jansi</artifactId>
+        <version>${jansi.version}</version>
+      </dependency>
+      <dependency>
         <groupId>junit</groupId>
         <artifactId>junit</artifactId>
         <version>${junit.version}</version>
@@ -487,13 +493,12 @@
           </exclusion>
         </exclusions>
       </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-framework</artifactId>
-            <version>${curator.version}</version>
-        </dependency>
-
-        <dependency>
+      <dependency>
+        <groupId>org.apache.curator</groupId>
+        <artifactId>curator-framework</artifactId>
+        <version>${curator.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.codehaus.groovy</groupId>
         <artifactId>groovy-all</artifactId>
         <version>${groovy.version}</version>
@@ -838,7 +843,7 @@
             <test.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</test.warehouse.dir>
             <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
             <!-- EnforceReadOnlyTables hook and QTestUtil -->
-            <test.src.tables>src,src1,srcbucket,srcbucket2,src_json,src_thrift,src_sequencefile,srcpart,alltypesorc,src_hbase</test.src.tables>
+            <test.src.tables>src,src1,srcbucket,srcbucket2,src_json,src_thrift,src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part,lineitem</test.src.tables>
             <java.security.krb5.conf>${test.tmp.dir}/conf/krb5.conf</java.security.krb5.conf>
           </systemPropertyVariables>
         </configuration>

Modified: hive/branches/spark/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/pom.xml?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/pom.xml (original)
+++ hive/branches/spark/ql/pom.xml Fri Nov  7 20:41:34 2014
@@ -274,6 +274,16 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>jline</groupId>
+      <artifactId>jline</artifactId>
+      <version>${jline.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.fusesource.jansi</groupId>
+      <artifactId>jansi</artifactId>
+      <version>${jansi.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.tez</groupId>
       <artifactId>tez-api</artifactId>
       <version>${tez.version}</version>

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
 /**
  * Generated from template ColumnArithmeticColumnDecimal.txt, which covers binary arithmetic 
@@ -61,8 +61,8 @@ public class <ClassName> extends VectorE
     DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumn];
     int[] sel = batch.selected;
     int n = batch.size;
-    Decimal128[] vector1 = inputColVector1.vector;
-    Decimal128[] vector2 = inputColVector2.vector;
+    HiveDecimalWritable[] vector1 = inputColVector1.vector;
+    HiveDecimalWritable[] vector2 = inputColVector2.vector;
 
     // return immediately if batch is empty
     if (n == 0) {

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * Generated from template ColumnArithmeticScalarDecimal.txt, which covers binary arithmetic 
@@ -35,10 +36,10 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
   private int outputColumn;
 
-  public <ClassName>(int colNum, Decimal128 value, int outputColumn) {
+  public <ClassName>(int colNum, HiveDecimal value, int outputColumn) {
     this.colNum = colNum;
     this.value = value;
     this.outputColumn = outputColumn;
@@ -64,7 +65,7 @@ public class <ClassName> extends VectorE
     outputColVector.noNulls = inputColVector.noNulls;
     outputColVector.isRepeating = inputColVector.isRepeating;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
     
     // return immediately if batch is empty
     if (n == 0) {
@@ -129,26 +130,6 @@ public class <ClassName> extends VectorE
   public int getOutputColumn() {
     return outputColumn;
   }
-  
-  public int getColNum() {
-    return colNum;
-  }
-  
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
 /**
  * Generated from template ColumnArithmeticColumnDecimal.txt, which covers binary arithmetic
@@ -61,8 +61,8 @@ public class <ClassName> extends VectorE
     DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumn];
     int[] sel = batch.selected;
     int n = batch.size;
-    Decimal128[] vector1 = inputColVector1.vector;
-    Decimal128[] vector2 = inputColVector2.vector;
+    HiveDecimalWritable[] vector1 = inputColVector1.vector;
+    HiveDecimalWritable[] vector2 = inputColVector2.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -138,26 +138,6 @@ public class <ClassName> extends VectorE
     return outputColumn;
   }
 
-  public int getColNum1() {
-    return colNum1;
-  }
-
-  public void setColNum1(int colNum1) {
-    this.colNum1 = colNum1;
-  }
-
-  public int getColNum2() {
-    return colNum2;
-  }
-
-  public void setColNum2(int colNum2) {
-    this.colNum2 = colNum2;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * Generated from template ColumnDivideScalarDecimal.txt, which covers binary arithmetic
@@ -35,11 +36,10 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
   private int outputColumn;
-  private transient Decimal128 zero;  // to hold constant 0 for later use
 
-  public <ClassName>(int colNum, Decimal128 value, int outputColumn) {
+  public <ClassName>(int colNum, HiveDecimal value, int outputColumn) {
     this.colNum = colNum;
     this.value = value;
     this.outputColumn = outputColumn;
@@ -65,13 +65,8 @@ public class <ClassName> extends VectorE
     outputColVector.noNulls = inputColVector.noNulls;
     outputColVector.isRepeating = inputColVector.isRepeating;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
-    Decimal128[] outputVector = outputColVector.vector;
-
-    // Initialize local variable to use as 0 value on first use.
-    if (zero == null) {
-      this.zero = new Decimal128(0, inputColVector.scale);
-    }
+    HiveDecimalWritable[] vector = inputColVector.vector;
+    HiveDecimalWritable[] outputVector = outputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -90,7 +85,7 @@ public class <ClassName> extends VectorE
     }
 
 
-    if (value.compareTo(zero) == 0) {
+    if (value.compareTo(HiveDecimal.ZERO) == 0) {
 
       // Denominator is zero, convert the batch to nulls
       outputColVector.noNulls = false;
@@ -142,26 +137,6 @@ public class <ClassName> extends VectorE
     return outputColumn;
   }
 
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
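
A small semantic note on the divide-by-zero guard in this template: the old code lazily constructed a Decimal128 zero at the input column's scale before it could test the constant denominator, while HiveDecimal comparison is plain BigDecimal value ordering, so the shared constant HiveDecimal.ZERO suffices and the transient zero field disappears. The check reduces to:

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    // Constant divisor is zero: every quotient is undefined, so the
    // template null-fills the output batch instead of dividing row by row.
    if (value.compareTo(HiveDecimal.ZERO) == 0) {
      outputColVector.noNulls = false;
      // ... mark the batch's rows null, as in the template's zero branch ...
    }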

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt Fri Nov  7 20:41:34 2014
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.*;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
@@ -59,7 +59,7 @@ public class <ClassName> extends VectorE
     boolean[] outputIsNull = outputColVector.isNull;
     outputColVector.noNulls = inputColVector.noNulls;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -117,18 +117,6 @@ public class <ClassName> extends VectorE
   public String getOutputType() {
     return outputType;
   }
-  
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt Fri Nov  7 20:41:34 2014
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
@@ -37,10 +39,10 @@ public class <ClassName> extends VectorE
   private int colNum;
 
   // The comparison is of the form "column BETWEEN leftValue AND rightValue"
-  private Decimal128 leftValue;
-  private Decimal128 rightValue;
+  private HiveDecimal leftValue;
+  private HiveDecimal rightValue;
 
-  public <ClassName>(int colNum, Decimal128 leftValue, Decimal128 rightValue) {
+  public <ClassName>(int colNum, HiveDecimal leftValue, HiveDecimal rightValue) {
     this.colNum = colNum;
     this.leftValue = leftValue;
     this.rightValue = rightValue;
@@ -60,7 +62,7 @@ public class <ClassName> extends VectorE
     int[] sel = batch.selected;
     boolean[] nullPos = inputColVector.isNull;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -72,7 +74,7 @@ public class <ClassName> extends VectorE
 
         // All must be selected otherwise size would be zero.
         // Repeating property will not change.
-        if (<OptionalNot>(vector[0].compareTo(leftValue) < 0 || vector[0].compareTo(rightValue) > 0)) {
+        if (<OptionalNot>(DecimalUtil.compare(vector[0], leftValue) < 0 || DecimalUtil.compare(vector[0], rightValue) > 0)) {
 
           // Entire batch is filtered out.
           batch.size = 0;
@@ -81,7 +83,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int j = 0; j != n; j++) {
           int i = sel[j];
-          if (<OptionalNot>(leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) {
+          if (<OptionalNot>(DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) {
             sel[newSize++] = i;
           }
         }
@@ -89,7 +91,7 @@ public class <ClassName> extends VectorE
       } else {
         int newSize = 0;
         for(int i = 0; i != n; i++) {
-          if (<OptionalNot>(leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) {
+          if (<OptionalNot>(DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) {
             sel[newSize++] = i;
           }
         }
@@ -104,7 +106,7 @@ public class <ClassName> extends VectorE
         // All must be selected otherwise size would be zero.
         // Repeating property will not change.
         if (!nullPos[0]) {
-          if (<OptionalNot>(vector[0].compareTo(leftValue) < 0 || vector[0].compareTo(rightValue) > 0)) {
+          if (<OptionalNot>(DecimalUtil.compare(vector[0], leftValue) < 0 || DecimalUtil.compare(vector[0], rightValue) > 0)) {
 
             // Entire batch is filtered out.
             batch.size = 0;
@@ -117,7 +119,7 @@ public class <ClassName> extends VectorE
         for(int j = 0; j != n; j++) {
           int i = sel[j];
           if (!nullPos[i]) {
-            if (<OptionalNot>(leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) {
+            if (<OptionalNot>(DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) {
              sel[newSize++] = i;
             }
           }
@@ -129,7 +131,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int i = 0; i != n; i++) {
           if (!nullPos[i]) {
-            if (<OptionalNot>(leftValue.compareTo(vector[i]) <= 0 && vector[i].compareTo(rightValue) <= 0)) {
+            if (<OptionalNot>(DecimalUtil.compare(leftValue, vector[i]) <= 0 && DecimalUtil.compare(vector[i], rightValue) <= 0)) {
               sel[newSize++] = i;
             }
           }
@@ -152,30 +154,6 @@ public class <ClassName> extends VectorE
     return "boolean";
   }
 
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getLeftValue() {
-    return leftValue;
-  }
-
-  public void setLeftValue(Decimal128 value) {
-    this.leftValue = value;
-  }
-
-  public Decimal128 getRightValue() {
-    return rightValue;
-  }
-
-  public void setRightValue(Decimal128 value) {
-    this.leftValue = value;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
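
The predicate the template emits keeps row i when leftValue <= vector[i] <= rightValue, with <OptionalNot> flipping the test for the NOT BETWEEN variant. As a side effect of dropping the accessors, a latent bug disappears: the removed setRightValue assigned to leftValue. A standalone rendering of the kept predicate:

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    public class BetweenSketch {
      // row survives the filter when left <= v && v <= right
      static boolean between(HiveDecimal v, HiveDecimal left, HiveDecimal right) {
        return left.compareTo(v) <= 0 && v.compareTo(right) <= 0;
      }
      public static void main(String[] args) {
        System.out.println(between(HiveDecimal.create("15.5"),
            HiveDecimal.create("10"), HiveDecimal.create("20"))); // true
      }
    }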

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareColumn.txt Fri Nov  7 20:41:34 2014
@@ -22,7 +22,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
 /**
  * Generated from template FilterDecimalColumnCompareColumn.txt, which covers binary comparison 
@@ -57,8 +57,8 @@ public class <ClassName> extends VectorE
     boolean[] nullPos1 = inputColVector1.isNull;
     boolean[] nullPos2 = inputColVector2.isNull;
     int n = batch.size;
-    Decimal128[] vector1 = inputColVector1.vector;
-    Decimal128[] vector2 = inputColVector2.vector;
+    HiveDecimalWritable[] vector1 = inputColVector1.vector;
+    HiveDecimalWritable[] vector2 = inputColVector2.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -428,22 +428,6 @@ public class <ClassName> extends VectorE
   public int getOutputColumn() {
     return -1;
   }
-  
-  public int getColNum1() {
-    return colNum1;
-  }
-
-  public void setColNum1(int colNum1) {
-    this.colNum1 = colNum1;
-  }
-
-  public int getColNum2() {
-    return colNum2;
-  }
-
-  public void setColNum2(int colNum2) {
-    this.colNum2 = colNum2;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareScalar.txt Fri Nov  7 20:41:34 2014
@@ -20,9 +20,11 @@ package org.apache.hadoop.hive.ql.exec.v
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of decimal
@@ -33,9 +35,9 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
 
-  public <ClassName>(int colNum, Decimal128 value) {
+  public <ClassName>(int colNum, HiveDecimal value) {
     this.colNum = colNum;
     this.value = value;
   }
@@ -52,7 +54,7 @@ public class <ClassName> extends VectorE
     int[] sel = batch.selected;
     boolean[] nullPos = inputColVector.isNull;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -63,7 +65,7 @@ public class <ClassName> extends VectorE
       if (inputColVector.isRepeating) {
 
         // All must be selected otherwise size would be zero. Repeating property will not change.
-        if (!(vector[0].compareTo(value) <OperatorSymbol> 0)) {
+        if (!(DecimalUtil.compare(vector[0], value) <OperatorSymbol> 0)) {
 
           // Entire batch is filtered out.
           batch.size = 0;
@@ -72,7 +74,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int j = 0; j != n; j++) {
           int i = sel[j];
-          if (vector[i].compareTo(value) <OperatorSymbol> 0) {
+          if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) {
             sel[newSize++] = i;
           }
         }
@@ -80,7 +82,7 @@ public class <ClassName> extends VectorE
       } else {
         int newSize = 0;
         for(int i = 0; i != n; i++) {
-          if (vector[i].compareTo(value) <OperatorSymbol> 0) {
+          if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) {
             sel[newSize++] = i;
           }
         }
@@ -94,7 +96,7 @@ public class <ClassName> extends VectorE
 
         // All must be selected otherwise size would be zero. Repeating property will not change.
         if (!nullPos[0]) {
-          if (!(vector[0].compareTo(value) <OperatorSymbol> 0)) {
+          if (!(DecimalUtil.compare(vector[0], value) <OperatorSymbol> 0)) {
 
             // Entire batch is filtered out.
             batch.size = 0;
@@ -107,7 +109,7 @@ public class <ClassName> extends VectorE
         for(int j = 0; j != n; j++) {
           int i = sel[j];
           if (!nullPos[i]) {
-           if (vector[i].compareTo(value) <OperatorSymbol> 0) {
+           if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) {
              sel[newSize++] = i;
            }
           }
@@ -119,7 +121,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int i = 0; i != n; i++) {
           if (!nullPos[i]) {
-            if (vector[i].compareTo(value) <OperatorSymbol> 0) {
+            if (DecimalUtil.compare(vector[i], value) <OperatorSymbol> 0) {
               sel[newSize++] = i;
             }
           }
@@ -142,22 +144,6 @@ public class <ClassName> extends VectorE
     return "boolean";
   }
 
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
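
The DecimalUtil.compare overloads pulled in here bridge the writable element type and the HiveDecimal scalar. A plausible reading of what they reduce to (sketch only, not the actual DecimalUtil source):

    import org.apache.hadoop.hive.common.type.HiveDecimal;
    import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

    // Presumed shape of the two compare overloads used by the filter
    // templates: unwrap the writable, then delegate to HiveDecimal.
    public final class DecimalUtilSketch {
      public static int compare(HiveDecimalWritable writable, HiveDecimal value) {
        return writable.getHiveDecimal().compareTo(value);
      }
      public static int compare(HiveDecimal value, HiveDecimalWritable writable) {
        return value.compareTo(writable.getHiveDecimal());
      }
    }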

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareColumn.txt Fri Nov  7 20:41:34 2014
@@ -20,9 +20,11 @@ package org.apache.hadoop.hive.ql.exec.v
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * This is a generated class to evaluate a <OperatorSymbol> comparison on a vector of decimal
@@ -33,9 +35,9 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
 
-  public <ClassName>(Decimal128 value, int colNum) {
+  public <ClassName>(HiveDecimal value, int colNum) {
     this.colNum = colNum;
     this.value = value;
   }
@@ -52,7 +54,7 @@ public class <ClassName> extends VectorE
     int[] sel = batch.selected;
     boolean[] nullPos = inputColVector.isNull;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -63,7 +65,7 @@ public class <ClassName> extends VectorE
       if (inputColVector.isRepeating) {
 
         // All must be selected otherwise size would be zero. Repeating property will not change.
-        if (!(value.compareTo(vector[0]) <OperatorSymbol> 0)) {
+        if (!(DecimalUtil.compare(value, vector[0]) <OperatorSymbol> 0)) {
 
           // Entire batch is filtered out.
           batch.size = 0;
@@ -72,7 +74,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int j = 0; j != n; j++) {
           int i = sel[j];
-          if (value.compareTo(vector[i]) <OperatorSymbol> 0) {
+          if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) {
             sel[newSize++] = i;
           }
         }
@@ -80,7 +82,7 @@ public class <ClassName> extends VectorE
       } else {
         int newSize = 0;
         for(int i = 0; i != n; i++) {
-          if (value.compareTo(vector[i]) <OperatorSymbol> 0) {
+          if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) {
             sel[newSize++] = i;
           }
         }
@@ -94,7 +96,7 @@ public class <ClassName> extends VectorE
 
         // All must be selected otherwise size would be zero. Repeating property will not change.
         if (!nullPos[0]) {
-          if (!(value.compareTo(vector[0]) <OperatorSymbol> 0)) {
+          if (!(DecimalUtil.compare(value, vector[0]) <OperatorSymbol> 0)) {
 
             // Entire batch is filtered out.
             batch.size = 0;
@@ -107,7 +109,7 @@ public class <ClassName> extends VectorE
         for(int j = 0; j != n; j++) {
           int i = sel[j];
           if (!nullPos[i]) {
-           if (value.compareTo(vector[i]) <OperatorSymbol> 0) {
+           if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) {
              sel[newSize++] = i;
            }
           }
@@ -119,7 +121,7 @@ public class <ClassName> extends VectorE
         int newSize = 0;
         for(int i = 0; i != n; i++) {
           if (!nullPos[i]) {
-            if (value.compareTo(vector[i]) <OperatorSymbol> 0) {
+            if (DecimalUtil.compare(value, vector[i]) <OperatorSymbol> 0) {
               sel[newSize++] = i;
             }
           }
@@ -142,22 +144,6 @@ public class <ClassName> extends VectorE
     return "boolean";
   }
 
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
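
This template is the mirror image of the column-compare-scalar one: the scalar sits on the left of <OperatorSymbol>, which is why the constructor takes (value, colNum) and the compare arguments are swapped throughout. Sign-flip sanity check (values illustrative):

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    public class MirrorCompareSketch {
      public static void main(String[] args) {
        HiveDecimal scalar = HiveDecimal.create("5");
        HiveDecimal col = HiveDecimal.create("7");
        System.out.println(Integer.signum(scalar.compareTo(col))); // -1: 5 < 7
        System.out.println(Integer.signum(col.compareTo(scalar))); //  1: 7 > 5
      }
    }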

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * Generated from template ScalarArithmeticColumnDecimal.txt, which covers binary arithmetic 
@@ -35,10 +36,10 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
   private int outputColumn;
 
-  public <ClassName>(Decimal128 value, int colNum, int outputColumn) {
+  public <ClassName>(HiveDecimal value, int colNum, int outputColumn) {
     this.colNum = colNum;
     this.value = value;
     this.outputColumn = outputColumn;
@@ -64,7 +65,7 @@ public class <ClassName> extends VectorE
     outputColVector.noNulls = inputColVector.noNulls;
     outputColVector.isRepeating = inputColVector.isRepeating;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
     
     // return immediately if batch is empty
     if (n == 0) {
@@ -126,26 +127,6 @@ public class <ClassName> extends VectorE
   public int getOutputColumn() {
     return outputColumn;
   }
-  
-  public int getColNum() {
-    return colNum;
-  }
-  
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt Fri Nov  7 20:41:34 2014
@@ -24,7 +24,8 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.common.type.Decimal128;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 
 /**
  * Generated from template ScalarDivideColumnDecimal.txt, which covers binary arithmetic
@@ -35,10 +36,10 @@ public class <ClassName> extends VectorE
   private static final long serialVersionUID = 1L;
 
   private int colNum;
-  private Decimal128 value;
+  private HiveDecimal value;
   private int outputColumn;
 
-  public <ClassName>(Decimal128 value, int colNum, int outputColumn) {
+  public <ClassName>(HiveDecimal value, int colNum, int outputColumn) {
     this.colNum = colNum;
     this.value = value;
     this.outputColumn = outputColumn;
@@ -64,8 +65,8 @@ public class <ClassName> extends VectorE
     outputColVector.noNulls = inputColVector.noNulls;
     outputColVector.isRepeating = inputColVector.isRepeating;
     int n = batch.size;
-    Decimal128[] vector = inputColVector.vector;
-    Decimal128[] outputVector = outputColVector.vector;
+    HiveDecimalWritable[] vector = inputColVector.vector;
+    HiveDecimalWritable[] outputVector = outputColVector.vector;
 
     // return immediately if batch is empty
     if (n == 0) {
@@ -129,26 +130,6 @@ public class <ClassName> extends VectorE
     return outputColumn;
   }
 
-  public int getColNum() {
-    return colNum;
-  }
-
-  public void setColNum(int colNum) {
-    this.colNum = colNum;
-  }
-
-  public Decimal128 getValue() {
-    return value;
-  }
-
-  public void setValue(Decimal128 value) {
-    this.value = value;
-  }
-
-  public void setOutputColumn(int outputColumn) {
-    this.outputColumn = outputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
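
Here the column supplies the denominator, so unlike the column-divide-scalar template above there is no single up-front zero test that can null out the whole batch; zero has to be handled per element (hence the NullUtil import). Sketch of the per-row guard (helper name illustrative; the template marks the output row null rather than returning null):

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    public class DivideGuardSketch {
      static HiveDecimal divideOrNull(HiveDecimal numerator, HiveDecimal denominator) {
        if (denominator.compareTo(HiveDecimal.ZERO) == 0) {
          return null; // stand-in for nulling the output row
        }
        return numerator.divide(denominator);
      }
      public static void main(String[] args) {
        System.out.println(divideOrNull(HiveDecimal.create("1"), HiveDecimal.ZERO)); // null
      }
    }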

Modified: hive/branches/spark/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt Fri Nov  7 20:41:34 2014
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.gen;
 
-import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
@@ -31,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
 /**
@@ -49,7 +49,7 @@ public class <ClassName> extends VectorA
 
       private static final long serialVersionUID = 1L;
 
-      transient private final Decimal128 value;
+      transient private final HiveDecimalWritable value;
 
       /**
       * Value is explicitly (re)initialized in reset()
@@ -57,15 +57,16 @@ public class <ClassName> extends VectorA
       transient private boolean isNull = true;
 
       public Aggregation() {
-        value = new Decimal128();
+        value = new HiveDecimalWritable();
       }
 
-      public void checkValue(Decimal128 value, short scale) {
+      public void checkValue(HiveDecimalWritable writable, short scale) {
+        HiveDecimal value = writable.getHiveDecimal();
         if (isNull) {
           isNull = false;
-          this.value.update(value);
-        } else if (this.value.compareTo(value) <OperatorSymbol> 0) {
-          this.value.update(value, scale);
+          this.value.set(value);
+        } else if (this.value.getHiveDecimal().compareTo(value) <OperatorSymbol> 0) {
+          this.value.set(value);
         }
       }
 
@@ -77,7 +78,7 @@ public class <ClassName> extends VectorA
       @Override
       public void reset () {
         isNull = true;
-        value.zeroClear();
+        value.set(HiveDecimal.ZERO);
       }
     }
 
@@ -124,7 +125,7 @@ public class <ClassName> extends VectorA
 
       DecimalColumnVector inputVector = (DecimalColumnVector)batch.
         cols[this.inputExpression.getOutputColumn()];
-      Decimal128[] vector = inputVector.vector;
+      HiveDecimalWritable[] vector = inputVector.vector;
 
       if (inputVector.noNulls) {
         if (inputVector.isRepeating) {
@@ -170,7 +171,7 @@ public class <ClassName> extends VectorA
     private void iterateNoNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128 value,
+      HiveDecimalWritable value,
       short scale,
       int batchSize) {
 
@@ -186,7 +187,7 @@ public class <ClassName> extends VectorA
     private void iterateNoNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128[] values,
+      HiveDecimalWritable[] values,
       short scale,
       int[] selection,
       int batchSize) {
@@ -203,7 +204,7 @@ public class <ClassName> extends VectorA
     private void iterateNoNullsWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128[] values,
+      HiveDecimalWritable[] values,
       short scale,
       int batchSize) {
       for (int i=0; i < batchSize; ++i) {
@@ -218,7 +219,7 @@ public class <ClassName> extends VectorA
     private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128 value,
+      HiveDecimalWritable value,
       short scale,
       int batchSize,
       int[] selection,
@@ -239,7 +240,7 @@ public class <ClassName> extends VectorA
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128 value,
+      HiveDecimalWritable value,
       short scale,
       int batchSize,
       boolean[] isNull) {
@@ -258,7 +259,7 @@ public class <ClassName> extends VectorA
     private void iterateHasNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128[] values,
+      HiveDecimalWritable[] values,
       short scale,
       int batchSize,
       int[] selection,
@@ -279,7 +280,7 @@ public class <ClassName> extends VectorA
     private void iterateHasNullsWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      Decimal128[] values,
+      HiveDecimalWritable[] values,
       short scale,
       int batchSize,
       boolean[] isNull) {
@@ -312,13 +313,14 @@ public class <ClassName> extends VectorA
 
         Aggregation myagg = (Aggregation)agg;
 
-        Decimal128[] vector = inputVector.vector;
+        HiveDecimalWritable[] vector = inputVector.vector;
 
         if (inputVector.isRepeating) {
           if (inputVector.noNulls &&
             (myagg.isNull || (myagg.value.compareTo(vector[0]) <OperatorSymbol> 0))) {
             myagg.isNull = false;
-            myagg.value.update(vector[0], inputVector.scale);
+            HiveDecimal value = vector[0].getHiveDecimal();
+            myagg.value.set(value);
           }
           return;
         }
@@ -341,7 +343,7 @@ public class <ClassName> extends VectorA
 
     private void iterateSelectionHasNulls(
         Aggregation myagg,
-        Decimal128[] vector,
+        HiveDecimalWritable[] vector,
         short scale,
         int batchSize,
         boolean[] isNull,
@@ -350,13 +352,13 @@ public class <ClassName> extends VectorA
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          Decimal128 value = vector[i];
+          HiveDecimal value = vector[i].getHiveDecimal();
           if (myagg.isNull) {
             myagg.isNull = false;
-            myagg.value.update(value);
+            myagg.value.set(value);
           }
-          else if (myagg.value.compareTo(value) <OperatorSymbol> 0) {
-            myagg.value.update(value, scale);
+          else if (myagg.value.getHiveDecimal().compareTo(value) <OperatorSymbol> 0) {
+            myagg.value.set(value);
           }
         }
       }
@@ -364,40 +366,41 @@ public class <ClassName> extends VectorA
 
     private void iterateSelectionNoNulls(
         Aggregation myagg,
-        Decimal128[] vector,
+        HiveDecimalWritable[] vector,
         short scale,
         int batchSize,
         int[] selected) {
 
       if (myagg.isNull) {
-        myagg.value.update(vector[selected[0]]);
+        HiveDecimal value = vector[selected[0]].getHiveDecimal();
+        myagg.value.set(value);
         myagg.isNull = false;
       }
 
       for (int i=0; i< batchSize; ++i) {
-        Decimal128 value = vector[selected[i]];
-        if (myagg.value.compareTo(value) <OperatorSymbol> 0) {
-          myagg.value.update(value, scale);
+        HiveDecimal value = vector[selected[i]].getHiveDecimal();
+        if (myagg.value.getHiveDecimal().compareTo(value) <OperatorSymbol> 0) {
+          myagg.value.set(value);
         }
       }
     }
 
     private void iterateNoSelectionHasNulls(
         Aggregation myagg,
-        Decimal128[] vector,
+        HiveDecimalWritable[] vector,
         short scale,
         int batchSize,
         boolean[] isNull) {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          Decimal128 value = vector[i];
+          HiveDecimal value = vector[i].getHiveDecimal();
           if (myagg.isNull) {
-            myagg.value.update(value, scale);
+            myagg.value.set(value);
             myagg.isNull = false;
           }
-          else if (myagg.value.compareTo(value) <OperatorSymbol> 0) {
-            myagg.value.update(value, scale);
+          else if (myagg.value.getHiveDecimal().compareTo(value) <OperatorSymbol> 0) {
+            myagg.value.set(value);
           }
         }
       }
@@ -405,18 +408,19 @@ public class <ClassName> extends VectorA
 
     private void iterateNoSelectionNoNulls(
         Aggregation myagg,
-        Decimal128[] vector,
+        HiveDecimalWritable[] vector,
         short scale,
         int batchSize) {
       if (myagg.isNull) {
-        myagg.value.update(vector[0]);
+        HiveDecimal value = vector[0].getHiveDecimal();
+        myagg.value.set(value);
         myagg.isNull = false;
       }
 
       for (int i=0;i<batchSize;++i) {
-        Decimal128 value = vector[i];
-        if (myagg.value.compareTo(value) <OperatorSymbol> 0) {
-          myagg.value.update(value, scale);
+        HiveDecimal value = vector[i].getHiveDecimal();
+        if (myagg.value.getHiveDecimal().compareTo(value) <OperatorSymbol> 0) {
+          myagg.value.set(value);
         }
       }
     }
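
Two things are worth noting about the rewritten aggregation buffer. First, the short scale parameter survives the rewrite but is no longer consulted: HiveDecimalWritable.set copies the candidate as-is, where Decimal128.update took an explicit scale. Second, the cost profile shifts, since getHiveDecimal() effectively materializes an immutable value per inspected row while Decimal128 mutated in place. A standalone rendering of the new buffer, shown for the MIN case (where <OperatorSymbol> plausibly expands to ">"):

    import org.apache.hadoop.hive.common.type.HiveDecimal;
    import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

    public class MinBufferSketch {
      private final HiveDecimalWritable value = new HiveDecimalWritable();
      private boolean isNull = true;

      void checkValue(HiveDecimalWritable writable) {
        HiveDecimal candidate = writable.getHiveDecimal();
        if (isNull) {
          isNull = false;
          value.set(candidate);
        } else if (value.getHiveDecimal().compareTo(candidate) > 0) {
          value.set(candidate); // keep the smaller of the two
        }
      }
    }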