Posted to commits@hive.apache.org by gu...@apache.org on 2014/08/22 23:37:21 UTC

svn commit: r1619936 [13/49] - in /hive/branches/cbo: ./ accumulo-handler/ ant/src/org/apache/hadoop/hive/ant/ bin/ common/src/java/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/c...

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Fri Aug 22 21:36:47 2014
@@ -43,44 +43,40 @@ public class RetryingHMSHandler implemen
   private final IHMSHandler base;
   private final MetaStoreInit.MetaStoreInitData metaStoreInitData =
     new MetaStoreInit.MetaStoreInitData();
-  private final HiveConf hiveConf;
 
-  protected RetryingHMSHandler(final HiveConf hiveConf, final String name) throws MetaException {
+  private final HiveConf hiveConf;            // base configuration
+  private final Configuration configuration;  // active configuration
+
+  private RetryingHMSHandler(HiveConf hiveConf, String name, boolean local) throws MetaException {
     this.hiveConf = hiveConf;
+    this.base = new HiveMetaStore.HMSHandler(name, hiveConf, false);
+    if (local) {
+      base.setConf(hiveConf); // tests expect configuration changes applied directly to metastore
+    }
+    configuration = base.getConf();
 
     // This has to be called before initializing the instance of HMSHandler
-    init();
+    // Using the hook on startup ensures that the hook always has priority
+    // over settings in *.xml.  The thread local conf needs to be used because at this point
+    // it has already been initialized using hiveConf.
+    MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData);
 
-    this.base = new HiveMetaStore.HMSHandler(name, hiveConf);
+    base.init();
   }
 
-  public static IHMSHandler getProxy(HiveConf hiveConf, String name) throws MetaException {
+  public static IHMSHandler getProxy(HiveConf hiveConf, String name, boolean local)
+      throws MetaException {
 
-    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, name);
+    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, name, local);
 
     return (IHMSHandler) Proxy.newProxyInstance(
       RetryingHMSHandler.class.getClassLoader(),
       new Class[] { IHMSHandler.class }, handler);
   }
 
-  private void init() throws MetaException {
-     // Using the hook on startup ensures that the hook always has priority
-     // over settings in *.xml.  The thread local conf needs to be used because at this point
-     // it has already been initialized using hiveConf.
-    MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData);
-
-  }
-
-  private void initMS() {
-    base.setConf(getConf());
-  }
-
-
   @Override
   public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
 
-    Object ret = null;
-
     boolean gotNewConnectUrl = false;
     boolean reloadConf = HiveConf.getBoolVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
@@ -95,15 +91,14 @@ public class RetryingHMSHandler implemen
     }
 
     int retryCount = 0;
-    // Exception caughtException = null;
     Throwable caughtException = null;
     while (true) {
       try {
         if (reloadConf || gotNewConnectUrl) {
-          initMS();
+          base.setConf(getConf());
         }
-        ret = method.invoke(base, args);
-        break;
+        return method.invoke(base, args);
+
       } catch (javax.jdo.JDOException e) {
         caughtException = e;
       } catch (UndeclaredThrowableException e) {
@@ -166,10 +161,9 @@ public class RetryingHMSHandler implemen
       gotNewConnectUrl = MetaStoreInit.updateConnectionURL(hiveConf, getConf(),
         lastUrl, metaStoreInitData);
     }
-    return ret;
   }
 
   public Configuration getConf() {
-    return hiveConf;
+    return configuration;
   }
 }
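
For readers who have not met java.lang.reflect.Proxy, the retry-by-dynamic-proxy idea behind
RetryingHMSHandler can be sketched in a few self-contained lines. This is a generic illustration,
not the Hive implementation: RetryingInvoker and its retry policy are invented for the example.

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    // Sketch of the pattern: wrap a target object behind its interface and
    // re-invoke the method when the underlying call fails.
    class RetryingInvoker implements InvocationHandler {
      private final Object target;
      private final int maxRetries;

      RetryingInvoker(Object target, int maxRetries) {
        this.target = target;
        this.maxRetries = maxRetries;
      }

      @SuppressWarnings("unchecked")
      static <T> T proxy(Class<T> iface, T target, int maxRetries) {
        return (T) Proxy.newProxyInstance(iface.getClassLoader(),
            new Class<?>[] { iface }, new RetryingInvoker(target, maxRetries));
      }

      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        Throwable caught = null;
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
          try {
            return method.invoke(target, args);   // delegate to the real implementation
          } catch (InvocationTargetException e) {
            caught = e.getCause();                // unwrap the underlying failure
            // a real handler would retry only transient errors and sleep between attempts
          }
        }
        throw caught;                             // retries exhausted: rethrow the last failure
      }
    }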

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java Fri Aug 22 21:36:47 2014
@@ -64,38 +64,54 @@ public class StatObjectConverter {
 
      if (statsObj.getStatsData().isSetBooleanStats()) {
        BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
-       mColStats.setBooleanStats(boolStats.getNumTrues(), boolStats.getNumFalses(),
-           boolStats.getNumNulls());
+       mColStats.setBooleanStats(
+           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+           boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
+           boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
      } else if (statsObj.getStatsData().isSetLongStats()) {
        LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
-       mColStats.setLongStats(longStats.getNumNulls(), longStats.getNumDVs(),
+       mColStats.setLongStats(
+           longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+           longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
            longStats.isSetLowValue() ? longStats.getLowValue() : null,
            longStats.isSetHighValue() ? longStats.getHighValue() : null);
      } else if (statsObj.getStatsData().isSetDoubleStats()) {
        DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
-       mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats.getNumDVs(),
+       mColStats.setDoubleStats(
+           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+           doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
            doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
            doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
      } else if (statsObj.getStatsData().isSetDecimalStats()) {
        DecimalColumnStatsData decimalStats = statsObj.getStatsData().getDecimalStats();
        String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
        String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
-       mColStats.setDecimalStats(decimalStats.getNumNulls(), decimalStats.getNumDVs(), low, high);
+       mColStats.setDecimalStats(
+           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
+           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+               low, high);
      } else if (statsObj.getStatsData().isSetStringStats()) {
        StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
-       mColStats.setStringStats(stringStats.getNumNulls(), stringStats.getNumDVs(),
-         stringStats.getMaxColLen(), stringStats.getAvgColLen());
+       mColStats.setStringStats(
+           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+           stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
+           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+           stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
      } else if (statsObj.getStatsData().isSetBinaryStats()) {
        BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
-       mColStats.setBinaryStats(binaryStats.getNumNulls(), binaryStats.getMaxColLen(),
-         binaryStats.getAvgColLen());
+       mColStats.setBinaryStats(
+           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+           binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
+           binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
      }
      return mColStats;
   }
 
   public static void setFieldsIntoOldStats(
       MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) {
-    oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    if (mStatsObj.getAvgColLen() != null) {
+      oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    }
     if (mStatsObj.getLongHighValue() != null) {
       oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
     }
@@ -114,29 +130,63 @@ public class StatObjectConverter {
     if (mStatsObj.getDecimalHighValue() != null) {
       oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
     }
-    oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
-    oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
-    oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
-    oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
-    oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+    if (mStatsObj.getMaxColLen() != null) {
+      oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
+    }
+    if (mStatsObj.getNumDVs() != null) {
+      oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
+    }
+    if (mStatsObj.getNumFalses() != null) {
+      oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
+    }
+    if (mStatsObj.getNumTrues() != null) {
+      oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
+    }
+    if (mStatsObj.getNumNulls() != null) {
+      oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+    }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
   }
 
   public static void setFieldsIntoOldStats(
       MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) {
-    oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-    oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
-    oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
+    if (mStatsObj.getAvgColLen() != null) {
+      oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    }
+    if (mStatsObj.getLongHighValue() != null) {
+      oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
+    }
+    if (mStatsObj.getDoubleHighValue() != null) {
+      oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
+    }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
-    oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue());
-    oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue());
-    oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue());
-    oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
-    oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
-    oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
-    oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
-    oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
-    oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+    if (mStatsObj.getLongLowValue() != null) {
+      oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue());
+    }
+    if (mStatsObj.getDoubleLowValue() != null) {
+      oldStatsObj.setDoubleLowValue(mStatsObj.getDoubleLowValue());
+    }
+    if (mStatsObj.getDecimalLowValue() != null) {
+      oldStatsObj.setDecimalLowValue(mStatsObj.getDecimalLowValue());
+    }
+    if (mStatsObj.getDecimalHighValue() != null) {
+      oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
+    }
+    if (mStatsObj.getMaxColLen() != null) {
+      oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
+    }
+    if (mStatsObj.getNumDVs() != null) {
+      oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
+    }
+    if (mStatsObj.getNumFalses() != null) {
+      oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
+    }
+    if (mStatsObj.getNumTrues() != null) {
+      oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
+    }
+    if (mStatsObj.getNumNulls() != null) {
+      oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+    }
   }
 
   public static ColumnStatisticsObj getTableColumnStatisticsObj(
@@ -241,31 +291,45 @@ public class StatObjectConverter {
 
     if (statsObj.getStatsData().isSetBooleanStats()) {
       BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
-      mColStats.setBooleanStats(boolStats.getNumTrues(), boolStats.getNumFalses(),
-          boolStats.getNumNulls());
+      mColStats.setBooleanStats(
+          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+          boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
+          boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
     } else if (statsObj.getStatsData().isSetLongStats()) {
       LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
-      mColStats.setLongStats(longStats.getNumNulls(), longStats.getNumDVs(),
+      mColStats.setLongStats(
+          longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+          longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
           longStats.isSetLowValue() ? longStats.getLowValue() : null,
           longStats.isSetHighValue() ? longStats.getHighValue() : null);
     } else if (statsObj.getStatsData().isSetDoubleStats()) {
       DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
-      mColStats.setDoubleStats(doubleStats.getNumNulls(), doubleStats.getNumDVs(),
+      mColStats.setDoubleStats(
+          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+          doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
           doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
           doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
     } else if (statsObj.getStatsData().isSetDecimalStats()) {
       DecimalColumnStatsData decimalStats = statsObj.getStatsData().getDecimalStats();
       String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
       String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
-      mColStats.setDecimalStats(decimalStats.getNumNulls(), decimalStats.getNumDVs(), low, high);
+      mColStats.setDecimalStats(
+          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
+          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+              low, high);
     } else if (statsObj.getStatsData().isSetStringStats()) {
       StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
-      mColStats.setStringStats(stringStats.getNumNulls(), stringStats.getNumDVs(),
-        stringStats.getMaxColLen(), stringStats.getAvgColLen());
+      mColStats.setStringStats(
+          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+          stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
+          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+          stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
     } else if (statsObj.getStatsData().isSetBinaryStats()) {
       BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
-      mColStats.setBinaryStats(binaryStats.getNumNulls(), binaryStats.getMaxColLen(),
-        binaryStats.getAvgColLen());
+      mColStats.setBinaryStats(
+          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+          binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
+          binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
     }
     return mColStats;
   }
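
The recurring isSetX() ? getX() : null pattern above maps unset optional Thrift fields to Java
null rather than a default 0; it pairs with the long-to-Long changes in MPartitionColumnStatistics
and MTableColumnStatistics further down. A small self-contained illustration, using a hypothetical
OptionalCount class in place of the generated Thrift structs:

    // Hypothetical stand-in for a Thrift-generated struct with one optional field.
    class OptionalCount {
      private long numNulls;
      private boolean numNullsIsSet;

      void setNumNulls(long v) { numNulls = v; numNullsIsSet = true; }
      boolean isSetNumNulls() { return numNullsIsSet; }
      long getNumNulls() { return numNulls; }   // returns 0 even when the field was never set
    }

    class BoxingDemo {
      public static void main(String[] args) {
        OptionalCount stats = new OptionalCount();   // numNulls deliberately left unset
        Long boxed = stats.isSetNumNulls() ? stats.getNumNulls() : null;
        System.out.println(boxed);                   // prints "null", not "0"
      }
    }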

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java Fri Aug 22 21:36:47 2014
@@ -49,121 +49,135 @@ public class TxnDbUtil {
     // intended for creating derby databases, and thus will inexorably get
     // out of date with it.  I'm open to any suggestions on how to make this
     // read the file in a build friendly way.
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    s.execute("CREATE TABLE TXNS (" +
-        "  TXN_ID bigint PRIMARY KEY," +
-        "  TXN_STATE char(1) NOT NULL," +
-        "  TXN_STARTED bigint NOT NULL," +
-        "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
-        "  TXN_USER varchar(128) NOT NULL," +
-        "  TXN_HOST varchar(128) NOT NULL)");
-
-        s.execute("CREATE TABLE TXN_COMPONENTS (" +
-        "  TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
-        "  TC_DATABASE varchar(128) NOT NULL," +
-        "  TC_TABLE varchar(128)," +
-        "  TC_PARTITION varchar(767))");
-    s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
-        "  CTC_TXNID bigint," +
-        "  CTC_DATABASE varchar(128) NOT NULL," +
-        "  CTC_TABLE varchar(128)," +
-        "  CTC_PARTITION varchar(767))");
-    s.execute("CREATE TABLE NEXT_TXN_ID (" +
-        "  NTXN_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
-    s.execute("CREATE TABLE HIVE_LOCKS (" +
-        " HL_LOCK_EXT_ID bigint NOT NULL," +
-        " HL_LOCK_INT_ID bigint NOT NULL," +
-        " HL_TXNID bigint," +
-        " HL_DB varchar(128) NOT NULL," +
-        " HL_TABLE varchar(128)," +
-        " HL_PARTITION varchar(767)," +
-        " HL_LOCK_STATE char(1) NOT NULL," +
-        " HL_LOCK_TYPE char(1) NOT NULL," +
-        " HL_LAST_HEARTBEAT bigint NOT NULL," +
-        " HL_ACQUIRED_AT bigint," +
-        " HL_USER varchar(128) NOT NULL," +
-        " HL_HOST varchar(128) NOT NULL," +
-        " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
-    s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
-
-    s.execute("CREATE TABLE NEXT_LOCK_ID (" +
-        " NL_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
-
-    s.execute("CREATE TABLE COMPACTION_QUEUE (" +
-        " CQ_ID bigint PRIMARY KEY," +
-        " CQ_DATABASE varchar(128) NOT NULL," +
-        " CQ_TABLE varchar(128) NOT NULL," +
-        " CQ_PARTITION varchar(767)," +
-        " CQ_STATE char(1) NOT NULL," +
-        " CQ_TYPE char(1) NOT NULL," +
-        " CQ_WORKER_ID varchar(128)," +
-        " CQ_START bigint," +
-        " CQ_RUN_AS varchar(128))");
-
-    s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
-
-    conn.commit();
-    conn.close();
+    Connection conn = null;
+    boolean committed = false;
+    try {
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      s.execute("CREATE TABLE TXNS (" +
+          "  TXN_ID bigint PRIMARY KEY," +
+          "  TXN_STATE char(1) NOT NULL," +
+          "  TXN_STARTED bigint NOT NULL," +
+          "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
+          "  TXN_USER varchar(128) NOT NULL," +
+          "  TXN_HOST varchar(128) NOT NULL)");
+
+      s.execute("CREATE TABLE TXN_COMPONENTS (" +
+          "  TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
+          "  TC_DATABASE varchar(128) NOT NULL," +
+          "  TC_TABLE varchar(128)," +
+          "  TC_PARTITION varchar(767))");
+      s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+          "  CTC_TXNID bigint," +
+          "  CTC_DATABASE varchar(128) NOT NULL," +
+          "  CTC_TABLE varchar(128)," +
+          "  CTC_PARTITION varchar(767))");
+      s.execute("CREATE TABLE NEXT_TXN_ID (" +
+          "  NTXN_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+      s.execute("CREATE TABLE HIVE_LOCKS (" +
+          " HL_LOCK_EXT_ID bigint NOT NULL," +
+          " HL_LOCK_INT_ID bigint NOT NULL," +
+          " HL_TXNID bigint," +
+          " HL_DB varchar(128) NOT NULL," +
+          " HL_TABLE varchar(128)," +
+          " HL_PARTITION varchar(767)," +
+          " HL_LOCK_STATE char(1) NOT NULL," +
+          " HL_LOCK_TYPE char(1) NOT NULL," +
+          " HL_LAST_HEARTBEAT bigint NOT NULL," +
+          " HL_ACQUIRED_AT bigint," +
+          " HL_USER varchar(128) NOT NULL," +
+          " HL_HOST varchar(128) NOT NULL," +
+          " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+      s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+
+      s.execute("CREATE TABLE NEXT_LOCK_ID (" +
+          " NL_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+
+      s.execute("CREATE TABLE COMPACTION_QUEUE (" +
+          " CQ_ID bigint PRIMARY KEY," +
+          " CQ_DATABASE varchar(128) NOT NULL," +
+          " CQ_TABLE varchar(128) NOT NULL," +
+          " CQ_PARTITION varchar(767)," +
+          " CQ_STATE char(1) NOT NULL," +
+          " CQ_TYPE char(1) NOT NULL," +
+          " CQ_WORKER_ID varchar(128)," +
+          " CQ_START bigint," +
+          " CQ_RUN_AS varchar(128))");
+
+      s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+
+      conn.commit();
+      committed = true;
+    } finally {
+      if (conn != null) {
+        if (!committed) conn.rollback();
+        conn.close();
+      }
+    }
   }
 
   public static void cleanDb() throws  Exception {
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    // We want to try these, whether they succeed or fail.
-    try {
-      s.execute("DROP INDEX HL_TXNID_INDEX");
-    } catch (Exception e) {
-      System.err.println("Unable to drop index HL_TXNID_INDEX " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE TXN_COMPONENTS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table TXN_COMPONENTS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE TXNS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table TXNS " +
-          e.getMessage());
-    }
+    Connection conn = null;
+    boolean committed = false;
     try {
-      s.execute("DROP TABLE NEXT_TXN_ID");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table NEXT_TXN_ID " +
-          e.getMessage());
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      // We want to try these, whether they succeed or fail.
+      try {
+        s.execute("DROP INDEX HL_TXNID_INDEX");
+      } catch (Exception e) {
+        System.err.println("Unable to drop index HL_TXNID_INDEX " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE TXN_COMPONENTS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table TXN_COMPONENTS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE TXNS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table TXNS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE NEXT_TXN_ID");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table NEXT_TXN_ID " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE HIVE_LOCKS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table HIVE_LOCKS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE NEXT_LOCK_ID");
+      } catch (Exception e) {
+      }
+      try {
+        s.execute("DROP TABLE COMPACTION_QUEUE");
+      } catch (Exception e) {
+      }
+      try {
+        s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
+      } catch (Exception e) {
+      }
+      conn.commit();
+      committed = true;
+    } finally {
+      if (conn != null) {
+        if (!committed) conn.rollback();
+        conn.close();
+      }
     }
-    try {
-      s.execute("DROP TABLE HIVE_LOCKS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table HIVE_LOCKS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE NEXT_LOCK_ID");
-    } catch (Exception e) {
-    }
-    try {
-      s.execute("DROP TABLE COMPACTION_QUEUE");
-    } catch (Exception e) {
-    }
-    try {
-      s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
-    } catch (Exception e) {
-    }
-    conn.commit();
-    conn.close();
   }
 
   /**
@@ -174,25 +188,34 @@ public class TxnDbUtil {
    */
   public static int countLockComponents(long lockId) throws  Exception {
     Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    ResultSet rs = s.executeQuery("select count(*) from hive_locks where " +
-        "hl_lock_ext_id = " + lockId);
-    if (!rs.next()) return 0;
-    int rc = rs.getInt(1);
-    conn.rollback();
-    conn.close();
-    return rc;
+    try {
+      Statement s = conn.createStatement();
+      ResultSet rs = s.executeQuery("select count(*) from hive_locks where hl_lock_ext_id = " +
+          lockId);
+      if (!rs.next()) return 0;
+      int rc = rs.getInt(1);
+      return rc;
+    } finally {
+      conn.rollback();
+      conn.close();
+    }
   }
 
   public static int findNumCurrentLocks() throws Exception {
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    ResultSet rs = s.executeQuery("select count(*) from hive_locks");
-    if (!rs.next()) return 0;
-    int rc = rs.getInt(1);
-    conn.rollback();
-    conn.close();
-    return rc;
+    Connection conn = null;
+    try {
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      ResultSet rs = s.executeQuery("select count(*) from hive_locks");
+      if (!rs.next()) return 0;
+      int rc = rs.getInt(1);
+      return rc;
+    } finally {
+      if (conn != null) {
+        conn.rollback();
+        conn.close();
+      }
+    }
   }
 
   private static Connection getConnection() throws Exception {
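
The setup and cleanup methods above now share a commit-or-rollback shape: acquire a connection,
run the statements, commit, and roll back in finally only if the commit never happened. A minimal
standalone JDBC sketch of that shape (runDdl and its JDBC URL parameter are illustrative, not part
of the commit):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    class TxnSetupSketch {
      static void runDdl(String jdbcUrl, String... statements) throws Exception {
        Connection conn = null;
        boolean committed = false;
        try {
          conn = DriverManager.getConnection(jdbcUrl);
          conn.setAutoCommit(false);
          try (Statement s = conn.createStatement()) {
            for (String sql : statements) {
              s.execute(sql);
            }
          }
          conn.commit();
          committed = true;
        } finally {
          if (conn != null) {             // guard: getConnection() itself may have failed
            if (!committed) {
              conn.rollback();
            }
            conn.close();
          }
        }
      }
    }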

Modified: hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java (original)
+++ hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java Fri Aug 22 21:36:47 2014
@@ -46,12 +46,12 @@ public class MPartitionColumnStatistics 
   private Double doubleHighValue;
   private String decimalLowValue;
   private String decimalHighValue;
-  private long numNulls;
-  private long numDVs;
-  private double avgColLen;
-  private long maxColLen;
-  private long numTrues;
-  private long numFalses;
+  private Long numNulls;
+  private Long numDVs;
+  private Double avgColLen;
+  private Long maxColLen;
+  private Long numTrues;
+  private Long numFalses;
   private long lastAnalyzed;
 
   public MPartitionColumnStatistics() {}
@@ -72,7 +72,7 @@ public class MPartitionColumnStatistics 
     this.colName = colName;
   }
 
-  public long getNumNulls() {
+  public Long getNumNulls() {
     return numNulls;
   }
 
@@ -80,7 +80,7 @@ public class MPartitionColumnStatistics 
     this.numNulls = numNulls;
   }
 
-  public long getNumDVs() {
+  public Long getNumDVs() {
     return numDVs;
   }
 
@@ -88,7 +88,7 @@ public class MPartitionColumnStatistics 
     this.numDVs = numDVs;
   }
 
-  public double getAvgColLen() {
+  public Double getAvgColLen() {
     return avgColLen;
   }
 
@@ -96,7 +96,7 @@ public class MPartitionColumnStatistics 
     this.avgColLen = avgColLen;
   }
 
-  public long getMaxColLen() {
+  public Long getMaxColLen() {
     return maxColLen;
   }
 
@@ -104,7 +104,7 @@ public class MPartitionColumnStatistics 
     this.maxColLen = maxColLen;
   }
 
-  public long getNumTrues() {
+  public Long getNumTrues() {
     return numTrues;
   }
 
@@ -112,7 +112,7 @@ public class MPartitionColumnStatistics 
     this.numTrues = numTrues;
   }
 
-  public long getNumFalses() {
+  public Long getNumFalses() {
     return numFalses;
   }
 
@@ -160,20 +160,20 @@ public class MPartitionColumnStatistics 
     this.colType = colType;
   }
 
-  public void setBooleanStats(long numTrues, long numFalses, long numNulls) {
+  public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) {
     this.numTrues = numTrues;
     this.numFalses = numFalses;
     this.numNulls = numNulls;
   }
 
-  public void setLongStats(long numNulls, long numNDVs, Long lowValue, Long highValue) {
+  public void setLongStats(Long numNulls, Long numNDVs, Long lowValue, Long highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.longLowValue = lowValue;
     this.longHighValue = highValue;
   }
 
-  public void setDoubleStats(long numNulls, long numNDVs, Double lowValue, Double highValue) {
+  public void setDoubleStats(Long numNulls, Long numNDVs, Double lowValue, Double highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.doubleLowValue = lowValue;
@@ -181,21 +181,21 @@ public class MPartitionColumnStatistics 
   }
 
   public void setDecimalStats(
-      long numNulls, long numNDVs, String lowValue, String highValue) {
+      Long numNulls, Long numNDVs, String lowValue, String highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.decimalLowValue = lowValue;
     this.decimalHighValue = highValue;
   }
 
-  public void setStringStats(long numNulls, long numNDVs, long maxColLen, double avgColLen) {
+  public void setStringStats(Long numNulls, Long numNDVs, Long maxColLen, Double avgColLen) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.maxColLen = maxColLen;
     this.avgColLen = avgColLen;
   }
 
-  public void setBinaryStats(long numNulls, long maxColLen, double avgColLen) {
+  public void setBinaryStats(Long numNulls, Long maxColLen, Double avgColLen) {
     this.numNulls = numNulls;
     this.maxColLen = maxColLen;
     this.avgColLen = avgColLen;

Modified: hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java (original)
+++ hive/branches/cbo/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java Fri Aug 22 21:36:47 2014
@@ -44,12 +44,12 @@ public class MTableColumnStatistics {
   private Double doubleHighValue;
   private String decimalLowValue;
   private String decimalHighValue;
-  private long numNulls;
-  private long numDVs;
-  private double avgColLen;
-  private long maxColLen;
-  private long numTrues;
-  private long numFalses;
+  private Long numNulls;
+  private Long numDVs;
+  private Double avgColLen;
+  private Long maxColLen;
+  private Long numTrues;
+  private Long numFalses;
   private long lastAnalyzed;
 
   public MTableColumnStatistics() {}
@@ -86,7 +86,7 @@ public class MTableColumnStatistics {
     this.colType = colType;
   }
 
-  public long getNumNulls() {
+  public Long getNumNulls() {
     return numNulls;
   }
 
@@ -94,7 +94,7 @@ public class MTableColumnStatistics {
     this.numNulls = numNulls;
   }
 
-  public long getNumDVs() {
+  public Long getNumDVs() {
     return numDVs;
   }
 
@@ -102,7 +102,7 @@ public class MTableColumnStatistics {
     this.numDVs = numDVs;
   }
 
-  public double getAvgColLen() {
+  public Double getAvgColLen() {
     return avgColLen;
   }
 
@@ -110,7 +110,7 @@ public class MTableColumnStatistics {
     this.avgColLen = avgColLen;
   }
 
-  public long getMaxColLen() {
+  public Long getMaxColLen() {
     return maxColLen;
   }
 
@@ -118,7 +118,7 @@ public class MTableColumnStatistics {
     this.maxColLen = maxColLen;
   }
 
-  public long getNumTrues() {
+  public Long getNumTrues() {
     return numTrues;
   }
 
@@ -126,7 +126,7 @@ public class MTableColumnStatistics {
     this.numTrues = numTrues;
   }
 
-  public long getNumFalses() {
+  public Long getNumFalses() {
     return numFalses;
   }
 
@@ -150,20 +150,20 @@ public class MTableColumnStatistics {
     this.dbName = dbName;
   }
 
-  public void setBooleanStats(long numTrues, long numFalses, long numNulls) {
+  public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) {
     this.numTrues = numTrues;
     this.numFalses = numFalses;
     this.numNulls = numNulls;
   }
 
-  public void setLongStats(long numNulls, long numNDVs, Long lowValue, Long highValue) {
+  public void setLongStats(Long numNulls, Long numNDVs, Long lowValue, Long highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.longLowValue = lowValue;
     this.longHighValue = highValue;
   }
 
-  public void setDoubleStats(long numNulls, long numNDVs, Double lowValue, Double highValue) {
+  public void setDoubleStats(Long numNulls, Long numNDVs, Double lowValue, Double highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.doubleLowValue = lowValue;
@@ -171,21 +171,21 @@ public class MTableColumnStatistics {
   }
 
   public void setDecimalStats(
-      long numNulls, long numNDVs, String lowValue, String highValue) {
+      Long numNulls, Long numNDVs, String lowValue, String highValue) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.decimalLowValue = lowValue;
     this.decimalHighValue = highValue;
   }
 
-  public void setStringStats(long numNulls, long numNDVs, long maxColLen, double avgColLen) {
+  public void setStringStats(Long numNulls, Long numNDVs, Long maxColLen, Double avgColLen) {
     this.numNulls = numNulls;
     this.numDVs = numNDVs;
     this.maxColLen = maxColLen;
     this.avgColLen = avgColLen;
   }
 
-  public void setBinaryStats(long numNulls, long maxColLen, double avgColLen) {
+  public void setBinaryStats(Long numNulls, Long maxColLen, Double avgColLen) {
     this.numNulls = numNulls;
     this.maxColLen = maxColLen;
     this.avgColLen = avgColLen;

Modified: hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java (original)
+++ hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java Fri Aug 22 21:36:47 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
@@ -58,6 +59,11 @@ public class DummyListener extends MetaS
   }
 
   @Override
+  public void onConfigChange(ConfigChangeEvent configChange) {
+    notifyList.add(configChange);
+  }
+
+  @Override
   public void onAddPartition(AddPartitionEvent partition) throws MetaException {
     notifyList.add(partition);
   }

Modified: hive/branches/cbo/packaging/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/packaging/pom.xml?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/packaging/pom.xml (original)
+++ hive/branches/cbo/packaging/pom.xml Fri Aug 22 21:36:47 2014
@@ -182,6 +182,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive.hcatalog</groupId>
       <artifactId>hive-hcatalog-streaming</artifactId>
       <version>${project.version}</version>

Modified: hive/branches/cbo/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/pom.xml?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/pom.xml (original)
+++ hive/branches/cbo/pom.xml Fri Aug 22 21:36:47 2014
@@ -31,6 +31,7 @@
   </prerequisites>
 
   <modules>
+    <module>accumulo-handler</module>
     <module>ant</module>
     <module>beeline</module>
     <module>cli</module>
@@ -87,6 +88,7 @@
     <maven.build-helper.plugin.version>1.8</maven.build-helper.plugin.version>
 
     <!-- Library Dependency Versions -->
+    <accumulo.version>1.6.0</accumulo.version>
     <activemq.version>5.5.0</activemq.version>
     <ant.version>1.9.1</ant.version>
     <antlr.version>3.4</antlr.version>
@@ -147,7 +149,7 @@
     <slf4j.version>1.7.5</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
     <super-csv.version>2.2.0</super-csv.version>
-    <tez.version>0.4.0-incubating</tez.version>
+    <tez.version>0.4.1-incubating</tez.version>
     <tempus-fugit.version>1.1</tempus-fugit.version>
     <snappy.version>0.2</snappy.version>
     <wadl-resourcedoc-doclet.version>1.4</wadl-resourcedoc-doclet.version>
@@ -365,6 +367,31 @@
         <version>${commons-exec.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-core</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-fate</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-minicluster</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-start</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-trace</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.activemq</groupId>
         <artifactId>activemq-core</artifactId>
         <version>${activemq.version}</version>

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Fri Aug 22 21:36:47 2014
@@ -103,7 +103,6 @@ import org.apache.hadoop.hive.ql.process
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext.CLIENT_TYPE;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivObjectActionType;
@@ -703,11 +702,7 @@ public class Driver implements CommandPr
       HashSet<WriteEntity> outputs, String command, Map<String, List<String>> tab2cols) throws HiveException {
 
     HiveAuthzContext.Builder authzContextBuilder = new HiveAuthzContext.Builder();
-
-    authzContextBuilder.setClientType(ss.isHiveServerQuery() ? CLIENT_TYPE.HIVESERVER2
-        : CLIENT_TYPE.HIVECLI);
     authzContextBuilder.setUserIpAddress(ss.getUserIpAddress());
-    authzContextBuilder.setSessionString(ss.getSessionId());
     authzContextBuilder.setCommandString(command);
 
     HiveOperationType hiveOpType = getHiveOperationType(op);
@@ -759,6 +754,9 @@ public class Driver implements CommandPr
         objName = privObject.getD();
         break;
       case FUNCTION:
+        if(privObject.getDatabase() != null) {
+          dbname = privObject.getDatabase().getName();
+        }
         objName = privObject.getFunctionName();
         break;
       case DUMMYPARTITION:

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Fri Aug 22 21:36:47 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -342,9 +343,7 @@ public class ColumnStatsTask extends Tas
     // Construct a column statistics object from the result
     List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows();
     // Persist the column statistics object to the metastore
-    for (ColumnStatistics colStat : colStats) {
-      db.updatePartitionColumnStatistics(colStat);
-    }
+    db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(colStats));
     return 0;
   }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Fri Aug 22 21:36:47 2014
@@ -105,7 +105,7 @@ public class FetchOperator implements Se
   private transient JobConf job;
   private transient WritableComparable key;
   private transient Writable value;
-  private transient Writable[] vcValues;
+  private transient Object[] vcValues;
   private transient Deserializer serde;
   private transient Deserializer tblSerde;
   private transient Converter partTblObjectInspectorConverter;
@@ -141,12 +141,11 @@ public class FetchOperator implements Se
       List<String> names = new ArrayList<String>(vcCols.size());
       List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>(vcCols.size());
       for (VirtualColumn vc : vcCols) {
-        inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-                vc.getTypeInfo()));
+        inspectors.add(vc.getObjectInspector());
         names.add(vc.getName());
       }
       vcsOI = ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);
-      vcValues = new Writable[vcCols.size()];
+      vcValues = new Object[vcCols.size()];
     }
     isPartitioned = work.isPartitioned();
     tblDataDone = false;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Fri Aug 22 21:36:47 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.io.IOContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -140,7 +141,7 @@ public class MapOperator extends Operato
     String tableName;
     String partName;
     List<VirtualColumn> vcs;
-    Writable[] vcValues;
+    Object[] vcValues;
 
     private boolean isPartitioned() {
       return partObjectInspector != null;
@@ -165,7 +166,7 @@ public class MapOperator extends Operato
    * op.
    *
    * @param hconf
-   * @param mrwork
+   * @param mapWork
    * @throws HiveException
    */
   public void initializeAsRoot(Configuration hconf, MapWork mapWork)
@@ -250,13 +251,13 @@ public class MapOperator extends Operato
 
     // The op may not be a TableScan for mapjoins
     // Consider the query: select /*+MAPJOIN(a)*/ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-    // In that case, it will be a Select, but the rowOI need not be ammended
+    // In that case, it will be a Select, but the rowOI need not be amended
     if (ctx.op instanceof TableScanOperator) {
       TableScanOperator tsOp = (TableScanOperator) ctx.op;
       TableScanDesc tsDesc = tsOp.getConf();
       if (tsDesc != null && tsDesc.hasVirtualCols()) {
         opCtx.vcs = tsDesc.getVirtualCols();
-        opCtx.vcValues = new Writable[opCtx.vcs.size()];
+        opCtx.vcValues = new Object[opCtx.vcs.size()];
         opCtx.vcsObjectInspector = VirtualColumn.getVCSObjectInspector(opCtx.vcs);
         if (opCtx.isPartitioned()) {
           opCtx.rowWithPartAndVC = Arrays.copyOfRange(opCtx.rowWithPart, 0, 3);
@@ -550,13 +551,13 @@ public class MapOperator extends Operato
     }
   }
 
-  public static Writable[] populateVirtualColumnValues(ExecMapperContext ctx,
-      List<VirtualColumn> vcs, Writable[] vcValues, Deserializer deserializer) {
+  public static Object[] populateVirtualColumnValues(ExecMapperContext ctx,
+      List<VirtualColumn> vcs, Object[] vcValues, Deserializer deserializer) {
     if (vcs == null) {
       return vcValues;
     }
     if (vcValues == null) {
-      vcValues = new Writable[vcs.size()];
+      vcValues = new Object[vcs.size()];
     }
     for (int i = 0; i < vcs.size(); i++) {
       VirtualColumn vc = vcs.get(i);
@@ -602,6 +603,19 @@ public class MapOperator extends Operato
           old.set(current);
         }
       }
+      else if(vc.equals(VirtualColumn.ROWID)) {
+        if(ctx.getIoCxt().ri == null) {
+          vcValues[i] = null;
+        }
+        else {
+          if(vcValues[i] == null) {
+            vcValues[i] = new Object[RecordIdentifier.Field.values().length];
+          }
+          RecordIdentifier.StructInfo.toArray(ctx.getIoCxt().ri, (Object[])vcValues[i]);
+          ctx.getIoCxt().ri = null;//so we don't accidentally cache the value; shouldn't
+          //happen since IO layer either knows how to produce ROW__ID or not - but to be safe
+        }
+      }
     }
     return vcValues;
   }
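
The switch from Writable[] to Object[] above is what lets a struct-valued virtual column such as
ROW__ID be carried as a nested Object[] whose slots follow an enum of fields. A purely illustrative
sketch of that representation; RowIdField and RowId are hypothetical stand-ins, not Hive's
RecordIdentifier API:

    // RowIdField and RowId only illustrate the shape of the nested struct value.
    enum RowIdField { TRANSACTION_ID, BUCKET_ID, ROW_ID }

    class RowId {
      final long transactionId;
      final int bucketId;
      final long rowId;

      RowId(long transactionId, int bucketId, long rowId) {
        this.transactionId = transactionId;
        this.bucketId = bucketId;
        this.rowId = rowId;
      }

      // Flatten the struct into the slot order defined by the enum.
      void toArray(Object[] struct) {
        struct[RowIdField.TRANSACTION_ID.ordinal()] = transactionId;
        struct[RowIdField.BUCKET_ID.ordinal()] = bucketId;
        struct[RowIdField.ROW_ID.ordinal()] = rowId;
      }
    }

    class VirtualColumnSketch {
      public static void main(String[] args) {
        Object[] vcValues = new Object[1];                         // one virtual column: ROW__ID
        Object[] rowIdStruct = new Object[RowIdField.values().length];
        new RowId(42L, 0, 7L).toArray(rowIdStruct);                // populated from the reader
        vcValues[0] = rowIdStruct;                                 // struct nested in the row
        System.out.println(java.util.Arrays.deepToString(vcValues));
      }
    }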

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java Fri Aug 22 21:36:47 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.io.merg
 import org.apache.hadoop.hive.ql.io.merge.MergeWork;
 import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanTask;
 import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork;
+import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
 import org.apache.hadoop.hive.ql.plan.ConditionalWork;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
@@ -92,6 +93,7 @@ public final class TaskFactory {
         StatsTask.class));
     taskvec.add(new TaskTuple<StatsNoJobWork>(StatsNoJobWork.class, StatsNoJobTask.class));
     taskvec.add(new TaskTuple<ColumnStatsWork>(ColumnStatsWork.class, ColumnStatsTask.class));
+    taskvec.add(new TaskTuple<ColumnStatsUpdateWork>(ColumnStatsUpdateWork.class, ColumnStatsUpdateTask.class));
     taskvec.add(new TaskTuple<MergeWork>(MergeWork.class,
         MergeTask.class));
     taskvec.add(new TaskTuple<DependencyCollectionWork>(DependencyCollectionWork.class,

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Fri Aug 22 21:36:47 2014
@@ -3466,9 +3466,9 @@ public final class Utilities {
       String origUmask, FileSystem fs) throws IOException {
     if (unsetUmask) {
       if (origUmask != null) {
-        conf.set("fs.permissions.umask-mode", origUmask);
+        conf.set(FsPermission.UMASK_LABEL, origUmask);
       } else {
-        conf.unset("fs.permissions.umask-mode");
+        conf.unset(FsPermission.UMASK_LABEL);
       }
     }
 
@@ -3482,10 +3482,10 @@ public final class Utilities {
         recursive);
 
     if (recursive) {
-      origUmask = conf.get("fs.permissions.umask-mode");
+      origUmask = conf.get(FsPermission.UMASK_LABEL);
       // this umask is required because by default the hdfs mask is 022 resulting in
       // all parents getting the fsPermission & !(022) permission instead of fsPermission
-      conf.set("fs.permissions.umask-mode", "000");
+      conf.set(FsPermission.UMASK_LABEL, "000");
     }
 
     FileSystem fs = ShimLoader.getHadoopShims().getNonCachedFileSystem(mkdirPath.toUri(), conf);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java Fri Aug 22 21:36:47 2014
@@ -155,7 +155,7 @@ public class ExecMapper extends MapReduc
       }
     }
   }
-
+  @Override
   public void map(Object key, Object value, OutputCollector output,
       Reporter reporter) throws IOException {
     if (oc == null) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java Fri Aug 22 21:36:47 2014
@@ -59,7 +59,10 @@ public class EnforceReadOnlyTables imple
   public void run(SessionState sess, Set<ReadEntity> inputs,
       Set<WriteEntity> outputs, UserGroupInformation ugi)
     throws Exception {
-    if (sess.getConf().getBoolean("hive.test.init.phase", false) == true) {
+
+    // Don't enforce during test driver setup or shutdown.
+    if (sess.getConf().getBoolean("hive.test.init.phase", false) ||
+        sess.getConf().getBoolean("hive.test.shutdown.phase", false)) {
       return;
     }
     for (WriteEntity w: outputs) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java Fri Aug 22 21:36:47 2014
@@ -333,6 +333,9 @@ public class Entity implements Serializa
     case DUMMYPARTITION:
       return p.getName();
     case FUNCTION:
+      if (database != null) {
+        return database.getName() + "." + stringObject;
+      }
       return stringObject;
     default:
       return d;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java Fri Aug 22 21:36:47 2014
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.RecordReader;
@@ -86,11 +86,20 @@ import java.io.IOException;
  * <p>
  * To support transitions between non-ACID layouts to ACID layouts, the input
  * formats are expected to support both layouts and detect the correct one.
- *
- * @param <V> The row type
+ * <p>
+ *   A note on the KEY of this InputFormat.  
+ *   For row-at-a-time processing, KEY can conveniently pass RowId into the operator
+ *   pipeline.  For vectorized execution the KEY could perhaps represent a range in the batch.
+ *   Since {@link org.apache.hadoop.hive.ql.io.orc.OrcInputFormat} is declared to return
+ *   {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidRecordReader} is defined
+ *   to provide access to the RowId.  Other implementations of AcidInputFormat can use either
+ *   mechanism.
+ * </p>
+ * 
+ * @param <VALUE> The row type
  */
-public interface AcidInputFormat<V>
-    extends InputFormat<NullWritable, V>, InputFormatChecker {
+public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
+    extends InputFormat<KEY, VALUE>, InputFormatChecker {
 
   /**
    * Options for controlling the record readers.
@@ -140,7 +149,7 @@ public interface AcidInputFormat<V>
    * @return a record reader
    * @throws IOException
    */
-  public RowReader<V> getReader(InputSplit split,
+  public RowReader<VALUE> getReader(InputSplit split,
                                 Options options) throws IOException;
 
   public static interface RawReader<V>
@@ -162,11 +171,18 @@ public interface AcidInputFormat<V>
    * @return a record reader
    * @throws IOException
    */
-   RawReader<V> getRawReader(Configuration conf,
+   RawReader<VALUE> getRawReader(Configuration conf,
                              boolean collapseEvents,
                              int bucket,
                              ValidTxnList validTxnList,
                              Path baseDirectory,
                              Path[] deltaDirectory
                              ) throws IOException;
+
+  /**
+   * A RecordReader returned by an AcidInputFormat working in row-at-a-time mode should implement AcidRecordReader.
+   */
+  public interface AcidRecordReader<K,V> extends RecordReader<K,V> {
+    RecordIdentifier getRecordIdentifier();
+  }
 }

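A minimal sketch of a row-at-a-time consumer of the new AcidRecordReader contract; the helper method and how the reader is obtained are illustrative, only the interface comes from this file (imports omitted):

    // Illustrative helper, not part of this commit.
    static void dumpRowIds(AcidInputFormat.AcidRecordReader<NullWritable, OrcStruct> reader)
        throws IOException {
      NullWritable key = reader.createKey();
      OrcStruct value = reader.createValue();
      while (reader.next(key, value)) {
        // the ROW__ID is available even though the KEY is NullWritable
        RecordIdentifier id = reader.getRecordIdentifier();
        System.out.println(id + ": " + value);
      }
    }
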
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java Fri Aug 22 21:36:47 2014
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.Reporter;
 
 import java.io.IOException;
@@ -34,7 +34,7 @@ import java.util.Properties;
  * An extension for OutputFormats that want to implement ACID transactions.
  * @param <V> the row type of the file
  */
-public interface AcidOutputFormat<V> extends HiveOutputFormat<NullWritable, V> {
+public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveOutputFormat<K, V> {
 
   /**
    * Options to control how the files are written

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java Fri Aug 22 21:36:47 2014
@@ -20,17 +20,13 @@ package org.apache.hadoop.hive.ql.io;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.FooterBuffer;
@@ -42,16 +38,13 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.util.ReflectionUtils;
 
 /** This class prepares an IOContext, and provides the ability to perform a binary search on the
   * data.  The binary search can be used by setting the value of inputFormatSorted in the
@@ -119,7 +112,18 @@ public abstract class HiveContextAwareRe
     }
     updateIOContext();
     try {
-      return doNext(key, value);
+      boolean retVal = doNext(key, value);
+      if(retVal) {
+        if(key instanceof RecordIdentifier) {
+          //supports AcidInputFormat implementations that use the KEY to pass ROW__ID info
+          ioCxtRef.ri = (RecordIdentifier)key;
+        }
+        else if(recordReader instanceof AcidInputFormat.AcidRecordReader) {
+          //supports AcidInputFormat implementations that do not use the KEY to pass ROW__ID info
+          ioCxtRef.ri = ((AcidInputFormat.AcidRecordReader) recordReader).getRecordIdentifier();
+        }
+      }
+      return retVal;
     } catch (IOException e) {
       ioCxtRef.setIOExceptions(true);
       throw e;

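With this change the wrapper publishes the current row's RecordIdentifier into the IOContext after every successful doNext(). A sketch of a downstream consumer; only the ri field comes from this commit, and the thread-local IOContext.get() accessor is assumed:

    // Illustrative consumer of the published ROW__ID.
    IOContext ioCxt = IOContext.get();           // assumed thread-local accessor
    RecordIdentifier rowId = ioCxt.ri;
    if (rowId != null) {
      long txnId = rowId.getTransactionId();     // feeds the ROW__ID virtual column
    }
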
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java Fri Aug 22 21:36:47 2014
@@ -69,6 +69,10 @@ public class IOContext {
   Comparison comparison = null;
   // The class name of the generic UDF being used by the filter
   String genericUDFClassName = null;
+  /**
+   * Record identifier of the current row, in support of {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID}.
+   */
+  public RecordIdentifier ri;
 
   public static enum Comparison {
     GREATER,

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java Fri Aug 22 21:36:47 2014
@@ -19,16 +19,81 @@
 package org.apache.hadoop.hive.ql.io;
 
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.io.WritableComparable;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
 /**
- * Gives the Record identifer information for the current record.
+ * Gives the Record identifier information for the current record.
  */
 public class RecordIdentifier implements WritableComparable<RecordIdentifier> {
+  /**
+   * This is in support of {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID}
+   * Contains metadata about each field in RecordIdentifier that needs to be part of ROWID
+   * which is represented as a struct {@link org.apache.hadoop.hive.ql.io.RecordIdentifier.StructInfo}.
+   * Each field of RecordIdentifier that should be part of ROWID must appear in this enum, which
+   * really means it should also be part of VirtualColumn (a ROWID-specific subclass would be a better home for it).
+   */
+  public static enum Field {
+    //note the enum names match field names in the struct
+    transactionId(TypeInfoFactory.longTypeInfo,
+      PrimitiveObjectInspectorFactory.javaLongObjectInspector),
+    bucketId(TypeInfoFactory.intTypeInfo, PrimitiveObjectInspectorFactory.javaIntObjectInspector),
+    rowId(TypeInfoFactory.longTypeInfo, PrimitiveObjectInspectorFactory.javaLongObjectInspector);
+    public final TypeInfo fieldType;
+    public final ObjectInspector fieldOI;
+    Field(TypeInfo fieldType, ObjectInspector fieldOI) {
+      this.fieldType = fieldType;
+      this.fieldOI = fieldOI;
+    }
+  }
+  /**
+   * RecordIdentifier is passed along the operator tree as a struct.  This class contains a few
+   * utilities for that.
+   */
+  public static final class StructInfo {
+    private static final List<String> fieldNames = new ArrayList<String>(Field.values().length);
+    private static final List<TypeInfo> fieldTypes = new ArrayList<TypeInfo>(fieldNames.size());
+    private static final List<ObjectInspector> fieldOis = 
+      new ArrayList<ObjectInspector>(fieldNames.size());
+    static {
+      for(Field f : Field.values()) {
+        fieldNames.add(f.name());
+        fieldTypes.add(f.fieldType);
+        fieldOis.add(f.fieldOI);
+      }
+    }
+    public static final TypeInfo typeInfo = 
+      TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypes);
+    public static final ObjectInspector oi = 
+      ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOis);
+
+    /**
+     * Copies relevant fields from {@code ri} to {@code struct}
+     * @param ri the identifier to copy from; if null, {@code struct} is filled with nulls
+     * @param struct must have length Field.values().length
+     */
+    public static void toArray(RecordIdentifier ri, Object[] struct) {
+      assert struct != null && struct.length == Field.values().length;
+      if(ri == null) {
+        Arrays.fill(struct, null);
+        return;
+      }
+      struct[Field.transactionId.ordinal()] = ri.getTransactionId();
+      struct[Field.bucketId.ordinal()] = ri.getBucketId();
+      struct[Field.rowId.ordinal()] = ri.getRowId();
+    }
+  }
+  
   private long transactionId;
   private int bucketId;
   private long rowId;

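A small sketch of StructInfo.toArray filling the ROW__ID struct for one row; it assumes the existing three-argument RecordIdentifier constructor:

    // Illustrative use of the new StructInfo helper.
    RecordIdentifier ri = new RecordIdentifier(10L /*txn*/, 0 /*bucket*/, 42L /*row*/);
    Object[] rowIdStruct = new Object[RecordIdentifier.Field.values().length];
    RecordIdentifier.StructInfo.toArray(ri, rowIdStruct);
    // rowIdStruct is now {10L, 0, 42L} and can be inspected through StructInfo.oi
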
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java Fri Aug 22 21:36:47 2014
@@ -127,7 +127,7 @@ public class AvroGenericRecordReader imp
     String s = job.get(AvroSerdeUtils.AVRO_SERDE_SCHEMA);
     if(s != null) {
       LOG.info("Found the avro schema in the job: " + s);
-      return Schema.parse(s);
+      return AvroSerdeUtils.getSchemaFor(s);
     }
     // No more places to get the schema from. Give up.  May have to re-encode later.
     return null;

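The deprecated static Schema.parse(String) is replaced by AvroSerdeUtils.getSchemaFor(String). A sketch of what the helper is expected to do; the real implementation lives in the serde module, this is only the assumed behaviour:

    // Assumed behaviour of AvroSerdeUtils.getSchemaFor(String), not the actual Hive source.
    public static Schema getSchemaFor(String str) {
      // a fresh Parser per call avoids the shared name cache of the deprecated Schema.parse()
      return new Schema.Parser().parse(str);
    }
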
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java Fri Aug 22 21:36:47 2014
@@ -98,7 +98,7 @@ import com.google.common.util.concurrent
  */
 public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   InputFormatChecker, VectorizedInputFormatInterface,
-    AcidInputFormat<OrcStruct> {
+    AcidInputFormat<NullWritable, OrcStruct> {
 
   private static final Log LOG = LogFactory.getLog(OrcInputFormat.class);
   static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
@@ -989,7 +989,7 @@ public class OrcInputFormat  implements 
     boolean vectorMode = Utilities.isVectorMode(conf);
 
     // if HiveCombineInputFormat gives us FileSplits instead of OrcSplits,
-    // we know it is not ACID.
+    // we know it is not ACID (CombineHiveInputFormat.getSplits() has a check that ensures this).
     if (inputSplit.getClass() == FileSplit.class) {
       if (vectorMode) {
         return createVectorizedReader(inputSplit, conf, reporter);
@@ -998,62 +998,75 @@ public class OrcInputFormat  implements 
           ((FileSplit) inputSplit).getPath(),
           OrcFile.readerOptions(conf)), conf, (FileSplit) inputSplit);
     }
-
+    
     OrcSplit split = (OrcSplit) inputSplit;
     reporter.setStatus(inputSplit.toString());
 
-    // if we are strictly old-school, just use the old code
+    Options options = new Options(conf).reporter(reporter);
+    final RowReader<OrcStruct> inner = getReader(inputSplit, options);
+    
+    
+    /* Even when there are no delta files, we still need to produce row ids so that an
+     * UPDATE or DELETE statement can work on a table that has not had any previous updates. */
     if (split.isOriginal() && split.getDeltas().isEmpty()) {
       if (vectorMode) {
         return createVectorizedReader(inputSplit, conf, reporter);
       } else {
-        return new OrcRecordReader(OrcFile.createReader(split.getPath(),
-            OrcFile.readerOptions(conf)), conf, split);
+        return new NullKeyRecordReader(inner, conf);
       }
     }
 
-    Options options = new Options(conf).reporter(reporter);
-    final RowReader<OrcStruct> inner = getReader(inputSplit, options);
     if (vectorMode) {
       return (org.apache.hadoop.mapred.RecordReader)
           new VectorizedOrcAcidRowReader(inner, conf, (FileSplit) inputSplit);
     }
-    final RecordIdentifier id = inner.createKey();
-
-    // Return a RecordReader that is compatible with the Hive 0.12 reader
-    // with NullWritable for the key instead of RecordIdentifier.
-    return new org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct>(){
-      @Override
-      public boolean next(NullWritable nullWritable,
-                          OrcStruct orcStruct) throws IOException {
-        return inner.next(id, orcStruct);
-      }
+    return new NullKeyRecordReader(inner, conf);
+  }
+  /**
+   * A RecordReader that is compatible with the Hive 0.12 reader,
+   * using NullWritable for the key instead of RecordIdentifier.
+   */
+  public static final class NullKeyRecordReader implements AcidRecordReader<NullWritable, OrcStruct> {
+    private final RecordIdentifier id;
+    private final RowReader<OrcStruct> inner;
+
+    public RecordIdentifier getRecordIdentifier() {
+      return id;
+    }
+    private NullKeyRecordReader(RowReader<OrcStruct> inner, Configuration conf) {
+      this.inner = inner;
+      id = inner.createKey();
+    }
+    @Override
+    public boolean next(NullWritable nullWritable,
+                        OrcStruct orcStruct) throws IOException {
+      return inner.next(id, orcStruct);
+    }
 
-      @Override
-      public NullWritable createKey() {
-        return NullWritable.get();
-      }
+    @Override
+    public NullWritable createKey() {
+      return NullWritable.get();
+    }
 
-      @Override
-      public OrcStruct createValue() {
-        return inner.createValue();
-      }
+    @Override
+    public OrcStruct createValue() {
+      return inner.createValue();
+    }
 
-      @Override
-      public long getPos() throws IOException {
-        return inner.getPos();
-      }
+    @Override
+    public long getPos() throws IOException {
+      return inner.getPos();
+    }
 
-      @Override
-      public void close() throws IOException {
-        inner.close();
-      }
+    @Override
+    public void close() throws IOException {
+      inner.close();
+    }
 
-      @Override
-      public float getProgress() throws IOException {
-        return inner.getProgress();
-      }
-    };
+    @Override
+    public float getProgress() throws IOException {
+      return inner.getProgress();
+    }
   }
 
 

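Naming the wrapper (NullKeyRecordReader) and making it implement AcidRecordReader lets callers recover the ROW__ID even though the key type stays NullWritable. A minimal usage sketch; constructing the split, JobConf and Reporter is omitted:

    // Illustrative only; split/jobConf/reporter come from the usual MapReduce plumbing.
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr =
        new OrcInputFormat().getRecordReader(split, jobConf, reporter);
    if (rr instanceof AcidInputFormat.AcidRecordReader) {
      // HiveContextAwareRecordReader relies on exactly this check to publish ROW__ID
      RecordIdentifier id =
          ((AcidInputFormat.AcidRecordReader<NullWritable, OrcStruct>) rr).getRecordIdentifier();
    }
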
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java Fri Aug 22 21:36:47 2014
@@ -50,7 +50,7 @@ import java.util.Properties;
  * A Hive OutputFormat for ORC files.
  */
 public class OrcOutputFormat extends FileOutputFormat<NullWritable, OrcSerdeRow>
-                        implements AcidOutputFormat<OrcSerdeRow> {
+                        implements AcidOutputFormat<NullWritable, OrcSerdeRow> {
 
   private static class OrcRecordWriter
       implements RecordWriter<NullWritable, OrcSerdeRow>,

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java Fri Aug 22 21:36:47 2014
@@ -88,6 +88,9 @@ public class OrcRecordUpdater implements
   private final IntWritable bucket = new IntWritable();
   private final LongWritable rowId = new LongWritable();
   private long insertedRows = 0;
+  // This records how many rows have been inserted or deleted.  It is separate from insertedRows
+  // because that is monotonically increasing to give new unique row ids.
+  private long rowCountDelta = 0;
   private final KeyIndexBuilder indexBuilder = new KeyIndexBuilder();
 
   static class AcidStats {
@@ -263,6 +266,7 @@ public class OrcRecordUpdater implements
     }
     addEvent(INSERT_OPERATION, currentTransaction, currentTransaction,
         insertedRows++, row);
+    rowCountDelta++;
   }
 
   @Override
@@ -283,6 +287,7 @@ public class OrcRecordUpdater implements
     }
     addEvent(DELETE_OPERATION, currentTransaction, originalTransaction, rowId,
         null);
+    rowCountDelta--;
   }
 
   @Override
@@ -317,7 +322,11 @@ public class OrcRecordUpdater implements
 
   @Override
   public SerDeStats getStats() {
-    return null;
+    SerDeStats stats = new SerDeStats();
+    stats.setRowCount(rowCountDelta);
+    // Don't worry about setting raw data size diff.  I have no idea how to calculate that
+    // without finding the row we are updating or deleting, which would be a mess.
+    return stats;
   }
 
   @VisibleForTesting

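rowCountDelta nets inserts against deletes, so getStats() can report the change in row count. An illustrative sequence, assuming an updater obtained from OrcOutputFormat.getRecordUpdater(...):

    updater.insert(txnId, row1);                     // rowCountDelta = 1
    updater.insert(txnId, row2);                     // rowCountDelta = 2
    updater.delete(txnId, txnId, 0L);                // rowCountDelta = 1
    long delta = updater.getStats().getRowCount();   // 1
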
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java Fri Aug 22 21:36:47 2014
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hive.con
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.sql.Timestamp;
@@ -1292,8 +1293,9 @@ class RecordReaderImpl implements Record
             BigInteger bInt = SerializationUtils.readBigInteger(valueStream);
             result.vector[i].update(bInt, (short) scratchScaleVector.vector[i]);
 
-            // Change the scale to match the schema if the scale in data is different.
-            if (scale != scratchScaleVector.vector[i]) {
+            // Change the scale to match the schema if the schema's scale is smaller than the scale in the data.
+            // (HIVE-7373) If the schema's scale is bigger, the value is left at the scale it was written with.
+            if (scale < scratchScaleVector.vector[i]) {
               result.vector[i].changeScaleDestructive((short) scale);
             }
           }
@@ -2410,6 +2412,9 @@ class RecordReaderImpl implements Record
 
   private static Object getBaseObjectForComparison(Object predObj, Object statsObj) {
     if (predObj != null) {
+      if (predObj instanceof ExprNodeConstantDesc) {
+        predObj = ((ExprNodeConstantDesc) predObj).getValue();
+      }
       // following are implicitly convertible
       if (statsObj instanceof Long) {
         if (predObj instanceof Double) {
@@ -2428,10 +2433,6 @@ class RecordReaderImpl implements Record
           return Double.valueOf(predObj.toString());
         }
       } else if (statsObj instanceof String) {
-        // Ex: where d = date '1970-02-01' will be ExprNodeConstantDesc
-        if (predObj instanceof ExprNodeConstantDesc) {
-          return ((ExprNodeConstantDesc) predObj).getValue().toString();
-        }
         return predObj.toString();
       } else if (statsObj instanceof HiveDecimal) {
         if (predObj instanceof Long) {
@@ -2440,6 +2441,8 @@ class RecordReaderImpl implements Record
           return HiveDecimal.create(predObj.toString());
         } else if (predObj instanceof String) {
           return HiveDecimal.create(predObj.toString());
+        } else if (predObj instanceof BigDecimal) {
+          return HiveDecimal.create((BigDecimal)predObj);
         }
       }
     }

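The predicate constant is now unwrapped from ExprNodeConstantDesc before any type dispatch, and BigDecimal constants can be compared against decimal column statistics. A small illustration of the new decimal branch (values are made up):

    // Illustrative values; only the HiveDecimal.create(BigDecimal) branch is from this commit.
    Object statsMin = HiveDecimal.create("12.30");
    Object predObj = new BigDecimal("12.3");
    // getBaseObjectForComparison(predObj, statsMin) now returns
    // HiveDecimal.create((BigDecimal) predObj), so both sides compare as HiveDecimal
    // instead of going through a string round-trip.
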
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java Fri Aug 22 21:36:47 2014
@@ -142,9 +142,9 @@ class RunLengthIntegerWriterV2 implement
   private final boolean signed;
   private EncodingType encoding;
   private int numLiterals;
-  private long[] zigzagLiterals;
-  private long[] baseRedLiterals;
-  private long[] adjDeltas;
+  private final long[] zigzagLiterals = new long[MAX_SCOPE];
+  private final long[] baseRedLiterals = new long[MAX_SCOPE];
+  private final long[] adjDeltas = new long[MAX_SCOPE];
   private long fixedDelta;
   private int zzBits90p;
   private int zzBits100p;
@@ -252,8 +252,11 @@ class RunLengthIntegerWriterV2 implement
       // store the first value as delta value using zigzag encoding
       utils.writeVslong(output, adjDeltas[0]);
 
-      // adjacent delta values are bit packed
-      utils.writeInts(adjDeltas, 1, adjDeltas.length - 1, fb, output);
+      // adjacent delta values are bit packed. The number of valid entries in
+      // adjDeltas is always one less than the number of literals (n elements
+      // yield n-1 deltas). We have already written one delta above, so write the
+      // remaining numLiterals - 2 entries here.
+      utils.writeInts(adjDeltas, 1, numLiterals - 2, fb, output);
     }
   }
 
@@ -323,7 +326,7 @@ class RunLengthIntegerWriterV2 implement
     // base reduced literals are bit packed
     int closestFixedBits = utils.getClosestFixedBits(fb);
 
-    utils.writeInts(baseRedLiterals, 0, baseRedLiterals.length, closestFixedBits,
+    utils.writeInts(baseRedLiterals, 0, numLiterals, closestFixedBits,
         output);
 
     // write patch list
@@ -372,7 +375,7 @@ class RunLengthIntegerWriterV2 implement
     output.write(headerSecondByte);
 
     // bit packing the zigzag encoded literals
-    utils.writeInts(zigzagLiterals, 0, zigzagLiterals.length, fb, output);
+    utils.writeInts(zigzagLiterals, 0, numLiterals, fb, output);
 
     // reset run length
     variableRunLength = 0;
@@ -414,14 +417,6 @@ class RunLengthIntegerWriterV2 implement
   }
 
   private void determineEncoding() {
-    // used for direct encoding
-    zigzagLiterals = new long[numLiterals];
-
-    // used for patched base encoding
-    baseRedLiterals = new long[numLiterals];
-
-    // used for delta encoding
-    adjDeltas = new long[numLiterals - 1];
 
     int idx = 0;
 
@@ -530,10 +525,10 @@ class RunLengthIntegerWriterV2 implement
     // is not significant then we can use direct or delta encoding
 
     double p = 0.9;
-    zzBits90p = utils.percentileBits(zigzagLiterals, p);
+    zzBits90p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p);
 
     p = 1.0;
-    zzBits100p = utils.percentileBits(zigzagLiterals, p);
+    zzBits100p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p);
 
     int diffBitsLH = zzBits100p - zzBits90p;
 
@@ -543,18 +538,18 @@ class RunLengthIntegerWriterV2 implement
         && isFixedDelta == false) {
       // patching is done only on base reduced values.
       // remove base from literals
-      for(int i = 0; i < zigzagLiterals.length; i++) {
+      for(int i = 0; i < numLiterals; i++) {
         baseRedLiterals[i] = literals[i] - min;
       }
 
       // 95th percentile width is used to determine max allowed value
       // after which patching will be done
       p = 0.95;
-      brBits95p = utils.percentileBits(baseRedLiterals, p);
+      brBits95p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p);
 
       // 100th percentile is used to compute the max patch width
       p = 1.0;
-      brBits100p = utils.percentileBits(baseRedLiterals, p);
+      brBits100p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p);
 
       // after base reducing the values, if the difference in bits between
       // 95th percentile and 100th percentile value is zero then there
@@ -592,7 +587,7 @@ class RunLengthIntegerWriterV2 implement
 
     // since we are considering only the 95th percentile, the gap and patch
     // arrays need to hold at most 5% of the values
-    patchLength = (int) Math.ceil((baseRedLiterals.length * 0.05));
+    patchLength = (int) Math.ceil((numLiterals * 0.05));
 
     int[] gapList = new int[patchLength];
     long[] patchList = new long[patchLength];
@@ -616,7 +611,7 @@ class RunLengthIntegerWriterV2 implement
     int gap = 0;
     int maxGap = 0;
 
-    for(int i = 0; i < baseRedLiterals.length; i++) {
+    for(int i = 0; i < numLiterals; i++) {
       // if value is above mask then create the patch and record the gap
       if (baseRedLiterals[i] > mask) {
         gap = i - prev;
@@ -694,9 +689,6 @@ class RunLengthIntegerWriterV2 implement
     numLiterals = 0;
     encoding = null;
     prevDelta = 0;
-    zigzagLiterals = null;
-    baseRedLiterals = null;
-    adjDeltas = null;
     fixedDelta = 0;
     zzBits90p = 0;
     zzBits100p = 0;

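The scratch arrays are now allocated once at MAX_SCOPE and reused across runs, with numLiterals tracking how many entries are valid, instead of being reallocated and nulled on every run. A generic sketch of that reuse pattern (names are illustrative, not from the writer):

    // Generic buffer-reuse pattern assumed by this change.
    final class ScratchBuffer {
      private final long[] data = new long[512];   // plays the role of MAX_SCOPE
      private int used = 0;                        // plays the role of numLiterals
      void add(long v) { data[used++] = v; }
      void reset()     { used = 0; }               // reuse instead of reallocating
      int length()     { return used; }            // consumers iterate [0, used), never data.length
    }
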
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java Fri Aug 22 21:36:47 2014
@@ -244,7 +244,7 @@ final class SerializationUtils {
    * @param p - percentile value (>=0.0 to <=1.0)
    * @return pth percentile bits
    */
-  int percentileBits(long[] data, double p) {
+  int percentileBits(long[] data, int offset, int length, double p) {
     if ((p > 1.0) || (p <= 0.0)) {
       return -1;
     }
@@ -254,13 +254,12 @@ final class SerializationUtils {
     int[] hist = new int[32];
 
     // compute the histogram
-    for(long l : data) {
-      int idx = encodeBitWidth(findClosestNumBits(l));
+    for(int i = offset; i < (offset + length); i++) {
+      int idx = encodeBitWidth(findClosestNumBits(data[i]));
       hist[idx] += 1;
     }
 
-    int len = data.length;
-    int perLen = (int) (len * (1.0 - p));
+    int perLen = (int) (length * (1.0 - p));
 
     // return the bits required by pth percentile length
     for(int i = hist.length - 1; i >= 0; i--) {

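percentileBits now takes an explicit offset/length so it can work on the reused, oversized scratch arrays above. A standalone sketch of the same histogram idea, not the Hive implementation:

    // Standalone sketch of offset/length percentile bits; not the Hive code.
    static int percentileBits(long[] data, int offset, int length, double p) {
      int[] hist = new int[65];                        // bucket values by bit width 0..64
      for (int i = offset; i < offset + length; i++) {
        hist[64 - Long.numberOfLeadingZeros(data[i])]++;
      }
      int allowed = (int) (length * (1.0 - p));        // values permitted above the percentile
      for (int i = hist.length - 1; i >= 0; i--) {
        allowed -= hist[i];
        if (allowed < 0) {
          return i;                                    // bit width at the pth percentile
        }
      }
      return 0;
    }
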
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java Fri Aug 22 21:36:47 2014
@@ -146,7 +146,7 @@ public class ProjectionPusher {
     if ((part != null) && (part.getTableDesc() != null)) {
       Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), cloneJobConf);
     }
-    pushProjectionsAndFilters(cloneJobConf, path.toString(), path.toUri().toString());
+    pushProjectionsAndFilters(cloneJobConf, path.toString(), path.toUri().getPath());
     return cloneJobConf;
   }
 }

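The second string passed to pushProjectionsAndFilters is expected to be the path without scheme and authority, which toUri().toString() does not provide. A small illustration of the difference:

    // Why getPath() instead of toString():
    Path p = new Path("hdfs://nn:8020/warehouse/t/part=1");
    String full = p.toUri().toString();   // "hdfs://nn:8020/warehouse/t/part=1"
    String path = p.toUri().getPath();    // "/warehouse/t/part=1"
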
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java?rev=1619936&r1=1619935&r2=1619936&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java Fri Aug 22 21:36:47 2014
@@ -13,6 +13,9 @@
  */
 package org.apache.hadoop.hive.ql.io.parquet.convert;
 
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
 
@@ -30,7 +33,7 @@ public class ArrayWritableGroupConverter
   private Writable[] mapPairContainer;
 
   public ArrayWritableGroupConverter(final GroupType groupType, final HiveGroupConverter parent,
-      final int index) {
+      final int index, List<TypeInfo> hiveSchemaTypeInfos) {
     this.parent = parent;
     this.index = index;
     int count = groupType.getFieldCount();
@@ -40,7 +43,8 @@ public class ArrayWritableGroupConverter
     isMap = count == 2;
     converters = new Converter[count];
     for (int i = 0; i < count; i++) {
-      converters[i] = getConverterFromDescription(groupType.getType(i), i, this);
+      converters[i] = getConverterFromDescription(groupType.getType(i), i, this,
+          hiveSchemaTypeInfos);
     }
   }