Posted to commits@hive.apache.org by gu...@apache.org on 2014/06/28 02:23:57 UTC

svn commit: r1606275 [2/3] - in /hive/branches/cbo: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/clientpositive/ data/conf/ hbase-handler/src/java/org/apache/hadoop/hive/hbase/ ...

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Sat Jun 28 00:23:54 2014
@@ -120,8 +120,9 @@ public class TxnHandler {
     // database we'll look at the current transaction number first.  If it
     // subsequently shows up in the open list that's ok.
     Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+    Statement stmt = null;
     try {
-      Statement stmt = dbConn.createStatement();
+      stmt = dbConn.createStatement();
       String s = "select ntxn_next - 1 from NEXT_TXN_ID";
       LOG.debug("Going to execute query <" + s + ">");
       ResultSet rs = stmt.executeQuery(s);
@@ -157,7 +158,6 @@ public class TxnHandler {
         }
         txnInfo.add(new TxnInfo(rs.getLong(1), state, rs.getString(3), rs.getString(4)));
       }
-      stmt.close();
       LOG.debug("Going to rollback");
       dbConn.rollback();
       return new GetOpenTxnsInfoResponse(hwm, txnInfo);
@@ -170,6 +170,7 @@ public class TxnHandler {
       throw new MetaException("Unable to select from transaction database, "
           + StringUtils.stringifyException(e));
     } finally {
+      closeStmt(stmt);
       closeDbConn(dbConn);
     }
   }
@@ -180,9 +181,10 @@ public class TxnHandler {
     // database we'll look at the current transaction number first.  If it
     // subsequently shows up in the open list that's ok.
     Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+    Statement stmt = null;
     try {
       timeOutTxns(dbConn);
-      Statement stmt = dbConn.createStatement();
+      stmt = dbConn.createStatement();
       String s = "select ntxn_next - 1 from NEXT_TXN_ID";
       LOG.debug("Going to execute query <" + s + ">");
       ResultSet rs = stmt.executeQuery(s);
@@ -203,7 +205,6 @@ public class TxnHandler {
       while (rs.next()) {
         openList.add(rs.getLong(1));
       }
-      stmt.close();
       LOG.debug("Going to rollback");
       dbConn.rollback();
       return new GetOpenTxnsResponse(hwm, openList);
@@ -216,6 +217,7 @@ public class TxnHandler {
       throw new MetaException("Unable to select from transaction database, "
           + StringUtils.stringifyException(e));
     } finally {
+      closeStmt(stmt);
       closeDbConn(dbConn);
     }
   }
@@ -235,13 +237,14 @@ public class TxnHandler {
     int numTxns = rqst.getNum_txns();
     try {
       Connection dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+      Statement stmt = null;
       try {
         // Make sure the user has not requested an insane amount of txns.
         int maxTxns = HiveConf.getIntVar(conf,
             HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH);
         if (numTxns > maxTxns) numTxns = maxTxns;
 
-        Statement stmt = dbConn.createStatement();
+        stmt = dbConn.createStatement();
         String s = "select ntxn_next from NEXT_TXN_ID";
         LOG.debug("Going to execute query <" + s + ">");
         ResultSet rs = stmt.executeQuery(s);
@@ -279,6 +282,7 @@ public class TxnHandler {
         throw new MetaException("Unable to select from transaction database "
           + StringUtils.stringifyException(e));
       } finally {
+        closeStmt(stmt);
         closeDbConn(dbConn);
       }
     } catch (DeadlockException e) {
@@ -327,8 +331,9 @@ public class TxnHandler {
     long txnid = rqst.getTxnid();
     try {
       Connection dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+      Statement stmt = null;
       try {
-        Statement stmt = dbConn.createStatement();
+        stmt = dbConn.createStatement();
         // Before we do the commit, heartbeat the txn.  This is slightly odd in that we're going to
         // commit it, but it does two things.  One, it makes sure the transaction is still valid.
         // Two, it avoids the race condition where we time out between now and when we actually
@@ -369,6 +374,7 @@ public class TxnHandler {
         throw new MetaException("Unable to update transaction database "
           + StringUtils.stringifyException(e));
       } finally {
+        closeStmt(stmt);
         closeDbConn(dbConn);
       }
     } catch (DeadlockException e) {
@@ -468,6 +474,7 @@ public class TxnHandler {
       throws NoSuchLockException, TxnOpenException, MetaException {
     try {
       Connection dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+      Statement stmt = null;
       try {
         // Odd as it seems, we need to heartbeat first because this touches the
         // lock table and assures that our locks are still valid.  If they are
@@ -487,7 +494,7 @@ public class TxnHandler {
           LOG.error(msg);
           throw new TxnOpenException(msg);
         }
-        Statement stmt = dbConn.createStatement();
+        stmt = dbConn.createStatement();
         String s = "delete from HIVE_LOCKS where hl_lock_ext_id = " + extLockId;
         LOG.debug("Going to execute update <" + s + ">");
         int rc = stmt.executeUpdate(s);
@@ -508,6 +515,7 @@ public class TxnHandler {
         throw new MetaException("Unable to update transaction database " +
             StringUtils.stringifyException(e));
       } finally {
+        closeStmt(stmt);
         closeDbConn(dbConn);
       }
     } catch (DeadlockException e) {
@@ -521,8 +529,9 @@ public class TxnHandler {
     Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
     ShowLocksResponse rsp = new ShowLocksResponse();
     List<ShowLocksResponseElement> elems = new ArrayList<ShowLocksResponseElement>();
+    Statement stmt = null;
     try {
-      Statement stmt = dbConn.createStatement();
+      stmt = dbConn.createStatement();
 
       String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state, " +
           "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host from HIVE_LOCKS";
@@ -561,6 +570,7 @@ public class TxnHandler {
       throw new MetaException("Unable to select from transaction database " +
           StringUtils.stringifyException(e));
     } finally {
+      closeStmt(stmt);
       closeDbConn(dbConn);
     }
     rsp.setLocks(elems);
@@ -634,8 +644,9 @@ public class TxnHandler {
     // Put a compaction request in the queue.
     try {
       Connection dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+      Statement stmt = null;
       try {
-        Statement stmt = dbConn.createStatement();
+        stmt = dbConn.createStatement();
 
         // Get the id for the next entry in the queue
         String s = "select ncq_next from NEXT_COMPACTION_QUEUE_ID";
@@ -705,6 +716,7 @@ public class TxnHandler {
         throw new MetaException("Unable to select from transaction database " +
             StringUtils.stringifyException(e));
       } finally {
+        closeStmt(stmt);
         closeDbConn(dbConn);
       }
     } catch (DeadlockException e) {
@@ -717,8 +729,9 @@ public class TxnHandler {
   public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException {
     ShowCompactResponse response = new ShowCompactResponse();
     Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+    Statement stmt = null;
     try {
-      Statement stmt = dbConn.createStatement();
+      stmt = dbConn.createStatement();
       String s = "select cq_database, cq_table, cq_partition, cq_state, cq_type, cq_worker_id, " +
           "cq_start, cq_run_as from COMPACTION_QUEUE";
       LOG.debug("Going to execute query <" + s + ">");
@@ -755,6 +768,7 @@ public class TxnHandler {
       throw new MetaException("Unable to select from transaction database " +
           StringUtils.stringifyException(e));
     } finally {
+      closeStmt(stmt);
       closeDbConn(dbConn);
     }
     return response;
@@ -765,8 +779,9 @@ public class TxnHandler {
    */
   int numLocksInLockTable() throws SQLException, MetaException {
     Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+    Statement stmt = null;
     try {
-      Statement stmt = dbConn.createStatement();
+      stmt = dbConn.createStatement();
       String s = "select count(*) from HIVE_LOCKS";
       LOG.debug("Going to execute query <" + s + ">");
       ResultSet rs = stmt.executeQuery(s);
@@ -777,6 +792,7 @@ public class TxnHandler {
       return rc;
     } finally {
       closeDbConn(dbConn);
+      closeStmt(stmt);
     }
   }
 
@@ -819,6 +835,18 @@ public class TxnHandler {
       LOG.warn("Failed to close db connection " + e.getMessage());
     }
   }
+  
+  /**
+   * Close statement instance.
+   * @param stmt statement instance.
+   */
+  protected void closeStmt(Statement stmt) {
+    try {
+      if (stmt != null) stmt.close();
+    } catch (SQLException e) {
+      LOG.warn("Failed to close statement " + e.getMessage());
+    }
+  }
 
   /**
    * Determine if an exception was a deadlock.  Unfortunately there is no standard way to do
@@ -850,10 +878,10 @@ public class TxnHandler {
    * @throws org.apache.hadoop.hive.metastore.api.MetaException if the time cannot be determined
    */
   protected long getDbTime(Connection conn) throws MetaException {
+    Statement stmt = null;
     try {
-      Statement stmt = conn.createStatement();
+      stmt = conn.createStatement();
       String s;
-      ResultSet rs;
       DatabaseProduct prod = determineDatabaseProduct(conn);
       switch (prod) {
         case DERBY:
@@ -876,13 +904,15 @@ public class TxnHandler {
           throw new MetaException(msg);
       }
       LOG.debug("Going to execute query <" + s + ">");
-      rs = stmt.executeQuery(s);
+      ResultSet rs = stmt.executeQuery(s);
       if (!rs.next()) throw new MetaException("No results from date query");
       return rs.getTimestamp(1).getTime();
     } catch (SQLException e) {
       String msg = "Unable to determine current time: " + e.getMessage();
       LOG.error(msg);
       throw new MetaException(msg);
+    } finally {
+      closeStmt(stmt);
     }
   }
 
@@ -1042,33 +1072,39 @@ public class TxnHandler {
    * @throws SQLException
    */
   private int abortTxns(Connection dbConn, List<Long> txnids) throws SQLException {
-    Statement stmt = dbConn.createStatement();
-
-    // delete from HIVE_LOCKS first, we always access HIVE_LOCKS before TXNS
-    StringBuilder buf = new StringBuilder("delete from HIVE_LOCKS where hl_txnid in (");
-    boolean first = true;
-    for (Long id : txnids) {
-      if (first) first = false;
-      else buf.append(',');
-      buf.append(id);
-    }
-    buf.append(')');
-    LOG.debug("Going to execute update <" + buf.toString() + ">");
-    stmt.executeUpdate(buf.toString());
-
-    buf = new StringBuilder("update TXNS set txn_state = '" + TXN_ABORTED + "' where txn_id in (");
-    first = true;
-    for (Long id : txnids) {
-      if (first) first = false;
-      else buf.append(',');
-      buf.append(id);
+    Statement stmt = null;
+    int updateCnt = 0;
+    try {
+      stmt = dbConn.createStatement();
+  
+      // delete from HIVE_LOCKS first, we always access HIVE_LOCKS before TXNS
+      StringBuilder buf = new StringBuilder("delete from HIVE_LOCKS where hl_txnid in (");
+      boolean first = true;
+      for (Long id : txnids) {
+        if (first) first = false;
+        else buf.append(',');
+        buf.append(id);
+      }
+      buf.append(')');
+      LOG.debug("Going to execute update <" + buf.toString() + ">");
+      stmt.executeUpdate(buf.toString());
+  
+      buf = new StringBuilder("update TXNS set txn_state = '" + TXN_ABORTED + "' where txn_id in (");
+      first = true;
+      for (Long id : txnids) {
+        if (first) first = false;
+        else buf.append(',');
+        buf.append(id);
+      }
+      buf.append(')');
+      LOG.debug("Going to execute update <" + buf.toString() + ">");
+      updateCnt = stmt.executeUpdate(buf.toString());
+  
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } finally {
+      closeStmt(stmt);
     }
-    buf.append(')');
-    LOG.debug("Going to execute update <" + buf.toString() + ">");
-    int updateCnt = stmt.executeUpdate(buf.toString());
-
-    LOG.debug("Going to commit");
-    dbConn.commit();
     return updateCnt;
   }
 
@@ -1102,9 +1138,9 @@ public class TxnHandler {
     synchronized (lockLock) {
       // Clean up timed out locks before we attempt to acquire any.
       timeOutLocks(dbConn);
-
+      Statement stmt = null;
       try {
-        Statement stmt = dbConn.createStatement();
+        stmt = dbConn.createStatement();
 
         // Get the next lock id.
         String s = "select nl_next from NEXT_LOCK_ID";
@@ -1183,6 +1219,8 @@ public class TxnHandler {
       } catch (NoSuchLockException e) {
         // This should never happen, as we just added the lock id
         throw new MetaException("Couldn't find a lock we just created!");
+      } finally {
+        closeStmt(stmt);
       }
     }
   }
@@ -1197,7 +1235,6 @@ public class TxnHandler {
 
     LOG.debug("Setting savepoint");
     Savepoint save = dbConn.setSavepoint();
-    Statement stmt = dbConn.createStatement();
     StringBuilder query = new StringBuilder("select hl_lock_ext_id, " +
         "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " +
         "hl_lock_type from HIVE_LOCKS where hl_db in (");
@@ -1267,98 +1304,104 @@ public class TxnHandler {
     }
 
     LOG.debug("Going to execute query <" + query.toString() + ">");
-    ResultSet rs = stmt.executeQuery(query.toString());
-    SortedSet lockSet = new TreeSet(new LockInfoComparator());
-    while (rs.next()) {
-      lockSet.add(new LockInfo(rs));
-    }
-    // Turn the tree set into an array so we can move back and forth easily
-    // in it.
-    LockInfo[] locks = (LockInfo[])lockSet.toArray(new LockInfo[1]);
-
-    for (LockInfo info : locksBeingChecked) {
-      // Find the lock record we're checking
-      int index = -1;
-      for (int i = 0; i < locks.length; i++) {
-        if (locks[i].equals(info)) {
-          index = i;
-          break;
-        }
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      ResultSet rs = stmt.executeQuery(query.toString());
+      SortedSet lockSet = new TreeSet(new LockInfoComparator());
+      while (rs.next()) {
+        lockSet.add(new LockInfo(rs));
       }
+      // Turn the tree set into an array so we can move back and forth easily
+      // in it.
+      LockInfo[] locks = (LockInfo[])lockSet.toArray(new LockInfo[1]);
 
-      // If we didn't find the lock, then it must not be in the table
-      if (index == -1) {
-        LOG.debug("Going to rollback");
-        dbConn.rollback();
-        throw new MetaException("How did we get here, we heartbeated our lock before we started!");
-      }
+      for (LockInfo info : locksBeingChecked) {
+        // Find the lock record we're checking
+        int index = -1;
+        for (int i = 0; i < locks.length; i++) {
+          if (locks[i].equals(info)) {
+            index = i;
+            break;
+          }
+        }
 
+        // If we didn't find the lock, then it must not be in the table
+        if (index == -1) {
+          LOG.debug("Going to rollback");
+          dbConn.rollback();
+          throw new MetaException("How did we get here, we heartbeated our lock before we started!");
+        }
 
-      // If we've found it and it's already been marked acquired,
-      // then just look at the other locks.
-      if (locks[index].state == LockState.ACQUIRED) {
-        continue;
-      }
 
-      // Look at everything in front of this lock to see if it should block
-      // it or not.
-      boolean acquired = false;
-      for (int i = index - 1; i >= 0; i--) {
-        // Check if we're operating on the same database, if not, move on
-        if (!locks[index].db.equals(locks[i].db)) {
+        // If we've found it and it's already been marked acquired,
+        // then just look at the other locks.
+        if (locks[index].state == LockState.ACQUIRED) {
           continue;
         }
 
-        // If table is null on either of these, then they are claiming to
-        // lock the whole database and we need to check it.  Otherwise,
-        // check if they are operating on the same table, if not, move on.
-        if (locks[index].table != null && locks[i].table != null
-            && !locks[index].table.equals(locks[i].table)) {
-          continue;
-        }
+        // Look at everything in front of this lock to see if it should block
+        // it or not.
+        boolean acquired = false;
+        for (int i = index - 1; i >= 0; i--) {
+          // Check if we're operating on the same database, if not, move on
+          if (!locks[index].db.equals(locks[i].db)) {
+            continue;
+          }
 
-        // If partition is null on either of these, then they are claiming to
-        // lock the whole table and we need to check it.  Otherwise,
-        // check if they are operating on the same partition, if not, move on.
-        if (locks[index].partition != null && locks[i].partition != null
-            && !locks[index].partition.equals(locks[i].partition)) {
-          continue;
-        }
+          // If table is null on either of these, then they are claiming to
+          // lock the whole database and we need to check it.  Otherwise,
+          // check if they are operating on the same table, if not, move on.
+          if (locks[index].table != null && locks[i].table != null
+              && !locks[index].table.equals(locks[i].table)) {
+            continue;
+          }
 
-        // We've found something that matches what we're trying to lock,
-        // so figure out if we can lock it too.
-        switch (jumpTable.get(locks[index].type).get(locks[i].type).get
-            (locks[i].state)) {
-          case ACQUIRE:
-            acquire(dbConn, stmt, extLockId, info.intLockId);
-            acquired = true;
-            break;
-          case WAIT:
-            wait(dbConn, save);
-            if (alwaysCommit) {
-              // In the case where lockNoWait has been called we don't want to commit because
-              // it's going to roll everything back.  In every other case we want to commit here.
-              LOG.debug("Going to commit");
-              dbConn.commit();
-            }
-            response.setState(LockState.WAITING);
-            return response;
-          case KEEP_LOOKING:
+          // If partition is null on either of these, then they are claiming to
+          // lock the whole table and we need to check it.  Otherwise,
+          // check if they are operating on the same partition, if not, move on.
+          if (locks[index].partition != null && locks[i].partition != null
+              && !locks[index].partition.equals(locks[i].partition)) {
             continue;
+          }
+
+          // We've found something that matches what we're trying to lock,
+          // so figure out if we can lock it too.
+          switch (jumpTable.get(locks[index].type).get(locks[i].type).get
+              (locks[i].state)) {
+              case ACQUIRE:
+                acquire(dbConn, stmt, extLockId, info.intLockId);
+                acquired = true;
+                break;
+              case WAIT:
+                wait(dbConn, save);
+                if (alwaysCommit) {
+                  // In the case where lockNoWait has been called we don't want to commit because
+                  // it's going to roll everything back. In every other case we want to commit here.
+                  LOG.debug("Going to commit");
+                  dbConn.commit();
+                }
+                response.setState(LockState.WAITING);
+                return response;
+              case KEEP_LOOKING:
+                continue;
+          }
+          if (acquired) break; // We've acquired this lock component,
+          // so get out of the loop and look at the next component.
         }
-        if (acquired) break; // We've acquired this lock component,
-        // so get out of the loop and look at the next component.
+
+        // If we've arrived here and we have not already acquired, it means there's nothing in the
+        // way of the lock, so acquire the lock.
+        if (!acquired) acquire(dbConn, stmt, extLockId, info.intLockId);
       }
 
-      // If we've arrived here and we have not already acquired, it means there's nothing in the
-      // way of the lock, so acquire the lock.
-      if (!acquired) acquire(dbConn, stmt, extLockId, info.intLockId);
+      // We acquired all of the locks, so commit and return acquired.
+      LOG.debug("Going to commit");
+      dbConn.commit();
+      response.setState(LockState.ACQUIRED);
+    } finally {
+      closeStmt(stmt);
     }
-
-    // We acquired all of the locks, so commit and return acquired.
-    LOG.debug("Going to commit");
-    dbConn.commit();
-    response.setState(LockState.ACQUIRED);
     return response;
   }
 
@@ -1397,20 +1440,25 @@ public class TxnHandler {
       throws NoSuchLockException, SQLException, MetaException {
     // If the lock id is 0, then there are no locks in this heartbeat
     if (extLockId == 0) return;
-    Statement stmt = dbConn.createStatement();
-    long now = getDbTime(dbConn);
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      long now = getDbTime(dbConn);
 
-    String s = "update HIVE_LOCKS set hl_last_heartbeat = " +
-        now + " where hl_lock_ext_id = " + extLockId;
-    LOG.debug("Going to execute update <" + s + ">");
-    int rc = stmt.executeUpdate(s);
-    if (rc < 1) {
-      LOG.debug("Going to rollback");
-      dbConn.rollback();
-      throw new NoSuchLockException("No such lock: " + extLockId);
+      String s = "update HIVE_LOCKS set hl_last_heartbeat = " +
+          now + " where hl_lock_ext_id = " + extLockId;
+      LOG.debug("Going to execute update <" + s + ">");
+      int rc = stmt.executeUpdate(s);
+      if (rc < 1) {
+        LOG.debug("Going to rollback");
+        dbConn.rollback();
+        throw new NoSuchLockException("No such lock: " + extLockId);
+      }
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } finally {
+      closeStmt(stmt);
     }
-    LOG.debug("Going to commit");
-    dbConn.commit();
   }
 
   // Heartbeats on the txn table.  This commits, so do not enter it with any state
@@ -1418,68 +1466,83 @@ public class TxnHandler {
       throws NoSuchTxnException, TxnAbortedException, SQLException, MetaException {
     // If the txnid is 0, then there are no transactions in this heartbeat
     if (txnid == 0) return;
-    Statement stmt = dbConn.createStatement();
-    long now = getDbTime(dbConn);
-    // We need to check whether this transaction is valid and open
-    String s = "select txn_state from TXNS where txn_id = " + txnid;
-    LOG.debug("Going to execute query <" + s + ">");
-    ResultSet rs = stmt.executeQuery(s);
-    if (!rs.next()) {
-      LOG.debug("Going to rollback");
-      dbConn.rollback();
-      throw new NoSuchTxnException("No such transaction: " + txnid);
-    }
-    if (rs.getString(1).charAt(0) == TXN_ABORTED) {
-      LOG.debug("Going to rollback");
-      dbConn.rollback();
-      throw new TxnAbortedException("Transaction " + txnid +
-          " already aborted");
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      long now = getDbTime(dbConn);
+      // We need to check whether this transaction is valid and open
+      String s = "select txn_state from TXNS where txn_id = " + txnid;
+      LOG.debug("Going to execute query <" + s + ">");
+      ResultSet rs = stmt.executeQuery(s);
+      if (!rs.next()) {
+        LOG.debug("Going to rollback");
+        dbConn.rollback();
+        throw new NoSuchTxnException("No such transaction: " + txnid);
+      }
+      if (rs.getString(1).charAt(0) == TXN_ABORTED) {
+        LOG.debug("Going to rollback");
+        dbConn.rollback();
+        throw new TxnAbortedException("Transaction " + txnid +
+            " already aborted");
+      }
+      s = "update TXNS set txn_last_heartbeat = " + now +
+          " where txn_id = " + txnid;
+      LOG.debug("Going to execute update <" + s + ">");
+      stmt.executeUpdate(s);
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } finally {
+      closeStmt(stmt);
     }
-    s = "update TXNS set txn_last_heartbeat = " + now +
-        " where txn_id = " + txnid;
-    LOG.debug("Going to execute update <" + s + ">");
-    stmt.executeUpdate(s);
-    LOG.debug("Going to commit");
-    dbConn.commit();
   }
 
   // NEVER call this function without first calling heartbeat(long, long)
   private long getTxnIdFromLockId(Connection dbConn, long extLockId)
       throws NoSuchLockException, MetaException, SQLException {
-    Statement stmt = dbConn.createStatement();
-    String s = "select hl_txnid from HIVE_LOCKS where hl_lock_ext_id = " +
-        extLockId;
-    LOG.debug("Going to execute query <" + s + ">");
-    ResultSet rs = stmt.executeQuery(s);
-    if (!rs.next()) {
-      throw new MetaException("This should never happen!  We already " +
-          "checked the lock existed but now we can't find it!");
-    }
-    long txnid = rs.getLong(1);
-    LOG.debug("Return txnid " + (rs.wasNull() ? -1 : txnid));
-    return (rs.wasNull() ? -1 : txnid);
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      String s = "select hl_txnid from HIVE_LOCKS where hl_lock_ext_id = " +
+          extLockId;
+      LOG.debug("Going to execute query <" + s + ">");
+      ResultSet rs = stmt.executeQuery(s);
+      if (!rs.next()) {
+        throw new MetaException("This should never happen!  We already " +
+            "checked the lock existed but now we can't find it!");
+      }
+      long txnid = rs.getLong(1);
+      LOG.debug("Return txnid " + (rs.wasNull() ? -1 : txnid));
+      return (rs.wasNull() ? -1 : txnid);
+    } finally {
+      closeStmt(stmt);
+    }
   }
 
   // NEVER call this function without first calling heartbeat(long, long)
   private List<LockInfo> getLockInfoFromLockId(Connection dbConn, long extLockId)
       throws NoSuchLockException, MetaException, SQLException {
-    Statement stmt = dbConn.createStatement();
-    String s = "select hl_lock_ext_id, hl_lock_int_id, hl_db, hl_table, " +
-        "hl_partition, hl_lock_state, hl_lock_type from HIVE_LOCKS where " +
-        "hl_lock_ext_id = " + extLockId;
-    LOG.debug("Going to execute query <" + s + ">");
-    ResultSet rs = stmt.executeQuery(s);
-    boolean sawAtLeastOne = false;
-    List<LockInfo> ourLockInfo = new ArrayList<LockInfo>();
-    while (rs.next()) {
-      ourLockInfo.add(new LockInfo(rs));
-      sawAtLeastOne = true;
-    }
-    if (!sawAtLeastOne) {
-      throw new MetaException("This should never happen!  We already " +
-          "checked the lock existed but now we can't find it!");
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      String s = "select hl_lock_ext_id, hl_lock_int_id, hl_db, hl_table, " +
+          "hl_partition, hl_lock_state, hl_lock_type from HIVE_LOCKS where " +
+          "hl_lock_ext_id = " + extLockId;
+      LOG.debug("Going to execute query <" + s + ">");
+      ResultSet rs = stmt.executeQuery(s);
+      boolean sawAtLeastOne = false;
+      List<LockInfo> ourLockInfo = new ArrayList<LockInfo>();
+      while (rs.next()) {
+        ourLockInfo.add(new LockInfo(rs));
+        sawAtLeastOne = true;
+      }
+      if (!sawAtLeastOne) {
+        throw new MetaException("This should never happen!  We already " +
+            "checked the lock existed but now we can't find it!");
+      }
+      return ourLockInfo;
+    } finally {
+      closeStmt(stmt);
     }
-    return ourLockInfo;
   }
 
   // Clean time out locks from the database.  This does a commit,
@@ -1487,14 +1550,19 @@ public class TxnHandler {
   // open transactions.
   private void timeOutLocks(Connection dbConn) throws SQLException, MetaException {
     long now = getDbTime(dbConn);
-    Statement stmt = dbConn.createStatement();
-    // Remove any timed out locks from the table.
-    String s = "delete from HIVE_LOCKS where hl_last_heartbeat < " +
-        (now - timeout);
-    LOG.debug("Going to execute update <" + s + ">");
-    stmt.executeUpdate(s);
-    LOG.debug("Going to commit");
-    dbConn.commit();
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      // Remove any timed out locks from the table.
+      String s = "delete from HIVE_LOCKS where hl_last_heartbeat < " +
+          (now - timeout);
+      LOG.debug("Going to execute update <" + s + ">");
+      stmt.executeUpdate(s);
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } finally {
+      closeStmt(stmt);
+    }
   }
 
   // Abort timed out transactions.  This calls abortTxn(), which does a commit,
@@ -1502,19 +1570,24 @@ public class TxnHandler {
   // open transactions on the underlying database.
   private void timeOutTxns(Connection dbConn) throws SQLException, MetaException {
     long now = getDbTime(dbConn);
-    Statement stmt = dbConn.createStatement();
-    // Abort any timed out locks from the table.
-    String s = "select txn_id from TXNS where txn_state = '" + TXN_OPEN +
-        "' and txn_last_heartbeat <  " + (now - timeout);
-    LOG.debug("Going to execute query <" + s + ">");
-    ResultSet rs = stmt.executeQuery(s);
-    List<Long> deadTxns = new ArrayList<Long>();
-    // Limit the number of timed out transactions we do in one pass to keep from generating a
-    // huge delete statement
-    for (int i = 0; i < 20 && rs.next(); i++) deadTxns.add(rs.getLong(1));
-    // We don't care whether all of the transactions get deleted or not,
-    // if some didn't it most likely means someone else deleted them in the interum
-    if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+    Statement stmt = null;
+    try {
+      stmt = dbConn.createStatement();
+      // Abort any timed out transactions.
+      String s = "select txn_id from TXNS where txn_state = '" + TXN_OPEN +
+          "' and txn_last_heartbeat <  " + (now - timeout);
+      LOG.debug("Going to execute query <" + s + ">");
+      ResultSet rs = stmt.executeQuery(s);
+      List<Long> deadTxns = new ArrayList<Long>();
+      // Limit the number of timed out transactions we do in one pass to keep from generating a
+      // huge delete statement
+      for (int i = 0; i < 20 && rs.next(); i++) deadTxns.add(rs.getLong(1));
+      // We don't care whether all of the transactions get deleted or not,
+      // if some didn't, it most likely means someone else deleted them in the interim
+      if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+    } finally {
+      closeStmt(stmt);
+    }
   }
 
   private static synchronized void setupJdbcConnectionPool(HiveConf conf) throws SQLException {
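
Every TxnHandler hunk above applies the same resource-cleanup pattern. A minimal, self-contained sketch of that pattern (the class name and query are illustrative, not part of the patch):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class StatementCleanupSketch {
  // Declare the Statement before the try block so finally can see it,
  // create it inside the try, and close it null-safely in finally.
  public long currentHighWaterMark(Connection dbConn) throws SQLException {
    Statement stmt = null;
    try {
      stmt = dbConn.createStatement();
      ResultSet rs = stmt.executeQuery("select ntxn_next - 1 from NEXT_TXN_ID");
      if (!rs.next()) throw new SQLException("Transaction table not initialized");
      return rs.getLong(1);
    } finally {
      closeStmt(stmt); // runs on both the normal and the exceptional path
    }
  }

  // Null-safe close mirroring the closeStmt() helper added by the patch;
  // close failures are swallowed (logged in the real code) so they cannot
  // mask an exception already in flight.
  private void closeStmt(Statement stmt) {
    try {
      if (stmt != null) stmt.close();
    } catch (SQLException e) {
      // nothing useful to do here beyond logging
    }
  }
}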

Modified: hive/branches/cbo/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/pom.xml?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/pom.xml (original)
+++ hive/branches/cbo/pom.xml Sat Jun 28 00:23:54 2014
@@ -740,7 +740,7 @@
           <redirectTestOutputToFile>true</redirectTestOutputToFile>
           <reuseForks>false</reuseForks>
           <failIfNoTests>false</failIfNoTests>
-          <argLine>-Xmx1024m -XX:MaxPermSize=256m</argLine>
+          <argLine>-Xmx2048m -XX:MaxPermSize=512m</argLine>
           <additionalClasspathElements>
             <additionalClasspathElement>${test.tmp.dir}/conf</additionalClasspathElement>
             <additionalClasspathElement>${basedir}/${hive.path.to.root}/conf</additionalClasspathElement>

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sat Jun 28 00:23:54 2014
@@ -1076,16 +1076,14 @@ public class DDLTask extends Task<DDLWor
     switch (alterDbDesc.getAlterType()) {
     case ALTER_PROPERTY:
       Map<String, String> newParams = alterDbDesc.getDatabaseProperties();
-      if (database != null) {
-        Map<String, String> params = database.getParameters();
-        // if both old and new params are not null, merge them
-        if (params != null && newParams != null) {
-          params.putAll(newParams);
-          database.setParameters(params);
-        } else { // if one of them is null, replace the old params with the new
-                 // one
-          database.setParameters(newParams);
-        }
+      Map<String, String> params = database.getParameters();
+      // if both old and new params are not null, merge them
+      if (params != null && newParams != null) {
+        params.putAll(newParams);
+        database.setParameters(params);
+      } else {
+        // if one of them is null, replace the old params with the new one
+        database.setParameters(newParams);
       }
       break;
 
@@ -2618,7 +2616,7 @@ public class DDLTask extends Task<DDLWor
    *           Throws this exception if an unexpected error occurs.
    */
   private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException {
-    // get the tables for the desired pattenn - populate the output stream
+    // get the tables for the desired pattern - populate the output stream
     Set<String> funcs = null;
     if (showFuncs.getPattern() != null) {
       LOG.info("pattern: " + showFuncs.getPattern());
@@ -3216,16 +3214,20 @@ public class DDLTask extends Task<DDLWor
 
       if (database == null) {
         throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, descDatabase.getDatabaseName());
-      } else {
-        Map<String, String> params = null;
-        if(descDatabase.isExt()) {
-          params = database.getParameters();
-        }
-        PrincipalType ownerType = database.getOwnerType();
-        formatter.showDatabaseDescription(outStream, database.getName(),
-            database.getDescription(), database.getLocationUri(),
-            database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params);
       }
+      Map<String, String> params = null;
+      if (descDatabase.isExt()) {
+        params = database.getParameters();
+      }
+      String location = database.getLocationUri();
+      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) {
+        location = "location/in/test";
+      }
+      PrincipalType ownerType = database.getOwnerType();
+      formatter.showDatabaseDescription(outStream, database.getName(),
+          database.getDescription(), location,
+          database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params);
+
       outStream.close();
       outStream = null;
     } catch (IOException e) {
@@ -4018,7 +4020,7 @@ public class DDLTask extends Task<DDLWor
     // drop the table
     db.dropTable(dropTbl.getTableName());
     if (tbl != null) {
-      // We have already locked the table in DDLSemenaticAnalyzer, don't do it again here
+      // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
       work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }
   }
@@ -4556,8 +4558,6 @@ public class DDLTask extends Task<DDLWor
    /**
    * Make location in specified sd qualified.
    *
-   * @param conf
-   *          Hive configuration.
    * @param databaseName
    *          Database name.
    * @param sd
@@ -4592,8 +4592,6 @@ public class DDLTask extends Task<DDLWor
    /**
    * Make qualified location for an index .
    *
-   * @param conf
-   *          Hive configuration.
    * @param crtIndex
    *          Create index descriptor.
    * @param name
@@ -4623,8 +4621,6 @@ public class DDLTask extends Task<DDLWor
    /**
    * Make qualified location for a database .
    *
-   * @param conf
-   *          Hive configuration.
    * @param database
    *          Database.
    */

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java Sat Jun 28 00:23:54 2014
@@ -290,7 +290,7 @@ public class ReduceSinkOperator extends 
       populateCachedDistributionKeys(row, 0);
 
       // replace bucketing columns with hashcode % numBuckets
-      int buckNum = 0;
+      int buckNum = -1;
       if (bucketEval != null) {
         buckNum = computeBucketNumber(row, conf.getNumBuckets());
         cachedKeys[0][buckColIdxInKey] = new IntWritable(buckNum);
@@ -304,14 +304,12 @@ public class ReduceSinkOperator extends 
       }
 
       final int hashCode;
-      
-      if(autoParallel && partitionEval.length > 0) {
-        // distKeyLength doesn't include tag, but includes buckNum in cachedKeys[0]
-        hashCode = hash.hash(firstKey.getBytes(), distKeyLength, 0);  
-      } else if(bucketEval != null && bucketEval.length > 0) {
-        hashCode = computeHashCode(row, buckNum);
+
+      // distKeyLength doesn't include tag, but includes buckNum in cachedKeys[0]
+      if (autoParallel && partitionEval.length > 0) {
+        hashCode = computeMurmurHash(firstKey);
       } else {
-        hashCode = computeHashCode(row);
+        hashCode = computeHashCode(row, buckNum);
       }
       
       firstKey.setHashCode(hashCode);
@@ -384,7 +382,11 @@ public class ReduceSinkOperator extends 
     union.setTag((byte) index);
   }
 
-  private int computeHashCode(Object row) throws HiveException {
+  protected final int computeMurmurHash(HiveKey firstKey) {
+    return hash.hash(firstKey.getBytes(), firstKey.getDistKeyLength(), 0);
+  }
+
+  private int computeHashCode(Object row, int buckNum) throws HiveException {
     // Evaluate the HashCode
     int keyHashCode = 0;
     if (partitionEval.length == 0) {
@@ -403,13 +405,7 @@ public class ReduceSinkOperator extends 
             + ObjectInspectorUtils.hashCode(o, partitionObjectInspectors[i]);
       }
     }
-    return keyHashCode;
-  }
-
-  private int computeHashCode(Object row, int buckNum) throws HiveException {
-    int keyHashCode = computeHashCode(row);
-    keyHashCode = keyHashCode * 31 + buckNum;
-    return keyHashCode;
+    return buckNum < 0  ? keyHashCode : keyHashCode * 31 + buckNum;
   }
 
   // Serialize the keys and append the tag
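
The hashing change in this file (and its vectorized twin below) is easier to read in isolation: buckNum now uses -1 as a no-bucketing sentinel, so a single computeHashCode(row, buckNum) covers both the bucketed and unbucketed cases, and the murmur path is taken only for auto-parallel reduce sinks. A hedged sketch, with illustrative method and parameter names:

import org.apache.hadoop.util.hash.Hash;
import org.apache.hadoop.util.hash.MurmurHash;

public class ReduceSinkHashSketch {
  private static final Hash MURMUR = MurmurHash.getInstance();

  static int chooseHashCode(boolean autoParallel, int numPartitionCols,
                            byte[] distKey, int distKeyLength,
                            int keyHashCode, int buckNum) {
    if (autoParallel && numPartitionCols > 0) {
      // distKeyLength excludes the tag but includes buckNum in the serialized key
      return MURMUR.hash(distKey, distKeyLength, 0);
    }
    // buckNum < 0 means no bucketing columns; otherwise fold the bucket number in
    return buckNum < 0 ? keyHashCode : keyHashCode * 31 + buckNum;
  }
}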

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Sat Jun 28 00:23:54 2014
@@ -3220,8 +3220,8 @@ public final class Utilities {
       HiveConf.setVar(conf, var, mWork.getInputformat());
     }
     if (mWork.getIndexIntermediateFile() != null) {
-      conf.set("hive.index.compact.file", mWork.getIndexIntermediateFile());
-      conf.set("hive.index.blockfilter.file", mWork.getIndexIntermediateFile());
+      conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile());
+      conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile());
     }
 
     // Intentionally overwrites anything the user may have put here
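
This two-line swap (repeated in ExecDriver below) replaces raw key strings with HiveConf.ConfVars entries; the varname field carries the canonical key, so the literal lives in exactly one place. A small sketch of the idiom, assuming the ConfVars entries this commit introduces:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ConfVarsSketch {
  // before: conf.set("hive.index.compact.file", intermediateFile);
  static void setIndexFiles(Configuration conf, String intermediateFile) {
    conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, intermediateFile);
    conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, intermediateFile);
  }
}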

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java Sat Jun 28 00:23:54 2014
@@ -551,8 +551,8 @@ public class ExecDriver extends Task<Map
       HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat());
     }
     if (mWork.getIndexIntermediateFile() != null) {
-      conf.set("hive.index.compact.file", mWork.getIndexIntermediateFile());
-      conf.set("hive.index.blockfilter.file", mWork.getIndexIntermediateFile());
+      conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile());
+      conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile());
     }
 
     // Intentionally overwrites anything the user may have put here
@@ -732,7 +732,7 @@ public class ExecDriver extends Task<Map
       memoryMXBean = ManagementFactory.getMemoryMXBean();
       MapredLocalWork plan = Utilities.deserializePlan(pathData, MapredLocalWork.class, conf);
       MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
-      ret = ed.executeFromChildJVM(new DriverContext());
+      ret = ed.executeInProcess(new DriverContext());
 
     } else {
       MapredWork plan = Utilities.deserializePlan(pathData, MapredWork.class, conf);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java Sat Jun 28 00:23:54 2014
@@ -127,8 +127,7 @@ public class MapRedTask extends ExecDriv
         }
       }
 
-      runningViaChild = ShimLoader.getHadoopShims().isLocalMode(conf) ||
-        conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD);
+      runningViaChild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD);
 
       if(!runningViaChild) {
         // we are not running this mapred task via child jvm

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java Sat Jun 28 00:23:54 2014
@@ -130,6 +130,17 @@ public class MapredLocalTask extends Tas
 
   @Override
   public int execute(DriverContext driverContext) {
+    if (conf.getBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD)) {
+      // send task off to another jvm
+      return executeInChildVM(driverContext);
+    } else {
+      // execute in process
+      return executeInProcess(driverContext);
+    }
+  }
+
+  public int executeInChildVM(DriverContext driverContext) {
+    // execute in child jvm
     try {
       // generate the cmd line to run in the child jvm
       Context ctx = driverContext.getCtx();
@@ -285,9 +296,7 @@ public class MapredLocalTask extends Tas
     }
   }
 
-
-
-  public int executeFromChildJVM(DriverContext driverContext) {
+  public int executeInProcess(DriverContext driverContext) {
     // check the local work
     if (work == null) {
       return -1;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java Sat Jun 28 00:23:54 2014
@@ -210,6 +210,9 @@ public class VectorReduceSinkOperator ex
       if (limit >= 0 && memUsage > 0) {
         reducerHash.initialize(limit, memUsage, conf.isMapGroupBy(), this);
       }
+
+      autoParallel = conf.isAutoParallel();
+
     } catch(Exception e) {
       throw new HiveException(e);
     }
@@ -265,8 +268,8 @@ public class VectorReduceSinkOperator ex
         populatedCachedDistributionKeys(vrg, rowIndex, 0);
 
         // replace bucketing columns with hashcode % numBuckets
-        int buckNum = 0;
-        if (bucketEval != null && bucketEval.length != 0) {
+        int buckNum = -1;
+        if (bucketEval != null) {
           buckNum = computeBucketNumber(vrg, rowIndex, conf.getNumBuckets());
           cachedKeys[0][buckColIdxInKey] = new IntWritable(buckNum);
         }
@@ -280,12 +283,11 @@ public class VectorReduceSinkOperator ex
 
         final int hashCode;
 
-        if(autoParallel && partitionEval.length > 0) {
-          hashCode = hash.hash(firstKey.getBytes(), firstKey.getDistKeyLength(), 0);
-        } else if(bucketEval != null && bucketEval.length > 0) {
-          hashCode = computeHashCode(vrg, rowIndex, buckNum);
+        // distKeyLength doesn't include tag, but includes buckNum in cachedKeys[0]
+        if (autoParallel && partitionEval.length > 0) {
+          hashCode = computeMurmurHash(firstKey);
         } else {
-          hashCode = computeHashCode(vrg, rowIndex);
+          hashCode = computeHashCode(vrg, rowIndex, buckNum);
         }
 
         firstKey.setHashCode(hashCode);
@@ -417,7 +419,7 @@ public class VectorReduceSinkOperator ex
     return (BytesWritable)valueSerializer.serialize(cachedValues, valueObjectInspector);
   }
 
-  private int computeHashCode(VectorizedRowBatch vrg, int rowIndex) throws HiveException {
+  private int computeHashCode(VectorizedRowBatch vrg, int rowIndex, int buckNum) throws HiveException {
     // Evaluate the HashCode
     int keyHashCode = 0;
     if (partitionEval.length == 0) {
@@ -440,13 +442,7 @@ public class VectorReduceSinkOperator ex
                 partitionWriters[p].getObjectInspector());
       }
     }
-    return keyHashCode;
-  }
-
-  private int computeHashCode(VectorizedRowBatch vrg, int rowIndex, int buckNum) throws HiveException {
-    int keyHashCode = computeHashCode(vrg, rowIndex);
-    keyHashCode = keyHashCode * 31 + buckNum;
-    return keyHashCode;
+    return buckNum < 0  ? keyHashCode : keyHashCode * 31 + buckNum;
   }
 
   private int computeBucketNumber(VectorizedRowBatch vrg, int rowIndex, int numBuckets) throws HiveException {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java Sat Jun 28 00:23:54 2014
@@ -340,15 +340,8 @@ public class RCFile {
 
   private static final Log LOG = LogFactory.getLog(RCFile.class);
 
-  public static final String RECORD_INTERVAL_CONF_STR = "hive.io.rcfile.record.interval";
-
   public static final String COLUMN_NUMBER_METADATA_STR = "hive.io.rcfile.column.number";
 
-  public static final String COLUMN_NUMBER_CONF_STR = "hive.io.rcfile.column.number.conf";
-
-  public static final String TOLERATE_CORRUPTIONS_CONF_STR =
-    "hive.io.rcfile.tolerate.corruptions";
-
   // HACK: We actually need BlockMissingException, but that is not available
   // in all hadoop versions.
   public static final String BLOCK_MISSING_MESSAGE =
@@ -985,8 +978,8 @@ public class RCFile {
     public Writer(FileSystem fs, Configuration conf, Path name, int bufferSize,
         short replication, long blockSize, Progressable progress,
         Metadata metadata, CompressionCodec codec) throws IOException {
-      RECORD_INTERVAL = conf.getInt(RECORD_INTERVAL_CONF_STR, RECORD_INTERVAL);
-      columnNumber = conf.getInt(COLUMN_NUMBER_CONF_STR, 0);
+      RECORD_INTERVAL = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL);
+      columnNumber = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF);
 
       if (metadata == null) {
         metadata = new Metadata();
@@ -1346,8 +1339,8 @@ public class RCFile {
     /** Create a new RCFile reader. */
     public Reader(FileSystem fs, Path file, int bufferSize, Configuration conf,
         long start, long length) throws IOException {
-      tolerateCorruptions = conf.getBoolean(
-        TOLERATE_CORRUPTIONS_CONF_STR, false);
+      tolerateCorruptions = HiveConf.getBoolVar(
+          conf, HiveConf.ConfVars.HIVE_RCFILE_TOLERATE_CORRUPTIONS);
       conf.setInt("io.file.buffer.size", bufferSize);
       this.file = file;
       in = openFile(fs, file, bufferSize, length);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java Sat Jun 28 00:23:54 2014
@@ -25,6 +25,7 @@ import org.apache.commons.lang.StringUti
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
 import org.apache.hadoop.io.Writable;
@@ -57,7 +58,7 @@ public class RCFileOutputFormat extends
    */
   public static void setColumnNumber(Configuration conf, int columnNum) {
     assert columnNum > 0;
-    conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
+    conf.setInt(HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF.varname, columnNum);
   }
 
   /**
@@ -67,7 +68,7 @@ public class RCFileOutputFormat extends
    * @return number of columns for RCFile's writer
    */
   public static int getColumnNumber(Configuration conf) {
-    return conf.getInt(RCFile.COLUMN_NUMBER_CONF_STR, 0);
+    return HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF);
   }
 
   /** {@inheritDoc} */
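
With the column-number key now owned by HiveConf, callers go through these accessors instead of setting the raw string, which is exactly what the merge and truncate mappers below switch to. A minimal usage sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;

public class ColumnNumberSketch {
  public static void main(String[] args) {
    Configuration jc = new Configuration();
    RCFileOutputFormat.setColumnNumber(jc, 8);                   // asserts columnNum > 0
    System.out.println(RCFileOutputFormat.getColumnNumber(jc));  // prints 8
  }
}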

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java Sat Jun 28 00:23:54 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
@@ -150,7 +151,7 @@ public class RCFileMergeMapper extends M
       if (outWriter == null) {
         codec = key.codec;
         columnNumber = key.keyBuffer.getColumnNumber();
-        jc.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNumber);
+        RCFileOutputFormat.setColumnNumber(jc, columnNumber);
         outWriter = new RCFile.Writer(fs, jc, outPath, null, codec);
       }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java Sat Jun 28 00:23:54 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileKeyBufferWrapper;
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileValueBufferWrapper;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -124,7 +125,7 @@ public class ColumnTruncateMapper extend
       if (outWriter == null) {
         codec = key.getCodec();
         columnNumber = key.getKeyBuffer().getColumnNumber();
-        jc.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNumber);
+        RCFileOutputFormat.setColumnNumber(jc, columnNumber);
         outWriter = new RCFile.Writer(fs, jc, outPath, null, codec);
       }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Sat Jun 28 00:23:54 2014
@@ -104,7 +104,6 @@ import org.apache.hadoop.hive.serde2.Des
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.util.StringUtils;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java Sat Jun 28 00:23:54 2014
@@ -38,7 +38,7 @@ public class DescDatabaseDesc extends DD
   /**
    * thrift ddl for the result of describe database.
    */
-  private static final String schema = "db_name,comment,location,parameters#string:string:string:string";
+  private static final String schema = "db_name,comment,location,owner_name,owner_type,parameters#string:string:string:string:string:string";
 
   public DescDatabaseDesc() {
   }

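The widened schema string keeps the usual DDL desc convention: comma-separated
column names, then '#', then colon-separated column types, so the two new
columns (owner_name, owner_type) need two matching "string" entries on the
type side. A small self-contained sketch of splitting such a string
(illustrative only, not Hive code):

    public final class SchemaSplitSketch {
      public static void main(String[] args) {
        String schema = "db_name,comment,location,owner_name,owner_type,parameters"
            + "#string:string:string:string:string:string";
        String[] halves = schema.split("#", 2);
        String[] names = halves[0].split(",");  // 6 column names
        String[] types = halves[1].split(":");  // 6 column types
        // The two sides must stay in lockstep for the result schema to be valid.
        System.out.println(names.length + " names, " + types.length + " types");
      }
    }
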
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java Sat Jun 28 00:23:54 2014
@@ -116,6 +116,7 @@ public class SetProcessor implements Com
     if (varvalue.contains("\n")){
       ss.err.println("Warning: Value had a \\n character in it.");
     }
+    varname = varname.trim();
     if (varname.startsWith(SetProcessor.ENV_PREFIX)){
       ss.err.println("env:* variables can not be set.");
       return 1;
@@ -145,8 +146,8 @@ public class SetProcessor implements Com
         if (!confVars.isType(value)) {
           StringBuilder message = new StringBuilder();
           message.append("'SET ").append(varname).append('=').append(varvalue);
-          message.append("' FAILED because ").append(key).append(" expects an ");
-          message.append(confVars.typeString()).append(" value.");
+          message.append("' FAILED because ").append(key).append(" expects ");
+          message.append(confVars.typeString()).append(" type value.");
           throw new IllegalArgumentException(message.toString());
         }
         String fail = confVars.validate(value);
@@ -156,6 +157,8 @@ public class SetProcessor implements Com
           message.append("' FAILED in validation : ").append(fail).append('.');
           throw new IllegalArgumentException(message.toString());
         }
+      } else if (key.startsWith("hive.")) {
+        throw new IllegalArgumentException("hive configuration " + key + " does not exist.");
       }
     }
     conf.verifyAndSet(key, value);

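Two behavioural changes land here: the variable name is trimmed before any
prefix checks, and an unrecognized key in the hive.* namespace is now rejected
instead of being stored silently. A simplified sketch of the resulting flow,
with a plain map standing in for the HiveConf.ConfVars lookup (the helper
types are assumptions of the sketch, not Hive API):

    import java.util.Map;
    import java.util.function.Predicate;

    final class SetFlowSketch {
      // knownHiveVars maps a known hive.* key to a type-check predicate.
      static void set(String varname, String value,
                      Map<String, Predicate<String>> knownHiveVars) {
        varname = varname.trim(); // so " hive.foo " behaves like "hive.foo"
        Predicate<String> typeCheck = knownHiveVars.get(varname);
        if (typeCheck != null) {
          if (!typeCheck.test(value)) {
            throw new IllegalArgumentException(
                "'SET " + varname + "=" + value + "' FAILED: wrong value type.");
          }
        } else if (varname.startsWith("hive.")) {
          // Unknown hive.* keys now fail fast instead of being accepted.
          throw new IllegalArgumentException(
              "hive configuration " + varname + " does not exist.");
        }
        // Keys outside the hive.* namespace still pass through unchecked.
      }
    }
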
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java Sat Jun 28 00:23:54 2014
@@ -130,7 +130,6 @@ public class GenericUDAFCount implements
         assert parameters.length == 0;
         ((CountAgg) agg).value++;
       } else {
-        assert parameters.length > 0;
         boolean countThisRow = true;
         for (Object nextParam : parameters) {
           if (nextParam == null) {

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java Sat Jun 28 00:23:54 2014
@@ -80,6 +80,9 @@ public class TestExecDriver extends Test
   static {
     try {
       conf = new HiveConf(ExecDriver.class);
+      conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true);
+      conf.setBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD, true);
+
       SessionState.start(conf);
 
       //convert possible incompatible Windows path in config

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java Sat Jun 28 00:23:54 2014
@@ -624,7 +624,7 @@ public class TestRCFile {
     int writeCount = 2500;
     Configuration cloneConf = new Configuration(conf);
     RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
-    cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
+    cloneConf.setInt(HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname, intervalRecordCount);
     RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
 
     BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
@@ -690,7 +690,7 @@ public class TestRCFile {
     fs.delete(testFile, true);
     Configuration cloneConf = new Configuration(conf);
     RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
-    cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
+    cloneConf.setInt(HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname, intervalRecordCount);
 
     RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
 

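The test now takes the record-interval key from
HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname rather than a second
constant on RCFile, leaving a single source of truth for the string. The
enum-with-varname pattern looks roughly like this (member name, key, and
default are illustrative, not the actual HiveConf entries):

    // Each member carries its conf key, so call sites write
    // ConfVarsSketch.RCFILE_RECORD_INTERVAL.varname instead of a raw literal.
    enum ConfVarsSketch {
      RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", 1000);

      final String varname;
      final int defaultIntVal;

      ConfVarsSketch(String varname, int defaultIntVal) {
        this.varname = varname;
        this.defaultIntVal = defaultIntVal;
      }
    }

Usage then mirrors the diff:
cloneConf.setInt(ConfVarsSketch.RCFILE_RECORD_INTERVAL.varname, intervalRecordCount);
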
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,7 @@
 set hive.archive.enabled = true;
 set hive.enforce.bucketing = true;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q Sat Jun 28 00:23:54 2014
@@ -1,10 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.txn.testing=true;
 
 create table T1(key string, val string) stored as textfile;
 
-set hive.txn.testing=true;
 alter table T1 compact 'major';
 
 alter table T1 compact 'minor';

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q Sat Jun 28 00:23:54 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.txn.testing=true;
 
 create table T1(key string, val string) partitioned by (ds string) stored as textfile;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q Sat Jun 28 00:23:54 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.txn.testing=true;
 
 create database D1;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q Sat Jun 28 00:23:54 2014
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.txn.testing=true;
 
 show locks;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q Sat Jun 28 00:23:54 2014
@@ -6,6 +6,8 @@ set hive.exec.dynamic.partition.mode=non
 set hive.vectorized.execution.enabled=true;
 set hive.enforce.bucketing=false;
 set hive.enforce.sorting=false;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 
 create table over1k(
            t tinyint,

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q Sat Jun 28 00:23:54 2014
@@ -5,6 +5,8 @@ set hive.exec.max.dynamic.partitions.per
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.enforce.bucketing=false;
 set hive.enforce.sorting=false;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 
 create table over1k(
            t tinyint,

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/fetch_aggregation.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/fetch_aggregation.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/fetch_aggregation.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/fetch_aggregation.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,6 @@
 set hive.fetch.task.aggr=true;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 
 explain
 select count(key),sum(key),avg(key),min(key),max(key),std(key),variance(key) from src;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/index_bitmap_compression.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/index_bitmap_compression.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/index_bitmap_compression.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/index_bitmap_compression.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,5 @@
 set hive.stats.dbclass=fs;
-SET hive.exec.compress.result=true;
+SET hive.exec.compress.output=true;
 
 -- SORT_QUERY_RESULTS
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/index_compression.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/index_compression.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/index_compression.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/index_compression.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,4 @@
-SET hive.exec.compress.result=true;
+SET hive.exec.compress.output=true;
 SET hive.stats.dbclass=fs;
 
 -- SORT_QUERY_RESULTS

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/join25.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/join25.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/join25.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/join25.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,3 @@
-set hive.mapjoin.numrows = 2;
-
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/join36.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/join36.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/join36.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/join36.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,3 @@
-set hive.mapjoin.numrows = 2;
-
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE tmp1(key INT, cnt INT);

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/join37.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/join37.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/join37.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/join37.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,3 @@
-set hive.mapjoin.numrows = 2;
-
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/join_nulls.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/join_nulls.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/join_nulls.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/join_nulls.q Sat Jun 28 00:23:54 2014
@@ -49,8 +49,8 @@ LOAD DATA LOCAL INPATH '../../data/files
 LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2;
 LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2;
 
-SET hive.optimize.bucketmapJOIN = true;
-SET hive.optimize.bucketmapJOIN.sortedmerge = true;
+SET hive.optimize.bucketmapjoin = true;
+SET hive.optimize.bucketmapjoin.sortedmerge = true;
 SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
 SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/join_nullsafe.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/join_nullsafe.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/join_nullsafe.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/join_nullsafe.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,3 @@
-set hive.nullsafe.equijoin=true;
-
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE myinput1(key int, value int);
@@ -30,16 +28,23 @@ SELECT * FROM myinput1 a FULL OUTER JOIN
 SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value;
 SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value;
 
+CREATE TABLE smb_input(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
+LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
+
+set hive.enforce.sorting = true;
+set hive.enforce.bucketing = true;
+
 -- smbs
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input2;
-LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input2;
 
-SET hive.optimize.bucketmapJOIN = true;
-SET hive.optimize.bucketmapJOIN.sortedmerge = true;
+from smb_input
+insert overwrite table smb_input1 select *
+insert overwrite table smb_input2 select *;
+
+SET hive.optimize.bucketmapjoin = true;
+SET hive.optimize.bucketmapjoin.sortedmerge = true;
 SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
 SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key;

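Worth noting on the join_nullsafe.q change: LOAD DATA only moves files into
place and never buckets or sorts them, so the SMB inputs are now produced with
INSERT OVERWRITE while hive.enforce.bucketing and hive.enforce.sorting are
enabled, which is what actually yields data matching the tables' CLUSTERED BY
/ SORTED BY declarations.
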
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_export_drop.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_export_drop.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_export_drop.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_export_drop.q Sat Jun 28 00:23:54 2014
@@ -2,7 +2,7 @@ create table tmp_meta_export_listener_dr
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/data/exports/HIVE-3427;
 set hive.metastore.pre.event.listeners=org.apache.hadoop.hive.ql.parse.MetaDataExportListener;
 set hive.metadata.export.location=${system:test.tmp.dir}/data/exports/HIVE-3427;
-set hive.move.exported.metadata.to.trash=false;
+set hive.metadata.move.exported.metadata.to.trash=false;
 drop table tmp_meta_export_listener_drop_test;
 dfs -rmr ${system:test.tmp.dir}/data/exports/HIVE-3427;
 set hive.metastore.pre.event.listeners=;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/nonmr_fetch.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/nonmr_fetch.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/nonmr_fetch.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/nonmr_fetch.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,6 @@
 set hive.fetch.task.conversion=minimal;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 
 -- backward compatible (minimal)
 explain select * from src limit 10;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/orc_analyze.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/orc_analyze.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/orc_analyze.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/orc_analyze.q Sat Jun 28 00:23:54 2014
@@ -1,3 +1,6 @@
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
+
 CREATE TABLE orc_create_people_staging (
   id int,
   first_name string,

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/overridden_confs.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/overridden_confs.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/overridden_confs.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/overridden_confs.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,4 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyOverriddenConfigsHook;
-set hive.config.doesnt.exit=abc;
+set some.hive.config.doesnt.exit=abc;
 
 select count(*) from src;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/quotedid_skew.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/quotedid_skew.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/quotedid_skew.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/quotedid_skew.q Sat Jun 28 00:23:54 2014
@@ -2,7 +2,6 @@
 set hive.support.quoted.identifiers=column;
 
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/sample10.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/sample10.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/sample10.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/sample10.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,5 @@
-
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.enforce.bucketing=true;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q Sat Jun 28 00:23:54 2014
@@ -1,3 +1,5 @@
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 set mapred.max.split.size=300;
 set mapred.min.split.size=300;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,3 @@
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 set hive.mapred.supports.subdirectories=true;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q Sat Jun 28 00:23:54 2014
@@ -1,4 +1,3 @@
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 set hive.mapred.supports.subdirectories=true;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt1.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt1.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt1.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt1.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt10.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt10.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt10.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt10.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt11.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt11.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt11.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt11.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;	
 set hive.optimize.skewjoin.compiletime = true;
     
 CREATE TABLE T1(key STRING, val STRING)	

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt12.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt12.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt12.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt12.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt13.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt13.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt13.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt13.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt14.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt14.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt14.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt14.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt15.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt15.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt15.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt15.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt16.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt16.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt16.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt16.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt17.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt17.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt17.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt17.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt18.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt18.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt18.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt18.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt19.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt19.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt19.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt19.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt2.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt2.q?rev=1606275&r1=1606274&r2=1606275&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt2.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/skewjoinopt2.q Sat Jun 28 00:23:54 2014
@@ -1,5 +1,4 @@
 set hive.mapred.supports.subdirectories=true;
-set hive.internal.ddl.list.bucketing.enable=true;
 set hive.optimize.skewjoin.compiletime = true;
 
 CREATE TABLE T1(key STRING, val STRING)