Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/02 21:57:07 UTC

svn commit: r1622108 [15/27] - in /hive/branches/tez: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/ bin/ bin/ext/ checkstyle/ common/src/java/...

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java Tue Sep  2 19:56:56 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.io.sar
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
+
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.common.type.HiveChar;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 import java.math.BigDecimal;
+import java.sql.Timestamp;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -107,6 +109,12 @@ final class SearchArgumentImpl implement
 
     @Override
     public Object getLiteral() {
+      // Work around a Kryo 2.22 bug that deserializes a Timestamp into a Date
+      // (https://github.com/EsotericSoftware/kryo/issues/88):
+      // when we see a Date here, convert it back into a Timestamp.
+      if (literal instanceof java.util.Date) {
+        return new Timestamp(((java.util.Date)literal).getTime());
+      }
       return literal;
     }
 
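For context: java.sql.Timestamp extends java.util.Date, so a Timestamp that
Kryo 2.22 hands back as a plain Date can be rebuilt from its epoch
milliseconds (only getTime() survives the round trip, which preserves
millisecond precision). A minimal sketch of the guard above, using a
hypothetical helper name rather than the patched class:

    import java.sql.Timestamp;
    import java.util.Date;

    class TimestampLiteralFix {
      // Hypothetical helper mirroring the patched getLiteral() guard.
      static Object restoreTimestamp(Object literal) {
        if (literal instanceof Date) {
          // Rebuild from epoch millis; because Timestamp extends Date,
          // an already-correct Timestamp is simply copied.
          return new Timestamp(((Date) literal).getTime());
        }
        return literal;
      }
    }
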
@@ -317,6 +325,8 @@ final class SearchArgumentImpl implement
             return PredicateLeaf.Type.FLOAT;
           case DATE:
             return PredicateLeaf.Type.DATE;
+          case TIMESTAMP:
+            return PredicateLeaf.Type.TIMESTAMP;
           case DECIMAL:
             return PredicateLeaf.Type.DECIMAL;
           default:
@@ -354,6 +364,7 @@ final class SearchArgumentImpl implement
         case FLOAT:
           return ((Number) lit.getValue()).doubleValue();
         case DATE:
+        case TIMESTAMP:
         case DECIMAL:
           return lit;
         default:
@@ -948,6 +959,7 @@ final class SearchArgumentImpl implement
           literal instanceof Long ||
           literal instanceof Double ||
           literal instanceof DateWritable ||
+          literal instanceof Timestamp ||
           literal instanceof HiveDecimal ||
           literal instanceof BigDecimal) {
         return literal;
@@ -981,7 +993,9 @@ final class SearchArgumentImpl implement
         return PredicateLeaf.Type.FLOAT;
       } else if (literal instanceof DateWritable) {
         return PredicateLeaf.Type.DATE;
-      } else if (literal instanceof HiveDecimal ||
+      } else if (literal instanceof Timestamp) {
+        return PredicateLeaf.Type.TIMESTAMP;
+      } else if (literal instanceof HiveDecimal ||
           literal instanceof BigDecimal) {
         return PredicateLeaf.Type.DECIMAL;
       }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java Tue Sep  2 19:56:56 2014
@@ -165,13 +165,13 @@ public class DbTxnManager extends HiveTx
           break;
 
         case TABLE:
+        case DUMMYPARTITION:   // with dynamic partitioning, lock the table
           t = output.getTable();
           compBuilder.setDbName(t.getDbName());
           compBuilder.setTableName(t.getTableName());
           break;
 
         case PARTITION:
-        case DUMMYPARTITION:
           compBuilder.setPartitionName(output.getPartition().getName());
           t = output.getPartition().getTable();
           compBuilder.setDbName(t.getDbName());
@@ -301,7 +301,10 @@ public class DbTxnManager extends HiveTx
     try {
       if (txnId > 0) rollbackTxn();
       if (lockMgr != null) lockMgr.close();
+      if (client != null) client.close();
     } catch (Exception e) {
+      LOG.error("Caught exception " + e.getClass().getName() + " with message <" + e.getMessage()
+          + ">, swallowing as there is nothing we can do with it.");
       // Not much we can do about it here.
     }
   }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java Tue Sep  2 19:56:56 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.lockmgr
 import org.apache.hadoop.hive.ql.metadata.*;
 
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
 /**
@@ -38,7 +39,7 @@ public class EmbeddedLockManager impleme
 
   private HiveLockManagerCtx ctx;
 
-  private int sleepTime = 1000;
+  private long sleepTime = 1000;
   private int numRetriesForLock = 0;
   private int numRetriesForUnLock = 0;
 
@@ -82,12 +83,13 @@ public class EmbeddedLockManager impleme
 
   public void refresh() {
     HiveConf conf = ctx.getConf();
-    sleepTime = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES) * 1000;
+    sleepTime = conf.getTimeVar(
+        HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
     numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
     numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
   }
 
-  public HiveLock lock(HiveLockObject key, HiveLockMode mode, int numRetriesForLock, int sleepTime)
+  public HiveLock lock(HiveLockObject key, HiveLockMode mode, int numRetriesForLock, long sleepTime)
       throws LockException {
     for (int i = 0; i <= numRetriesForLock; i++) {
       if (i > 0) {
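
The sleepTime fields above widen from int to long because HiveConf.getTimeVar
returns a long already converted to the requested TimeUnit, replacing the
hand-rolled seconds-to-millis multiplication. A small usage sketch (the
surrounding class and default HiveConf are illustrative, not from the patch):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    class RetrySleepExample {
      public static void main(String[] args) throws InterruptedException {
        HiveConf conf = new HiveConf();
        // getTimeVar converts whatever unit the value was configured in
        // into the requested TimeUnit, so no manual "* 1000" is needed.
        long sleepTime = conf.getTimeVar(
            HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
        Thread.sleep(sleepTime);  // long matches Thread.sleep(long) directly
      }
    }
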
@@ -101,7 +103,7 @@ public class EmbeddedLockManager impleme
     return null;
   }
 
-  private void sleep(int sleepTime) {
+  private void sleep(long sleepTime) {
     try {
       Thread.sleep(sleepTime);
     } catch (InterruptedException e) {
@@ -109,7 +111,7 @@ public class EmbeddedLockManager impleme
     }
   }
 
-  public List<HiveLock> lock(List<HiveLockObj> objs, int numRetriesForLock, int sleepTime)
+  public List<HiveLock> lock(List<HiveLockObj> objs, int numRetriesForLock, long sleepTime)
       throws LockException {
     sortLocks(objs);
     for (int i = 0; i <= numRetriesForLock; i++) {
@@ -132,7 +134,7 @@ public class EmbeddedLockManager impleme
   }
 
   private List<HiveLock> lockPrimitive(List<HiveLockObj> objs, int numRetriesForLock,
-      int sleepTime) throws LockException {
+      long sleepTime) throws LockException {
     List<HiveLock> locks = new ArrayList<HiveLock>();
     for (HiveLockObj obj : objs) {
       HiveLock lock = lockPrimitive(obj.getObj(), obj.getMode());
@@ -164,7 +166,7 @@ public class EmbeddedLockManager impleme
     });
   }
 
-  public void unlock(HiveLock hiveLock, int numRetriesForUnLock, int sleepTime)
+  public void unlock(HiveLock hiveLock, int numRetriesForUnLock, long sleepTime)
       throws LockException {
     String[] paths = hiveLock.getHiveLockObject().getPaths();
     HiveLockObjectData data = hiveLock.getHiveLockObject().getData();
@@ -179,7 +181,7 @@ public class EmbeddedLockManager impleme
     throw new LockException("Failed to release lock " + hiveLock);
   }
 
-  public void releaseLocks(List<HiveLock> hiveLocks, int numRetriesForUnLock, int sleepTime) {
+  public void releaseLocks(List<HiveLock> hiveLocks, int numRetriesForUnLock, long sleepTime) {
     for (HiveLock locked : hiveLocks) {
       try {
         unlock(locked, numRetriesForUnLock, sleepTime);

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java Tue Sep  2 19:56:56 2014
@@ -37,6 +37,7 @@ import org.apache.zookeeper.ZooKeeper;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -53,7 +54,7 @@ public class ZooKeeperHiveLockManager im
   private int sessionTimeout;
   private String quorumServers;
 
-  private int sleepTime;
+  private long sleepTime;
   private int numRetriesForLock;
   private int numRetriesForUnLock;
 
@@ -106,7 +107,8 @@ public class ZooKeeperHiveLockManager im
     sessionTimeout = conf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT);
     quorumServers = ZooKeeperHiveLockManager.getQuorumServers(conf);
 
-    sleepTime = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES) * 1000;
+    sleepTime = conf.getTimeVar(
+        HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
     numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
     numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
 
@@ -132,7 +134,8 @@ public class ZooKeeperHiveLockManager im
   @Override
   public void refresh() {
     HiveConf conf = ctx.getConf();
-    sleepTime = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES) * 1000;
+    sleepTime = conf.getTimeVar(
+        HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
     numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
     numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
   }
@@ -268,7 +271,7 @@ public class ZooKeeperHiveLockManager im
    * @param mode
    *          The mode of the lock
    * @param keepAlive
-   *          Whether the lock is to be persisted after the statement Acuire the
+   *          Whether the lock is to be persisted after the statement. Acquire the
    *          lock. Return null if a conflicting lock is present.
    **/
   public ZooKeeperHiveLock lock(HiveLockObject key, HiveLockMode mode,
@@ -515,8 +518,8 @@ public class ZooKeeperHiveLockManager im
     try {
       int sessionTimeout = conf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT);
       String quorumServers = getQuorumServers(conf);
-      Watcher dummWatcher = new DummyWatcher();
-      zkpClient = new ZooKeeper(quorumServers, sessionTimeout, dummWatcher);
+      Watcher dummyWatcher = new DummyWatcher();
+      zkpClient = new ZooKeeper(quorumServers, sessionTimeout, dummyWatcher);
       String parent = conf.getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_NAMESPACE);
       List<HiveLock> locks = getLocks(conf, zkpClient, null, parent, false, false);
       Exception lastExceptionGot = null;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Tue Sep  2 19:56:56 2014
@@ -89,6 +89,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
@@ -2553,6 +2554,15 @@ private void constructOneLBLocationMap(F
       throw new HiveException(e);
     }
   }
+  
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
+    try {
+      return getMSC().setPartitionColumnStatistics(request);
+    } catch (Exception e) {
+      LOG.debug(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+  }
 
   public List<ColumnStatisticsObj> getTableColumnStatistics(
       String dbName, String tableName, List<String> colNames) throws HiveException {

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Tue Sep  2 19:56:56 2014
@@ -233,6 +233,10 @@ public class Partition implements Serial
     return ret;
   }
 
+  public Path getPartitionPath() {
+    return getDataLocation();
+  }
+
   public Path getDataLocation() {
     if (table.isPartitioned()) {
       return new Path(tPartition.getSd().getLocation());

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java Tue Sep  2 19:56:56 2014
@@ -5,6 +5,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -15,6 +16,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -22,16 +26,23 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.thrift.TException;
 
 public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements IMetaStoreClient {
@@ -74,6 +85,12 @@ public class SessionHiveMetaStoreClient 
     // First try temp table
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
     if (table != null) {
+      try {
+        deleteTempTableColumnStatsForTable(dbname, name);
+      } catch (NoSuchObjectException err) {
+        // No stats to delete, forgivable error.
+        LOG.info(err);
+      }
       dropTempTable(table, deleteData, envContext);
       return;
     }
@@ -220,6 +237,41 @@ public class SessionHiveMetaStoreClient 
     return super.get_privilege_set(hiveObject, userName, groupNames);
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+      throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+      InvalidInputException {
+    String dbName = statsObj.getStatsDesc().getDbName().toLowerCase();
+    String tableName = statsObj.getStatsDesc().getTableName().toLowerCase();
+    if (getTempTable(dbName, tableName) != null) {
+      return updateTempTableColumnStats(dbName, tableName, statsObj);
+    }
+    return super.updateTableColumnStatistics(statsObj);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+      InvalidInputException, InvalidObjectException {
+    if (getTempTable(dbName, tableName) != null) {
+      return getTempTableColumnStats(dbName, tableName, colNames);
+    }
+    return super.getTableColumnStatistics(dbName, tableName, colNames);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+      throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+      InvalidInputException {
+    if (getTempTable(dbName, tableName) != null) {
+      return deleteTempTableColumnStats(dbName, tableName, colName);
+    }
+    return super.deleteTableColumnStatistics(dbName, tableName, colName);
+  }
+
   private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl,
       EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException,
       MetaException, NoSuchObjectException, TException {
@@ -280,15 +332,19 @@ public class SessionHiveMetaStoreClient 
       org.apache.hadoop.hive.metastore.api.Table oldt,
       org.apache.hadoop.hive.metastore.api.Table newt,
       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
-    Table newTable = new Table(deepCopyAndLowerCaseTable(newt));
     dbname = dbname.toLowerCase();
     tbl_name = tbl_name.toLowerCase();
+    boolean shouldDeleteColStats = false;
 
     // Disallow changing temp table location
     if (!newt.getSd().getLocation().equals(oldt.getSd().getLocation())) {
       throw new MetaException("Temp table location cannot be changed");
     }
 
+    org.apache.hadoop.hive.metastore.api.Table newtCopy = deepCopyAndLowerCaseTable(newt);
+    MetaStoreUtils.updateUnpartitionedTableStatsFast(newtCopy,
+        wh.getFileStatusesForSD(newtCopy.getSd()), false, true);
+    Table newTable = new Table(newtCopy);
     String newDbName = newTable.getDbName();
     String newTableName = newTable.getTableName();
     if (!newDbName.equals(oldt.getDbName()) || !newTableName.equals(oldt.getTableName())) {
@@ -306,6 +362,7 @@ public class SessionHiveMetaStoreClient 
       if (tables == null || tables.remove(tbl_name) == null) {
         throw new MetaException("Could not find temp table entry for " + dbname + "." + tbl_name);
       }
+      shouldDeleteColStats = true;
 
       tables = getTempTablesForDatabase(newDbName);
       if (tables == null) {
@@ -314,8 +371,50 @@ public class SessionHiveMetaStoreClient 
       }
       tables.put(newTableName, newTable);
     } else {
+      if (haveTableColumnsChanged(oldt, newt)) {
+        shouldDeleteColStats = true;
+      }
       getTempTablesForDatabase(dbname).put(tbl_name, newTable);
     }
+
+    if (shouldDeleteColStats) {
+      try {
+        deleteTempTableColumnStatsForTable(dbname, tbl_name);
+      } catch (NoSuchObjectException err) {
+        // No stats to delete, forgivable error.
+        LOG.info(err);
+      }
+    }
+  }
+
+  private static boolean haveTableColumnsChanged(org.apache.hadoop.hive.metastore.api.Table oldt,
+      org.apache.hadoop.hive.metastore.api.Table newt) {
+    List<FieldSchema> oldCols = oldt.getSd().getCols();
+    List<FieldSchema> newCols = newt.getSd().getCols();
+    if (oldCols.size() != newCols.size()) {
+      return true;
+    }
+    Iterator<FieldSchema> oldColsIter = oldCols.iterator();
+    Iterator<FieldSchema> newColsIter = newCols.iterator();
+    while (oldColsIter.hasNext()) {
+      // Don't use FieldSchema.equals() since it also compares comments,
+      // which is unnecessary for this method.
+      if (!fieldSchemaEqualsIgnoreComment(oldColsIter.next(), newColsIter.next())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static boolean fieldSchemaEqualsIgnoreComment(FieldSchema left, FieldSchema right) {
+    // Just check name/type for equality, don't compare the comment.
+    if (!left.getName().equals(right.getName())) {
+      return false;
+    }
+    if (!left.getType().equals(right.getType())) {
+      return false;
+    }
+    return true;
   }
 
   private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData,
@@ -376,4 +475,102 @@ public class SessionHiveMetaStoreClient 
     }
     return ss.getTempTables().get(dbName);
   }
+
+  private Map<String, ColumnStatisticsObj> getTempTableColumnStatsForTable(String dbName,
+      String tableName) {
+    SessionState ss = SessionState.get();
+    if (ss == null) {
+      LOG.debug("No current SessionState, skipping temp tables");
+      return null;
+    }
+    String lookupName = StatsUtils.getFullyQualifiedTableName(dbName.toLowerCase(),
+        tableName.toLowerCase());
+    return ss.getTempTableColStats().get(lookupName);
+  }
+
+  private static List<ColumnStatisticsObj> copyColumnStatisticsObjList(Map<String, ColumnStatisticsObj> csoMap) {
+    List<ColumnStatisticsObj> retval = new ArrayList<ColumnStatisticsObj>(csoMap.size());
+    for (ColumnStatisticsObj cso : csoMap.values()) {
+      retval.add(new ColumnStatisticsObj(cso));
+    }
+    return retval;
+  }
+
+  private List<ColumnStatisticsObj> getTempTableColumnStats(String dbName, String tableName,
+      List<String> colNames) {
+    Map<String, ColumnStatisticsObj> tableColStats =
+        getTempTableColumnStatsForTable(dbName, tableName);
+    List<ColumnStatisticsObj> retval = new ArrayList<ColumnStatisticsObj>();
+
+    if (tableColStats != null) {
+      for (String colName : colNames) {
+        colName = colName.toLowerCase();
+        if (tableColStats.containsKey(colName)) {
+          retval.add(new ColumnStatisticsObj(tableColStats.get(colName)));
+        }
+      }
+    }
+    return retval;
+  }
+
+  private boolean updateTempTableColumnStats(String dbName, String tableName,
+      ColumnStatistics colStats) throws MetaException {
+    SessionState ss = SessionState.get();
+    if (ss == null) {
+      throw new MetaException("No current SessionState, cannot update temporary table stats for "
+          + dbName + "." + tableName);
+    }
+    Map<String, ColumnStatisticsObj> ssTableColStats =
+        getTempTableColumnStatsForTable(dbName, tableName);
+    if (ssTableColStats == null) {
+      // Add new entry for this table
+      ssTableColStats = new HashMap<String, ColumnStatisticsObj>();
+      ss.getTempTableColStats().put(
+          StatsUtils.getFullyQualifiedTableName(dbName, tableName),
+          ssTableColStats);
+    }
+    mergeColumnStats(ssTableColStats, colStats);
+    return true;
+  }
+
+  private static void mergeColumnStats(Map<String, ColumnStatisticsObj> oldStats,
+      ColumnStatistics newStats) {
+    List<ColumnStatisticsObj> newColList = newStats.getStatsObj();
+    if (newColList != null) {
+      for (ColumnStatisticsObj colStat : newColList) {
+        // This is admittedly a bit simple; StatsObjectConverter seems to allow
+        // old stats attributes to be kept if the new values do not overwrite them.
+        oldStats.put(colStat.getColName().toLowerCase(), colStat);
+      }
+    }
+  }
+
+  private boolean deleteTempTableColumnStatsForTable(String dbName, String tableName)
+      throws NoSuchObjectException {
+    Map<String, ColumnStatisticsObj> deletedEntry =
+        getTempTableColumnStatsForTable(dbName, tableName);
+    if (deletedEntry != null) {
+      SessionState.get().getTempTableColStats().remove(
+          StatsUtils.getFullyQualifiedTableName(dbName, tableName));
+    } else {
+      throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName +
+          " temp table=" + tableName);
+    }
+    return true;
+  }
+
+  private boolean deleteTempTableColumnStats(String dbName, String tableName, String columnName)
+      throws NoSuchObjectException {
+    ColumnStatisticsObj deletedEntry = null;
+    Map<String, ColumnStatisticsObj> ssTableColStats =
+        getTempTableColumnStatsForTable(dbName, tableName);
+    if (ssTableColStats != null) {
+      deletedEntry = ssTableColStats.remove(columnName.toLowerCase());
+    }
+    if (deletedEntry == null) {
+      throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName +
+          " temp table=" + tableName);
+    }
+    return true;
+  }
 }
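
Taken together, the temp-table statistics above live only in the SessionState,
as a two-level map: fully qualified table name -> (column name ->
ColumnStatisticsObj), with all keys lowercased. A schematic sketch of that
structure under those assumptions (plain String/Object stand-ins, not the real
metastore types or SessionState fields):

    import java.util.HashMap;
    import java.util.Map;

    class TempTableColStatsSketch {
      // "db.table" -> (column name -> stats object); the real code stores
      // org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj values.
      private final Map<String, Map<String, Object>> tempTableColStats =
          new HashMap<String, Map<String, Object>>();

      void merge(String dbTable, String col, Object statsObj) {
        String key = dbTable.toLowerCase();
        Map<String, Object> colStats = tempTableColStats.get(key);
        if (colStats == null) {
          colStats = new HashMap<String, Object>();
          tempTableColStats.put(key, colStats);
        }
        // Last write wins, matching mergeColumnStats() above.
        colStats.put(col.toLowerCase(), statsObj);
      }
    }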

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java Tue Sep  2 19:56:56 2014
@@ -22,7 +22,10 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.ListIterator;
 
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -60,8 +63,9 @@ public class VirtualColumn implements Se
   public static final VirtualColumn GROUPINGID =
       new VirtualColumn("GROUPING__ID", (PrimitiveTypeInfo) TypeInfoFactory.intTypeInfo);
 
-  public static final VirtualColumn[] VIRTUAL_COLUMNS =
-      new VirtualColumn[] {FILENAME, BLOCKOFFSET, ROWOFFSET, RAWDATASIZE, GROUPINGID, ROWID};
+  public static final ImmutableSet<String> VIRTUAL_COLUMN_NAMES =
+      ImmutableSet.of(FILENAME.getName(), BLOCKOFFSET.getName(), ROWOFFSET.getName(),
+          RAWDATASIZE.getName(), GROUPINGID.getName(), ROWID.getName());
 
   private final String name;
   private final TypeInfo typeInfo;
@@ -139,12 +143,29 @@ public class VirtualColumn implements Se
     return  31 * typeInfo.getTypeName().hashCode() + c;
   }
   public static Collection<String> removeVirtualColumns(final Collection<String> columns) {
-    for(VirtualColumn vcol : VIRTUAL_COLUMNS) {
-      columns.remove(vcol.getName());
-    }
+    Iterables.removeAll(columns, VIRTUAL_COLUMN_NAMES);
     return columns;
   }
 
+  public static List<TypeInfo> removeVirtualColumnTypes(final List<String> columnNames,
+      final List<TypeInfo> columnTypes) {
+    if (columnNames.size() != columnTypes.size()) {
+      throw new IllegalArgumentException("Number of column names in configuration " +
+          columnNames.size() + " differs from column types " + columnTypes.size());
+    }
+
+    int i = 0;
+    ListIterator<TypeInfo> it = columnTypes.listIterator();
+    while(it.hasNext()) {
+      it.next();
+      if (VIRTUAL_COLUMN_NAMES.contains(columnNames.get(i))) {
+        it.remove();
+      }
+      ++i;
+    }
+    return columnTypes;
+  }
+
   public static StructObjectInspector getVCSObjectInspector(List<VirtualColumn> vcs) {
     List<String> names = new ArrayList<String>(vcs.size());
     List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>(vcs.size());
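
Exposing the names as an ImmutableSet turns the old array scans into
constant-time contains() checks, and bulk removal becomes Iterables.removeAll
(as in removeVirtualColumns above). A hedged sketch of pruning parallel
name/type lists the way removeVirtualColumnTypes does, using two real virtual
column names and made-up user columns:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Iterables;

    class VirtualColumnPruneExample {
      static final ImmutableSet<String> VIRTUAL_COLUMN_NAMES =
          ImmutableSet.of("INPUT__FILE__NAME", "BLOCK__OFFSET__INSIDE__FILE");

      public static void main(String[] args) {
        List<String> names = new ArrayList<String>(
            Arrays.asList("key", "INPUT__FILE__NAME", "value"));
        List<String> types = new ArrayList<String>(
            Arrays.asList("string", "string", "int"));
        // Remove each type whose parallel name is a virtual column...
        for (int i = names.size() - 1; i >= 0; i--) {
          if (VIRTUAL_COLUMN_NAMES.contains(names.get(i))) {
            types.remove(i);
          }
        }
        // ...then drop the names themselves, as removeVirtualColumns() does.
        Iterables.removeAll(names, VIRTUAL_COLUMN_NAMES);
        System.out.println(names + " / " + types);  // [key, value] / [string, int]
      }
    }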

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java Tue Sep  2 19:56:56 2014
@@ -369,7 +369,7 @@ public final class ColumnPrunerProcFacto
         return null;
       }
       cols = cols == null ? new ArrayList<String>() : cols;
-      
+
       cppCtx.getPrunedColLists().put((Operator<? extends OperatorDesc>) nd,
           cols);
       RowResolver inputRR = cppCtx.getOpToParseCtxMap().get(scanOp).getRowResolver();
@@ -479,13 +479,13 @@ public final class ColumnPrunerProcFacto
           flags[index] = true;
           colLists = Utilities.mergeUniqElems(colLists, valCols.get(index).getCols());
         }
-        
+
         Collections.sort(colLists);
         pruneReduceSinkOperator(flags, op, cppCtx);
         cppCtx.getPrunedColLists().put(op, colLists);
         return null;
       }
-      
+
       // Reduce Sink contains the columns needed - no need to aggregate from
       // children
       for (ExprNodeDesc val : valCols) {
@@ -519,7 +519,7 @@ public final class ColumnPrunerProcFacto
       if (cols == null) {
         return null;
       }
-      
+
       Map<String, ExprNodeDesc> colExprMap = op.getColumnExprMap();
       // As columns go down the DAG, the LVJ will transform internal column
       // names from something like 'key' to '_col0'. Because of this, we need
@@ -604,8 +604,8 @@ public final class ColumnPrunerProcFacto
         Object... nodeOutputs) throws SemanticException {
       SelectOperator op = (SelectOperator) nd;
       ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
-      
-      
+
+
       if (op.getChildOperators() != null) {
         for (Operator<? extends OperatorDesc> child : op.getChildOperators()) {
           // UDTF is not handled yet, so the parent SelectOp of UDTF should just assume
@@ -858,11 +858,11 @@ public final class ColumnPrunerProcFacto
     if (inputSchema != null) {
       ArrayList<ColumnInfo> rs = new ArrayList<ColumnInfo>();
       ArrayList<ColumnInfo> inputCols = inputSchema.getSignature();
-    	for (ColumnInfo i: inputCols) {
+      for (ColumnInfo i: inputCols) {
         if (cols.contains(i.getInternalName())) {
           rs.add(i);
         }
-    	}
+      }
       op.getSchema().setSignature(rs);
     }
   }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java Tue Sep  2 19:56:56 2014
@@ -4,9 +4,9 @@
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License. You may obtain a
  * copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software distributed under the License
  * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
  * or implied. See the License for the specific language governing permissions and limitations under
@@ -100,7 +100,7 @@ public final class ConstantPropagateProc
 
   /**
    * Get ColumnInfo from column expression.
-   * 
+   *
    * @param rr
    * @param desc
    * @return
@@ -139,7 +139,7 @@ public final class ConstantPropagateProc
 
   /**
    * Cast type from expression type to expected type ti.
-   * 
+   *
    * @param desc constant expression
    * @param ti expected type info
    * @return cast constant, or null if the type cast failed.
@@ -189,10 +189,10 @@ public final class ConstantPropagateProc
 
   /**
    * Fold input expression desc.
-   * 
+   *
    * If desc is a UDF and all parameters are constants, evaluate it. If desc is a column expression,
    * find it from propagated constants, and if there is, replace it with constant.
-   * 
+   *
    * @param desc folding expression
    * @param constants current propagated constant map
    * @param cppCtx
@@ -296,7 +296,7 @@ public final class ConstantPropagateProc
 
   /**
    * Propagate assignment expression, adding an entry into constant map constants.
-   * 
+   *
    * @param udf expression UDF, currently only 2 UDFs are supported: '=' and 'is null'.
    * @param newExprs child expressions (parameters).
    * @param cppCtx
@@ -350,7 +350,7 @@ public final class ConstantPropagateProc
           ExprNodeConstantDesc c = (ExprNodeConstantDesc) childExpr;
           if (Boolean.TRUE.equals(c.getValue())) {
 
-        	  // if true, prune it
+            // if true, prune it
             return newExprs.get(Math.abs(i - 1));
           } else {
 
@@ -384,7 +384,7 @@ public final class ConstantPropagateProc
 
   /**
    * Evaluate column, replace the deterministic columns with constants if possible
-   * 
+   *
    * @param desc
    * @param ctx
    * @param op
@@ -435,7 +435,7 @@ public final class ConstantPropagateProc
 
   /**
    * Evaluate UDF
-   * 
+   *
    * @param udf UDF object
    * @param exprs
    * @param oldExprs
@@ -512,7 +512,7 @@ public final class ConstantPropagateProc
 
   /**
    * Change operator row schema, replace column with constant if it is.
-   * 
+   *
    * @param op
    * @param constants
    * @throws SemanticException
@@ -584,7 +584,7 @@ public final class ConstantPropagateProc
 
   /**
    * Factory method to get the ConstantPropagateFilterProc class.
-   * 
+   *
    * @return ConstantPropagateFilterProc
    */
   public static ConstantPropagateFilterProc getFilterProc() {
@@ -621,7 +621,7 @@ public final class ConstantPropagateProc
 
   /**
    * Factory method to get the ConstantPropagateGroupByProc class.
-   * 
+   *
    * @return ConstantPropagateGroupByProc
    */
   public static ConstantPropagateGroupByProc getGroupByProc() {
@@ -650,7 +650,7 @@ public final class ConstantPropagateProc
 
   /**
    * Factory method to get the ConstantPropagateDefaultProc class.
-   * 
+   *
    * @return ConstantPropagateDefaultProc
    */
   public static ConstantPropagateDefaultProc getDefaultProc() {
@@ -683,7 +683,7 @@ public final class ConstantPropagateProc
 
   /**
    * The Factory method to get the ConstantPropagateSelectProc class.
-   * 
+   *
    * @return ConstantPropagateSelectProc
    */
   public static ConstantPropagateSelectProc getSelectProc() {
@@ -877,7 +877,7 @@ public final class ConstantPropagateProc
         return null;
       }
 
-      // Note: the following code (removing folded constants in exprs) is deeply coupled with 
+      // Note: the following code (removing folded constants in exprs) is deeply coupled with
       //    ColumnPruner optimizer.
       // Assuming ColumnPrunner will remove constant columns so we don't deal with output columns.
       //    Except one case that the join operator is followed by a redistribution (RS operator).

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java Tue Sep  2 19:56:56 2014
@@ -353,14 +353,14 @@ public class OpProcFactory {
           if (inpOp.getSchema() != null && inpOp.getSchema().getSignature() != null ) {
             for(ColumnInfo ci : inpOp.getSchema().getSignature()) {
               Dependency inp_dep = lctx.getIndex().getDependency(inpOp, ci);
-            	// The dependency can be null as some of the input cis may not have
-            	// been set in case of joins.
-            	if (inp_dep != null) {
-            	  for(BaseColumnInfo bci : inp_dep.getBaseCols()) {
-            	    new_type = LineageCtx.getNewDependencyType(inp_dep.getType(), new_type);
-            	    tai_set.add(bci.getTabAlias());
-            	  }
-            	}
+              // The dependency can be null as some of the input cis may not have
+              // been set in case of joins.
+              if (inp_dep != null) {
+                for(BaseColumnInfo bci : inp_dep.getBaseCols()) {
+                  new_type = LineageCtx.getNewDependencyType(inp_dep.getType(), new_type);
+                  tai_set.add(bci.getTabAlias());
+                }
+              }
             }
           }
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Tue Sep  2 19:56:56 2014
@@ -815,9 +815,11 @@ public class Vectorizer implements Physi
         ret = validateSelectOperator((SelectOperator) op);
         break;
       case REDUCESINK:
-          ret = validateReduceSinkOperator((ReduceSinkOperator) op);
-          break;
+        ret = validateReduceSinkOperator((ReduceSinkOperator) op);
+        break;
       case FILESINK:
+        ret = validateFileSinkOperator((FileSinkOperator) op);
+        break;
       case LIMIT:
         ret = true;
         break;
@@ -899,6 +901,15 @@ public class Vectorizer implements Physi
     return true;
   }
 
+  private boolean validateFileSinkOperator(FileSinkOperator op) {
+    // HIVE-7557: For now, turn off dynamic partitioning to give more time to
+    // figure out how to make VectorFileSink work correctly with it...
+    if (op.getConf().getDynPartCtx() != null) {
+      return false;
+    }
+    return true;
+  }
+
   private boolean validateExprNodeDesc(List<ExprNodeDesc> descs) {
     return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION);
   }
@@ -927,11 +938,9 @@ public class Vectorizer implements Physi
     if (desc instanceof ExprNodeColumnDesc) {
       ExprNodeColumnDesc c = (ExprNodeColumnDesc) desc;
       // Currently, we do not support vectorized virtual columns (see HIVE-5570).
-      for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) {
-        if (c.getColumn().equals(vc.getName())) {
-            LOG.info("Cannot vectorize virtual column " + c.getColumn());
-            return false;
-        }
+      if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(c.getColumn())) {
+        LOG.info("Cannot vectorize virtual column " + c.getColumn());
+        return false;
       }
     }
     String typeName = desc.getTypeInfo().getTypeName();
@@ -1076,10 +1085,8 @@ public class Vectorizer implements Physi
 
     // Not using method column.getIsVirtualCol() because partitioning columns are also
     // treated as virtual columns in ColumnInfo.
-    for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) {
-      if (column.getInternalName().equals(vc.getName())) {
-        return true;
-      }
+    if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(column.getInternalName())) {
+      return true;
     }
     return false;
   }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Tue Sep  2 19:56:56 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimi
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -103,7 +104,8 @@ public class StatsRulesProcFactory {
         tsop.setStatistics(stats.clone());
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug("[0] STATS-" + tsop.toString() + ": " + stats.extendedToString());
+          LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName()
+              + "): " + stats.extendedToString());
         }
       } catch (CloneNotSupportedException e) {
         throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg());
@@ -811,6 +813,7 @@ public class StatsRulesProcFactory {
           // 2 relations, multiple attributes
           boolean multiAttr = false;
           int numAttr = 1;
+          int numParent = parents.size();
 
           Map<String, ColStatistics> joinedColStats = Maps.newHashMap();
           Map<Integer, List<String>> joinKeys = Maps.newHashMap();
@@ -873,12 +876,19 @@ public class StatsRulesProcFactory {
                   perAttrDVs.add(cs.getCountDistint());
                 }
               }
+
               distinctVals.add(getDenominator(perAttrDVs));
               perAttrDVs.clear();
             }
 
-            for (Long l : distinctVals) {
-              denom *= l;
+            if (numAttr > numParent) {
+              // To avoid the denominator growing too large and aggressively
+              // reducing the estimated number of rows, ease out the denominator.
+              denom = getEasedOutDenominator(distinctVals);
+            } else {
+              for (Long l : distinctVals) {
+                denom *= l;
+              }
             }
           } else {
             for (List<String> jkeys : joinKeys.values()) {
@@ -983,6 +993,20 @@ public class StatsRulesProcFactory {
       return null;
     }
 
+    private Long getEasedOutDenominator(List<Long> distinctVals) {
+      // Exponential back-off for NDVs:
+      // 1) Sort the NDVs in descending order
+      // 2) denominator = NDV1 * (NDV2 ^ (1/2)) * (NDV3 ^ (1/4)) * ...
+      Collections.sort(distinctVals, Collections.reverseOrder());
+
+      long denom = distinctVals.get(0);
+      for (int i = 1; i < distinctVals.size(); i++) {
+        denom = (long) (denom * Math.pow(distinctVals.get(i), 1.0 / (1 << i)));
+      }
+
+      return denom;
+    }
+
     private void updateStatsForJoinType(Statistics stats, long newNumRows,
         JoinDesc conf, Map<String, Long> rowCountParents,
         Map<String, String> outInTabAlias) {
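
To see what the back-off above buys, take three join-key NDVs of 1000, 100
and 16 (made-up values, sorted descending):

    plain product:   1000 * 100 * 16                 = 1,600,000
    eased-out:       1000 * 100^(1/2) * 16^(1/4)
                   = 1000 * 10 * 2                   =    20,000

The eased-out denominator still shrinks the estimated join cardinality, but
far less aggressively than the raw product would.
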
@@ -1069,7 +1093,9 @@ public class StatsRulesProcFactory {
             String key = entry.getValue().get(joinColIdx);
             key = StatsUtils.stripPrefixFromColumnName(key);
             ColStatistics cs = joinedColStats.get(key);
-            cs.setCountDistint(minNDV);
+            if (cs != null) {
+              cs.setCountDistint(minNDV);
+            }
           }
         }
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Tue Sep  2 19:56:56 2014
@@ -48,6 +48,7 @@ import org.apache.hadoop.hive.ql.QueryPr
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -61,7 +62,6 @@ import org.apache.hadoop.hive.ql.optimiz
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
@@ -317,7 +317,7 @@ public abstract class BaseSemanticAnalyz
       return new String[] {dbName, tableName};
     }
     String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText());
-    return new String[]{SessionState.get().getCurrentDatabase(), tableName};
+    return Utilities.getDbTableName(tableName);
   }
 
   public static String getDotName(String[] qname) throws SemanticException {
@@ -646,6 +646,20 @@ public abstract class BaseSemanticAnalyz
       this(db, conf, ast, true, false);
     }
 
+    public tableSpec(Hive db, HiveConf conf, String tableName, Map<String, String> partSpec)
+        throws HiveException {
+      this.tableName = tableName;
+      this.partSpec = partSpec;
+      this.tableHandle = db.getTable(tableName);
+      if (partSpec != null) {
+        this.specType = SpecType.STATIC_PARTITION;
+        this.partHandle = db.getPartition(tableHandle, partSpec, false);
+        this.partitions = Arrays.asList(partHandle);
+      } else {
+        this.specType = SpecType.TABLE_ONLY;
+      }
+    }
+
     public tableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartitionsSpec,
         boolean allowPartialPartitionsSpec) throws SemanticException {
       assert (ast.getToken().getType() == HiveParser.TOK_TAB
@@ -1188,21 +1202,26 @@ public abstract class BaseSemanticAnalyz
   }
 
   protected Database getDatabase(String dbName, boolean throwException) throws SemanticException {
+    Database database;
     try {
-      Database database = db.getDatabase(dbName);
-      if (database == null && throwException) {
-        throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName));
-      }
-      return database;
-    } catch (HiveException e) {
+      database = db.getDatabase(dbName);
+    } catch (Exception e) {
       throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName), e);
     }
+    if (database == null && throwException) {
+      throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName));
+    }
+    return database;
   }
 
   protected Table getTable(String[] qualified) throws SemanticException {
     return getTable(qualified[0], qualified[1], true);
   }
 
+  protected Table getTable(String[] qualified, boolean throwException) throws SemanticException {
+    return getTable(qualified[0], qualified[1], throwException);
+  }
+
   protected Table getTable(String tblName) throws SemanticException {
     return getTable(null, tblName, true);
   }
@@ -1213,43 +1232,46 @@ public abstract class BaseSemanticAnalyz
 
   protected Table getTable(String database, String tblName, boolean throwException)
       throws SemanticException {
+    Table tab;
     try {
-      Table tab = database == null ? db.getTable(tblName, false)
+      tab = database == null ? db.getTable(tblName, false)
           : db.getTable(database, tblName, false);
-      if (tab == null && throwException) {
-        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-      }
-      return tab;
-    } catch (HiveException e) {
+    } catch (Exception e) {
       throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName), e);
     }
+    if (tab == null && throwException) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+    }
+    return tab;
   }
 
   protected Partition getPartition(Table table, Map<String, String> partSpec,
       boolean throwException) throws SemanticException {
+    Partition partition;
     try {
-      Partition partition = db.getPartition(table, partSpec, false);
-      if (partition == null && throwException) {
-        throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
-      }
-      return partition;
-    } catch (HiveException e) {
+      partition = db.getPartition(table, partSpec, false);
+    } catch (Exception e) {
       throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
     }
+    if (partition == null && throwException) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
+    }
+    return partition;
   }
 
   protected List<Partition> getPartitions(Table table, Map<String, String> partSpec,
       boolean throwException) throws SemanticException {
+    List<Partition> partitions;
     try {
-      List<Partition> partitions = partSpec == null ? db.getPartitions(table) :
+      partitions = partSpec == null ? db.getPartitions(table) :
           db.getPartitions(table, partSpec);
-      if (partitions.isEmpty() && throwException) {
-        throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
-      }
-      return partitions;
-    } catch (HiveException e) {
+    } catch (Exception e) {
       throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
     }
+    if (partitions.isEmpty() && throwException) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
+    }
+    return partitions;
   }
 
   protected String toMessage(ErrorMsg message, Object detail) {
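
The reshuffled lookups above all follow one pattern: because SemanticException
extends HiveException, a not-found SemanticException thrown inside the try
block would have been caught and rewrapped by the method's own catch clause,
so the null/empty check now runs after the try (and the catch is broadened to
Exception). A minimal, self-contained sketch of the pattern with stand-in
types:

    class LookupAfterTryPattern {
      static class NotFoundException extends Exception {
        NotFoundException(String m) { super(m); }
        NotFoundException(String m, Throwable c) { super(m, c); }
      }

      // Stand-in for db.getTable(...): may return null or throw.
      static String lookup(String name) throws Exception {
        return null;
      }

      static String getOrFail(String name) throws NotFoundException {
        String result;
        try {
          result = lookup(name);
        } catch (Exception e) {
          throw new NotFoundException("not found: " + name, e);
        }
        // Checked outside the try: if this throw sat inside it and
        // NotFoundException were a subtype of the caught type (as
        // SemanticException is of HiveException), it would be swallowed
        // and rewrapped.
        if (result == null) {
          throw new NotFoundException("not found: " + name);
        }
        return result;
      }
    }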

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java Tue Sep  2 19:56:56 2014
@@ -363,7 +363,6 @@ public class ColumnStatsSemanticAnalyzer
       originalTree = tree;
       boolean isPartitionStats = isPartitionLevelStats(tree);
       Map<String,String> partSpec = null;
-      checkIfTemporaryTable();
       checkForPartitionColumns(colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
       validateSpecifiedColumnNames(colNames);
       if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {
@@ -414,13 +413,6 @@ public class ColumnStatsSemanticAnalyzer
     }
   }
 
-  private void checkIfTemporaryTable() throws SemanticException {
-    if (tbl.isTemporary()) {
-      throw new SemanticException(tbl.getTableName()
-          + " is a temporary table.  Column statistics are not supported on temporary tables.");
-    }
-  }
-
   @Override
   public void analyze(ASTNode ast, Context origCtx) throws SemanticException {
     QB qb;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Sep  2 19:56:56 2014
@@ -249,39 +249,67 @@ public class DDLSemanticAnalyzer extends
   }
 
   @Override
-  public void analyzeInternal(ASTNode ast) throws SemanticException {
+  public void analyzeInternal(ASTNode input) throws SemanticException {
 
-    switch (ast.getToken().getType()) {
-    case HiveParser.TOK_ALTERTABLE_PARTITION: {
-      ASTNode tablePart = (ASTNode) ast.getChild(0);
-      TablePartition tblPart = new TablePartition(tablePart);
-      String tableName = tblPart.tableName;
-      HashMap<String, String> partSpec = tblPart.partSpec;
-      ast = (ASTNode) ast.getChild(1);
-      if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
+    ASTNode ast = input;
+    switch (ast.getType()) {
+    case HiveParser.TOK_ALTERTABLE: {
+      ast = (ASTNode) input.getChild(1);
+      String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0));
+      String tableName = getDotName(qualified);
+      HashMap<String, String> partSpec = DDLSemanticAnalyzer.getPartSpec((ASTNode) input.getChild(2));
+      if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) {
+        analyzeAlterTableRename(qualified, ast, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) {
+        analyzeAlterTableTouch(qualified, ast);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ARCHIVE) {
+        analyzeAlterTableArchive(qualified, ast, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) {
+        analyzeAlterTableArchive(qualified, ast, true);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) {
+        analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.ADDCOLS);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) {
+        analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.REPLACECOLS);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) {
+        analyzeAlterTableRenameCol(qualified, ast);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) {
+        analyzeAlterTableAddParts(qualified, ast, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) {
+        analyzeAlterTableDropParts(qualified, ast, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) {
+        analyzeAlterTablePartColType(qualified, ast);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) {
+        analyzeAlterTableProps(qualified, ast, false, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) {
+        analyzeAlterTableProps(qualified, ast, false, true);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) {
+        analyzeAltertableSkewedby(qualified, ast);
+      } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) {
+        analyzeExchangePartition(qualified, ast);
+      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
         analyzeAlterTableFileFormat(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) {
         analyzeAlterTableProtectMode(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) {
         analyzeAlterTableLocation(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) {
-        analyzeAlterTablePartMergeFiles(tablePart, ast, tableName, partSpec);
+        analyzeAlterTablePartMergeFiles(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) {
         analyzeAlterTableSerde(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) {
         analyzeAlterTableSerdeProps(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
         analyzeAlterTableRenamePart(ast, tableName, partSpec);
-      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION) {
+      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) {
         analyzeAlterTableSkewedLocation(ast, tableName, partSpec);
-      } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) {
+      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS) {
         analyzeAlterTableBucketNum(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) {
         analyzeAlterTableClusterSort(ast, tableName, partSpec);
-      } else if (ast.getToken().getType() == HiveParser.TOK_COMPACT) {
+      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_COMPACT) {
         analyzeAlterTableCompact(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS) {
-        analyzeAlterTableUpdateStats(ast,tblPart);
+        analyzeAlterTableUpdateStats(ast, tableName, partSpec);
       }
       break;
     }
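
The rewritten dispatch assumes the grammar now emits a single TOK_ALTERTABLE node: child 0 carries the qualified table name, child 1 the operation-specific subtree, and child 2 an optional partition spec, so the per-operation analyzers no longer parse the name themselves. The same shift explains the child-index changes further down (getChild(1) becoming getChild(0) in analyzeAlterTableRenameCol and friends). A minimal, self-contained sketch of that layout, using a hypothetical Node class and token ids rather than Hive's ASTNode, for illustration only:

    import java.util.Arrays;
    import java.util.List;

    public class AlterTableDispatchSketch {
      // Hypothetical token ids standing in for the HiveParser constants.
      static final int TOK_TABNAME = 0;
      static final int TOK_ALTERTABLE = 1;
      static final int TOK_ALTERTABLE_RENAME = 2;

      // Tiny stand-in for ASTNode, just enough to show the child layout.
      static class Node {
        final int type;
        final String text;
        final List<Node> children;
        Node(int type, String text, Node... children) {
          this.type = type;
          this.text = text;
          this.children = Arrays.asList(children);
        }
      }

      // Mirrors the new analyzeInternal: name at child 0, operation at
      // child 1, optional partition spec at child 2.
      static void analyze(Node input) {
        if (input.type != TOK_ALTERTABLE) {
          return;
        }
        String tableName = input.children.get(0).text;
        Node op = input.children.get(1);
        boolean hasPartSpec = input.children.size() > 2;
        if (op.type == TOK_ALTERTABLE_RENAME) {
          System.out.println("rename " + tableName + " to " + op.children.get(0).text);
        } else {
          System.out.println("op " + op.type + " on " + tableName
              + (hasPartSpec ? " (with partition spec)" : ""));
        }
      }

      public static void main(String[] args) {
        analyze(new Node(TOK_ALTERTABLE, null,
            new Node(TOK_TABNAME, "db.src"),
            new Node(TOK_ALTERTABLE_RENAME, null, new Node(TOK_TABNAME, "db.dst"))));
        // prints: rename db.src to db.dst
      }
    }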
@@ -360,66 +388,22 @@ public class DDLSemanticAnalyzer extends
     case HiveParser.TOK_DROPVIEW:
       analyzeDropTable(ast, true);
       break;
-    case HiveParser.TOK_ALTERVIEW_PROPERTIES:
-      analyzeAlterTableProps(ast, true, false);
-      break;
-    case HiveParser.TOK_DROPVIEW_PROPERTIES:
-      analyzeAlterTableProps(ast, true, true);
-      break;
-    case HiveParser.TOK_ALTERVIEW_ADDPARTS:
-      // for ALTER VIEW ADD PARTITION, we wrapped the ADD to discriminate
-      // view from table; unwrap it now
-      analyzeAlterTableAddParts((ASTNode) ast.getChild(0), true);
-      break;
-    case HiveParser.TOK_ALTERVIEW_DROPPARTS:
-      // for ALTER VIEW DROP PARTITION, we wrapped the DROP to discriminate
-      // view from table; unwrap it now
-      analyzeAlterTableDropParts((ASTNode) ast.getChild(0), true);
-      break;
-    case HiveParser.TOK_ALTERVIEW_RENAME:
-      // for ALTER VIEW RENAME, we wrapped the RENAME to discriminate
-      // view from table; unwrap it now
-      analyzeAlterTableRename(((ASTNode) ast.getChild(0)), true);
-      break;
-    case HiveParser.TOK_ALTERTABLE_RENAME:
-      analyzeAlterTableRename(ast, false);
-      break;
-    case HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS:
-      analyzeAlterTableUpdateStats(ast, null);
-      break;
-    case HiveParser.TOK_ALTERTABLE_TOUCH:
-      analyzeAlterTableTouch(ast);
-      break;
-    case HiveParser.TOK_ALTERTABLE_ARCHIVE:
-      analyzeAlterTableArchive(ast, false);
-      break;
-    case HiveParser.TOK_ALTERTABLE_UNARCHIVE:
-      analyzeAlterTableArchive(ast, true);
-      break;
-    case HiveParser.TOK_ALTERTABLE_ADDCOLS:
-      analyzeAlterTableModifyCols(ast, AlterTableTypes.ADDCOLS);
-      break;
-    case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
-      analyzeAlterTableModifyCols(ast, AlterTableTypes.REPLACECOLS);
-      break;
-    case HiveParser.TOK_ALTERTABLE_RENAMECOL:
-      analyzeAlterTableRenameCol(ast);
-      break;
-    case HiveParser.TOK_ALTERTABLE_ADDPARTS:
-      analyzeAlterTableAddParts(ast, false);
-      break;
-    case HiveParser.TOK_ALTERTABLE_DROPPARTS:
-      analyzeAlterTableDropParts(ast, false);
-      break;
-    case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE:
-      analyzeAlterTablePartColType(ast);
-      break;
-    case HiveParser.TOK_ALTERTABLE_PROPERTIES:
-      analyzeAlterTableProps(ast, false, false);
-      break;
-    case HiveParser.TOK_DROPTABLE_PROPERTIES:
-      analyzeAlterTableProps(ast, false, true);
+    case HiveParser.TOK_ALTERVIEW: {
+      String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+      ast = (ASTNode) ast.getChild(1);
+      if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) {
+        analyzeAlterTableProps(qualified, ast, true, false);
+      } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) {
+        analyzeAlterTableProps(qualified, ast, true, true);
+      } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) {
+        analyzeAlterTableAddParts(qualified, ast, true);
+      } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) {
+        analyzeAlterTableDropParts(qualified, ast, true);
+      } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) {
+        analyzeAlterTableRename(qualified, ast, true);
+      }
       break;
+    }
     case HiveParser.TOK_ALTERINDEX_REBUILD:
       analyzeAlterIndexRebuild(ast);
       break;
@@ -499,12 +483,6 @@ public class DDLSemanticAnalyzer extends
     case HiveParser.TOK_REVOKE:
       analyzeRevoke(ast);
       break;
-    case HiveParser.TOK_ALTERTABLE_SKEWED:
-      analyzeAltertableSkewedby(ast);
-      break;
-   case HiveParser.TOK_EXCHANGEPARTITION:
-      analyzeExchangePartition(ast);
-      break;
    case HiveParser.TOK_SHOW_SET_ROLE:
      analyzeSetShowRole(ast);
      break;
@@ -516,20 +494,14 @@ public class DDLSemanticAnalyzer extends
     }
   }
 
-  private void analyzeAlterTableUpdateStats(ASTNode ast, TablePartition tblPart)
+  private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map<String, String> partSpec)
       throws SemanticException {
-    String tblName = null;
-    String colName = null;
-    Map<String, String> mapProp = null;
-    Map<String, String> partSpec = null;
+    String colName = getUnescapedName((ASTNode) ast.getChild(0));
+    Map<String, String> mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0));
+
+    Table tbl = getTable(tblName);
     String partName = null;
-    if (tblPart == null) {
-      tblName = getUnescapedName((ASTNode) ast.getChild(0));
-      colName = getUnescapedName((ASTNode) ast.getChild(1));
-      mapProp = getProps((ASTNode) (ast.getChild(2)).getChild(0));
-    } else {
-      tblName = tblPart.tableName;
-      partSpec = tblPart.partSpec;
+    if (partSpec != null) {
       try {
         partName = Warehouse.makePartName(partSpec, false);
       } catch (MetaException e) {
@@ -537,15 +509,6 @@ public class DDLSemanticAnalyzer extends
         throw new SemanticException("partition " + partSpec.toString()
             + " not found");
       }
-      colName = getUnescapedName((ASTNode) ast.getChild(0));
-      mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0));
-    }
-
-    Table tbl = null;
-    try {
-      tbl = db.getTable(tblName);
-    } catch (HiveException e) {
-      throw new SemanticException("table " + tbl + " not found");
     }
 
     String colType = null;
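
With the partition spec handed in by the caller, analyzeAlterTableUpdateStats now builds the partition name with Warehouse.makePartName. A small sketch of what that call produces, assuming only that the hive-metastore classes are on the classpath (the spec values are illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class PartNameSketch {
      public static void main(String[] args) throws MetaException {
        // LinkedHashMap preserves the partition-column order.
        Map<String, String> partSpec = new LinkedHashMap<String, String>();
        partSpec.put("ds", "2014-09-02");
        partSpec.put("hr", "12");
        // false = no trailing separator; prints: ds=2014-09-02/hr=12
        System.out.println(Warehouse.makePartName(partSpec, false));
      }
    }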
@@ -711,12 +674,12 @@ public class DDLSemanticAnalyzer extends
     addAlterDbDesc(alterDesc);
   }
 
-  private void analyzeExchangePartition(ASTNode ast) throws SemanticException {
-    Table destTable =  getTable(getUnescapedName((ASTNode)ast.getChild(0)));
-    Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(2)));
+  private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException {
+    Table destTable = getTable(qualified);
+    Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(1)));
 
     // Get the partition specs
-    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(1));
+    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(0));
     validatePartitionValues(partSpecs);
     boolean sameColumns = MetaStoreUtils.compareFieldColumns(
         destTable.getAllCols(), sourceTable.getAllCols());
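
Since the enclosing TOK_ALTERTABLE now carries the table name, the partition spec and source table move down to children 0 and 1. The schema check itself is unchanged; a short sketch of the MetaStoreUtils.compareFieldColumns contract that EXCHANGE PARTITION relies on, with illustrative columns:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class ExchangeColumnsSketch {
      public static void main(String[] args) {
        // Both sides must match, name for name and type for type.
        List<FieldSchema> dest = Arrays.asList(
            new FieldSchema("id", "int", null),
            new FieldSchema("name", "string", null));
        List<FieldSchema> src = Arrays.asList(
            new FieldSchema("id", "int", null),
            new FieldSchema("name", "string", null));
        System.out.println(MetaStoreUtils.compareFieldColumns(dest, src)); // true
      }
    }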
@@ -1237,8 +1200,7 @@ public class DDLSemanticAnalyzer extends
       if (indexTableName != null) {
         indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName));
       }
-      String baseTblName = index.getOrigTableName();
-      Table baseTbl = getTable(baseTblName);
+      Table baseTbl = getTable(new String[] {index.getDbName(), index.getOrigTableName()});
 
       String handlerCls = index.getIndexHandlerClass();
       HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls);
@@ -1331,16 +1293,16 @@ public class DDLSemanticAnalyzer extends
     }
   }
 
-  private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isUnset)
+  private void analyzeAlterTableProps(String[] qualified, ASTNode ast, boolean expectView, boolean isUnset)
       throws SemanticException {
 
-    String tableName = getUnescapedName((ASTNode) ast.getChild(0));
-    HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
+    String tableName = getDotName(qualified);
+    HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(0))
         .getChild(0));
     AlterTableDesc alterTblDesc = null;
     if (isUnset == true) {
       alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, expectView);
-      if (ast.getChild(2) != null) {
+      if (ast.getChild(1) != null) {
         alterTblDesc.setDropIfExists(true);
       }
     } else {
@@ -1527,7 +1489,7 @@ public class DDLSemanticAnalyzer extends
         alterTblDesc), conf));
   }
 
-  private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast,
+  private void analyzeAlterTablePartMergeFiles(ASTNode ast,
       String tableName, HashMap<String, String> partSpec)
       throws SemanticException {
     AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc(
@@ -1639,7 +1601,7 @@ public class DDLSemanticAnalyzer extends
         StatsWork statDesc;
         if (oldTblPartLoc.equals(newTblPartLoc)) {
           // If we're merging to the same location, we can avoid some metastore calls
-          tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST);
+          tableSpec tablepart = new tableSpec(db, conf, tableName, partSpec);
           statDesc = new StatsWork(tablepart);
         } else {
           statDesc = new StatsWork(ltd);
@@ -1672,7 +1634,7 @@ public class DDLSemanticAnalyzer extends
       alterTblDesc = new AlterTableDesc(tableName, true, partSpec);
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
       break;
-    case HiveParser.TOK_TABLEBUCKETS:
+    case HiveParser.TOK_ALTERTABLE_BUCKETS:
       ASTNode buckets = (ASTNode) ast.getChild(0);
       List<String> bucketCols = getColumnNames((ASTNode) buckets.getChild(0));
       List<Order> sortCols = new ArrayList<Order>();
@@ -2502,9 +2464,9 @@ public class DDLSemanticAnalyzer extends
   }
 
 
-  private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws SemanticException {
-    String[] source = getQualifiedTableName((ASTNode) ast.getChild(0));
-    String[] target = getQualifiedTableName((ASTNode) ast.getChild(1));
+  private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expectView)
+      throws SemanticException {
+    String[] target = getQualifiedTableName((ASTNode) ast.getChild(0));
 
     String sourceName = getDotName(source);
     String targetName = getDotName(target);
@@ -2515,22 +2477,21 @@ public class DDLSemanticAnalyzer extends
         alterTblDesc), conf));
   }
 
-  private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException {
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+  private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast) throws SemanticException {
     String newComment = null;
     String newType = null;
-    newType = getTypeStringFromAST((ASTNode) ast.getChild(3));
+    newType = getTypeStringFromAST((ASTNode) ast.getChild(2));
     boolean first = false;
     String flagCol = null;
     ASTNode positionNode = null;
-    if (ast.getChildCount() == 6) {
-      newComment = unescapeSQLString(ast.getChild(4).getText());
-      positionNode = (ASTNode) ast.getChild(5);
-    } else if (ast.getChildCount() == 5) {
-      if (ast.getChild(4).getType() == HiveParser.StringLiteral) {
-        newComment = unescapeSQLString(ast.getChild(4).getText());
+    if (ast.getChildCount() == 5) {
+      newComment = unescapeSQLString(ast.getChild(3).getText());
+      positionNode = (ASTNode) ast.getChild(4);
+    } else if (ast.getChildCount() == 4) {
+      if (ast.getChild(3).getType() == HiveParser.StringLiteral) {
+        newComment = unescapeSQLString(ast.getChild(3).getText());
       } else {
-        positionNode = (ASTNode) ast.getChild(4);
+        positionNode = (ASTNode) ast.getChild(3);
       }
     }
 
@@ -2542,8 +2503,8 @@ public class DDLSemanticAnalyzer extends
       }
     }
 
-    String oldColName = ast.getChild(1).getText();
-    String newColName = ast.getChild(2).getText();
+    String oldColName = ast.getChild(0).getText();
+    String newColName = ast.getChild(1).getText();
 
     /* Validate the operation of renaming a column name. */
     Table tab = getTable(qualified);
@@ -2603,12 +2564,11 @@ public class DDLSemanticAnalyzer extends
         alterBucketNum), conf));
   }
 
-  private void analyzeAlterTableModifyCols(ASTNode ast,
+  private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast,
       AlterTableTypes alterType) throws SemanticException {
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
 
     String tblName = getDotName(qualified);
-    List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(1));
+    List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(0));
     AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols,
         alterType);
 
@@ -2617,7 +2577,7 @@ public class DDLSemanticAnalyzer extends
         alterTblDesc), conf));
   }
 
-  private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView)
+  private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView)
       throws SemanticException {
 
     boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null)
@@ -2630,7 +2590,6 @@ public class DDLSemanticAnalyzer extends
     // popular case but that's kinda hacky. Let's not do it for now.
     boolean canGroupExprs = ifExists;
 
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     Table tab = getTable(qualified);
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
         getFullPartitionSpecs(ast, tab, canGroupExprs);
@@ -2649,10 +2608,8 @@ public class DDLSemanticAnalyzer extends
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 
-  private void analyzeAlterTablePartColType(ASTNode ast)
+  private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast)
       throws SemanticException {
-    // get table name
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
 
 
     // check if table exists.
@@ -2664,7 +2621,7 @@ public class DDLSemanticAnalyzer extends
 
     // Alter table ... partition column ( column newtype) only takes one column at a time.
     // It must have a column name followed with type.
-    ASTNode colAst = (ASTNode) ast.getChild(1);
+    ASTNode colAst = (ASTNode) ast.getChild(0);
     assert(colAst.getChildCount() == 2);
 
     FieldSchema newCol = new FieldSchema();
@@ -2710,12 +2667,11 @@ public class DDLSemanticAnalyzer extends
    * @throws SemanticException
    *           Parsing failed
    */
-  private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
+  private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boolean expectView)
       throws SemanticException {
 
     // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
-    boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS;
+    boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS;
 
     Table tab = getTable(qualified);
     boolean isView = tab.isView();
@@ -2723,7 +2679,7 @@ public class DDLSemanticAnalyzer extends
     outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
 
     int numCh = ast.getChildCount();
-    int start = ifNotExists ? 2 : 1;
+    int start = ifNotExists ? 1 : 0;
 
     String currentLocation = null;
     Map<String, String> currentPart = null;
@@ -2840,9 +2796,8 @@ public class DDLSemanticAnalyzer extends
    * @throws SemanticException
   *           Parsing failed
    */
-  private void analyzeAlterTableTouch(CommonTree ast)
+  private void analyzeAlterTableTouch(String[] qualified, CommonTree ast)
       throws SemanticException {
-    String[] qualified = getQualifiedTableName((ASTNode)ast.getChild(0));
 
     Table tab = getTable(qualified);
     validateAlterTableType(tab, AlterTableTypes.TOUCH);
@@ -2870,14 +2825,13 @@ public class DDLSemanticAnalyzer extends
     }
   }
 
-  private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive)
+  private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolean isUnArchive)
       throws SemanticException {
 
     if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) {
       throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg());
 
     }
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     // partition name to value
     List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
 
@@ -2948,7 +2902,7 @@ public class DDLSemanticAnalyzer extends
     List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
     int childIndex = 0;
     // get partition metadata if partition specified
-    for (childIndex = 1; childIndex < ast.getChildCount(); childIndex++) {
+    for (childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
       Tree partspec = ast.getChild(childIndex);
       // sanity check
       if (partspec.getType() == HiveParser.TOK_PARTSPEC) {
@@ -2976,7 +2930,7 @@ public class DDLSemanticAnalyzer extends
 
     Map<Integer, List<ExprNodeGenericFuncDesc>> result =
         new HashMap<Integer, List<ExprNodeGenericFuncDesc>>();
-    for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) {
+    for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
       Tree partSpecTree = ast.getChild(childIndex);
       if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) continue;
       ExprNodeGenericFuncDesc expr = null;
@@ -3184,14 +3138,13 @@ public class DDLSemanticAnalyzer extends
    *          node
    * @throws SemanticException
    */
-  private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException {
+  private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException {
     /**
      * Throw an error if the user tries to use the DDL with
      * hive.internal.ddl.list.bucketing.enable set to false.
      */
     HiveConf hiveConf = SessionState.get().getConf();
 
-    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     Table tab = getTable(qualified);
 
     inputs.add(new ReadEntity(tab));
@@ -3200,7 +3153,7 @@ public class DDLSemanticAnalyzer extends
     validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
 
     String tableName = getDotName(qualified);
-    if (ast.getChildCount() == 1) {
+    if (ast.getChildCount() == 0) {
       /* Convert a skewed table to non-skewed table. */
       AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true,
           new ArrayList<String>(), new ArrayList<List<String>>());
@@ -3208,7 +3161,7 @@ public class DDLSemanticAnalyzer extends
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           alterTblDesc), conf));
     } else {
-      switch (((ASTNode) ast.getChild(1)).getToken().getType()) {
+      switch (((ASTNode) ast.getChild(0)).getToken().getType()) {
       case HiveParser.TOK_TABLESKEWED:
         handleAlterTableSkewedBy(ast, tableName, tab);
         break;
@@ -3255,7 +3208,7 @@ public class DDLSemanticAnalyzer extends
     List<String> skewedColNames = new ArrayList<String>();
     List<List<String>> skewedValues = new ArrayList<List<String>>();
     /* skewed column names. */
-    ASTNode skewedNode = (ASTNode) ast.getChild(1);
+    ASTNode skewedNode = (ASTNode) ast.getChild(0);
     skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode);
     /* skewed value. */
     analyzeDDLSkewedValues(skewedValues, skewedNode);

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g Tue Sep  2 19:56:56 2014
@@ -144,7 +144,7 @@ fromSource
 @init { gParent.pushMsg("from source", state); }
 @after { gParent.popMsg(state); }
     :
-    ((Identifier LPAREN)=> partitionedTableFunction | tableSource | subQuerySource) (lateralView^)*
+    ((Identifier LPAREN)=> partitionedTableFunction | tableSource | subQuerySource | virtualTableSource) (lateralView^)*
     ;
 
 tableBucketSample
@@ -256,3 +256,46 @@ searchCondition
     ;
 
 //-----------------------------------------------------------------------------------
+
+//-------- Row Constructor ----------------------------------------------------------
+//in support of SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as FOO(a,b,c) and
+// INSERT INTO <table> (col1,col2,...) VALUES(...),(...),...
+// INSERT INTO <table> (col1,col2,...) SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as Foo(a,b,c)
+valueRowConstructor
+    :
+    LPAREN atomExpression (COMMA atomExpression)* RPAREN -> ^(TOK_VALUE_ROW atomExpression+)
+    ;
+
+valuesTableConstructor
+    :
+    valueRowConstructor (COMMA valueRowConstructor)* -> ^(TOK_VALUES_TABLE valueRowConstructor+)
+    ;
+
+/*
+VALUES(1),(2) means 2 rows, 1 column each.
+VALUES(1,2),(3,4) means 2 rows, 2 columns each.
+VALUES(1,2,3) means 1 row, 3 columns.
+*/
+valuesClause
+    :
+    KW_VALUES valuesTableConstructor -> valuesTableConstructor
+    ;
+
+/*
+This represents a clause like this:
+(VALUES(1,2),(2,3)) as VirtTable(col1,col2)
+*/
+virtualTableSource
+    :
+    LPAREN valuesClause RPAREN tableNameColList -> ^(TOK_VIRTUAL_TABLE tableNameColList valuesClause)
+    ;
+/*
+e.g. as VirtTable(col1,col2)
+Note that we only want literals as column names
+*/
+tableNameColList
+    :
+    KW_AS? identifier LPAREN identifier (COMMA identifier)* RPAREN -> ^(TOK_VIRTUAL_TABREF ^(TOK_TABNAME identifier) ^(TOK_COL_NAME identifier+))
+    ;
+
+//-----------------------------------------------------------------------------------
\ No newline at end of file
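
Taken together with the new KW_VALUES keyword added to HiveLexer.g below, these rules admit multi-row INSERT ... VALUES statements and inline VALUES virtual tables. A hedged JDBC sketch exercising both forms; the endpoint, credentials, and table t are hypothetical:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ValuesClauseSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical HiveServer2 endpoint; adjust host, port, credentials.
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default", "", "");
             Statement stmt = conn.createStatement()) {
          // Multi-row insert, enabled by valuesClause.
          stmt.execute("INSERT INTO TABLE t VALUES (1, 'a'), (2, 'b')");
          // Inline virtual table, enabled by virtualTableSource.
          ResultSet rs = stmt.executeQuery(
              "SELECT foo.a, foo.b FROM (VALUES (1, 'x'), (2, 'y')) AS foo(a, b)");
          while (rs.next()) {
            System.out.println(rs.getInt(1) + "\t" + rs.getString(2));
          }
        }
      }
    }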

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java Tue Sep  2 19:56:56 2014
@@ -177,10 +177,19 @@ public class GenTezUtils {
 
     // map work starts with table scan operators
     assert root instanceof TableScanOperator;
-    String alias = ((TableScanOperator)root).getConf().getAlias();
+    TableScanOperator ts = (TableScanOperator) root;
+
+    String alias = ts.getConf().getAlias();
 
     setupMapWork(mapWork, context, partitions, root, alias);
 
+    if (context.parseContext != null
+        && context.parseContext.getTopToTable() != null
+        && context.parseContext.getTopToTable().containsKey(ts)
+        && context.parseContext.getTopToTable().get(ts).isDummyTable()) {
+      mapWork.setDummyTableScan(true);
+    }
+
     // add new item to the tez work
     tezWork.add(mapWork);
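
The added guard marks map work whose table scan reads a dummy table, checking each link of the lookup chain for null. The same test pulled into a standalone helper, a sketch using only the accessors visible in the hunk (class name hypothetical):

    import org.apache.hadoop.hive.ql.exec.TableScanOperator;
    import org.apache.hadoop.hive.ql.parse.ParseContext;

    public final class DummyScanSketch {
      private DummyScanSketch() {
      }

      // Null-safe at every link, exactly as in the hunk above.
      public static boolean isDummyTableScan(ParseContext pctx, TableScanOperator ts) {
        return pctx != null
            && pctx.getTopToTable() != null
            && pctx.getTopToTable().containsKey(ts)
            && pctx.getTopToTable().get(ts).isDummyTable();
      }
    }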
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Tue Sep  2 19:56:56 2014
@@ -292,6 +292,7 @@ KW_TRANSACTIONS: 'TRANSACTIONS';
 KW_REWRITE : 'REWRITE';
 KW_AUTHORIZATION: 'AUTHORIZATION';
 KW_CONF: 'CONF';
+KW_VALUES: 'VALUES';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.