Posted to commits@hive.apache.org by br...@apache.org on 2014/09/08 06:38:26 UTC

svn commit: r1623263 [15/28] - in /hive/branches/spark: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ ant/src/org/apache/hadoop/hive/ant/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/ bin/...

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Mon Sep  8 04:38:17 2014
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -288,6 +289,22 @@ public class MetaStoreUtils {
    */
   public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
       boolean madeDir, boolean forceRecompute) throws MetaException {
+    return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
+                                    wh, madeDir, forceRecompute);
+  }
+
+  /**
+   * Updates the numFiles and totalSize parameters for the passed Partition by querying
+   *  the warehouse if the passed Partition does not already have values for these parameters.
+   * @param part
+   * @param wh
+   * @param madeDir if true, the directory was just created and can be assumed to be empty
+   * @param forceRecompute Recompute stats even if the passed Partition already has
+   * these parameters set
+   * @return true if the stats were updated, false otherwise
+   */
+  public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
+      boolean madeDir, boolean forceRecompute) throws MetaException {
     Map<String,String> params = part.getParameters();
     boolean updated = false;
     if (forceRecompute ||
@@ -297,10 +314,10 @@ public class MetaStoreUtils {
         params = new HashMap<String,String>();
       }
       if (!madeDir) {
-        // The partitition location already existed and may contain data. Lets try to
+        // The partition location already existed and may contain data. Lets try to
         // populate those statistics that don't require a full scan of the data.
         LOG.warn("Updating partition stats fast for: " + part.getTableName());
-        FileStatus[] fileStatus = wh.getFileStatusesForSD(part.getSd());
+        FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
         populateQuickStats(fileStatus, params);
         LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
         if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {

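The new overload takes a PartitionSpecProxy.PartitionIterator positioned on a partition, and the existing Partition-based overload now delegates by wrapping its argument in a SimplePartitionWrapperIterator. A minimal sketch of both call shapes, assuming a Warehouse wh and a Partition part are already in hand (class and method names below are illustrative, not part of this commit):

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    public class QuickStatsSketch {
      // Refresh numFiles/totalSize for one partition by listing its location.
      static boolean refresh(Partition part, Warehouse wh) throws MetaException {
        // Old overload: still available, now delegates through the wrapper iterator.
        return MetaStoreUtils.updatePartitionStatsFast(part, wh,
            false /* madeDir */, true /* forceRecompute */);
      }

      // The same call expressed against the new iterator-based overload.
      static boolean refreshViaIterator(Partition part, Warehouse wh) throws MetaException {
        PartitionSpecProxy.PartitionIterator it =
            new PartitionSpecProxy.SimplePartitionWrapperIterator(part);
        return MetaStoreUtils.updatePartitionStatsFast(it, wh, false, true);
      }
    }
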
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Mon Sep  8 04:38:17 2014
@@ -35,9 +35,11 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
 import javax.jdo.JDODataStoreException;
 import javax.jdo.JDOHelper;
@@ -62,6 +64,7 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -89,6 +92,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -131,6 +135,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
@@ -159,7 +164,7 @@ public class ObjectStore implements RawS
 
   private static final Map<String, Class> PINCLASSMAP;
   static {
-    Map<String, Class> map = new HashMap();
+    Map<String, Class> map = new HashMap<String, Class>();
     map.put("table", MTable.class);
     map.put("storagedescriptor", MStorageDescriptor.class);
     map.put("serdeinfo", MSerDeInfo.class);
@@ -181,6 +186,8 @@ public class ObjectStore implements RawS
   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
   private final AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
 
+  private Pattern partitionValidationPattern;
+
   public ObjectStore() {
   }
 
@@ -225,6 +232,14 @@ public class ObjectStore implements RawS
 
       initialize(propsFromConf);
 
+      String partitionValidationRegex =
+          hiveConf.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name());
+      if (partitionValidationRegex != null && !partitionValidationRegex.equals("")) {
+        partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+      } else {
+        partitionValidationPattern = null;
+      }
+
       if (!isInitialized) {
         throw new RuntimeException(
         "Unable to create persistence manager. Check dss.log for details");
@@ -1080,14 +1095,14 @@ public class ObjectStore implements RawS
     return keys;
   }
 
-  private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException {
+  private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
     return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
   }
 
-  private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException {
+  private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
@@ -1119,7 +1134,7 @@ public class ObjectStore implements RawS
 
     StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
         msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
-        .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd
+        .isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
         .getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd
         .getSortCols()), convertMap(msd.getParameters()));
     SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
@@ -1231,7 +1246,7 @@ public class ObjectStore implements RawS
     }
     return new MStorageDescriptor(mcd, sd
         .getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
-        .isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd
+        .isCompressed(), sd.getNumBuckets(), convertToMSerDeInfo(sd
         .getSerdeInfo()), sd.getBucketCols(),
         convertToMOrders(sd.getSortCols()), sd.getParameters(),
         (null == sd.getSkewedInfo()) ? null
@@ -1293,6 +1308,76 @@ public class ObjectStore implements RawS
     return success;
   }
 
+  private boolean isValidPartition(
+      Partition part, boolean ifNotExists) throws MetaException {
+    MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
+        partitionValidationPattern);
+    boolean doesExist = doesPartitionExist(
+        part.getDbName(), part.getTableName(), part.getValues());
+    if (doesExist && !ifNotExists) {
+      throw new MetaException("Partition already exists: " + part);
+    }
+    return !doesExist;
+  }
+
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName,
+                               PartitionSpecProxy partitionSpec, boolean ifNotExists)
+      throws InvalidObjectException, MetaException {
+    boolean success = false;
+    openTransaction();
+    try {
+      List<MTablePrivilege> tabGrants = null;
+      List<MTableColumnPrivilege> tabColumnGrants = null;
+      MTable table = this.getMTable(dbName, tblName);
+      if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
+        tabGrants = this.listAllTableGrants(dbName, tblName);
+        tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
+      }
+
+      if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) {
+        throw new MetaException("Partition does not belong to target table "
+            + dbName + "." + tblName + ": " + partitionSpec);
+      }
+
+      PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+
+      int now = (int)(System.currentTimeMillis()/1000);
+
+      while (iterator.hasNext()) {
+        Partition part = iterator.next();
+
+        if (isValidPartition(part, ifNotExists)) {
+          MPartition mpart = convertToMPart(part, true);
+          pm.makePersistent(mpart);
+          if (tabGrants != null) {
+            for (MTablePrivilege tab : tabGrants) {
+              pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
+                  tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
+                  tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
+            }
+          }
+
+          if (tabColumnGrants != null) {
+            for (MTableColumnPrivilege col : tabColumnGrants) {
+              pm.makePersistent(new MPartitionColumnPrivilege(col.getPrincipalName(),
+                  col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
+                  now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
+            }
+          }
+        }
+      }
+
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
   @Override
   public boolean addPartition(Partition part) throws InvalidObjectException,
       MetaException {
@@ -2394,7 +2479,7 @@ public class ObjectStore implements RawS
    * Makes a JDO query filter string.
    * Makes a JDO query filter string for tables or partitions.
    * @param dbName Database name.
-   * @param table Table. If null, the query returned is over tables in a database.
+   * @param mtable Table. If null, the query returned is over tables in a database.
    *   If not null, the query returned is over partitions in a table.
    * @param filter The filter from which JDOQL filter will be made.
    * @param params Parameters for the filter. Some parameters may be added here.
@@ -5716,7 +5801,7 @@ public class ObjectStore implements RawS
       pm.makePersistent(mStatsObj);
     }
   }
-
+  
   @Override
   public boolean updateTableColumnStatistics(ColumnStatistics colStats)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -6178,7 +6263,7 @@ public class ObjectStore implements RawS
     boolean commited = false;
     long delCnt;
     LOG.debug("Begin executing cleanupEvents");
-    Long expiryTime = HiveConf.getLongVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION) * 1000L;
+    Long expiryTime = HiveConf.getTimeVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION, TimeUnit.MILLISECONDS);
     Long curTime = System.currentTimeMillis();
     try {
       openTransaction();

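ObjectStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists) persists every partition in the spec inside a single transaction, validating names against the partition-name whitelist pattern (METASTORE_PARTITION_NAME_WHITELIST_PATTERN) and skipping partitions that already exist when ifNotExists is true. A minimal caller sketch against the RawStore interface, assuming a configured store (illustrative names, not part of this commit):

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

    public class AddPartitionsSketch {
      // Persist every partition in the spec; with ifNotExists=true, partitions that
      // already exist are skipped rather than failing the whole batch.
      static boolean addAll(RawStore store, String dbName, String tblName,
          PartitionSpecProxy spec) throws InvalidObjectException, MetaException {
        return store.addPartitions(dbName, tblName, spec, true /* ifNotExists */);
      }
    }
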
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Mon Sep  8 04:38:17 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 public interface RawStore extends Configurable {
@@ -129,6 +131,9 @@ public interface RawStore extends Config
   public abstract boolean addPartitions(String dbName, String tblName, List<Partition> parts)
       throws InvalidObjectException, MetaException;
 
+  public abstract boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists)
+      throws InvalidObjectException, MetaException;
+
   public abstract Partition getPartition(String dbName, String tableName,
       List<String> part_vals) throws MetaException, NoSuchObjectException;
 
@@ -551,4 +556,5 @@ public interface RawStore extends Config
 
   public AggrStats get_aggr_stats_for(String dbName, String tblName,
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+  
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Mon Sep  8 04:38:17 2014
@@ -23,6 +23,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
@@ -80,8 +81,8 @@ public class RetryingHMSHandler implemen
     boolean gotNewConnectUrl = false;
     boolean reloadConf = HiveConf.getBoolVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
-    int retryInterval = HiveConf.getIntVar(hiveConf,
-        HiveConf.ConfVars.HMSHANDLERINTERVAL);
+    long retryInterval = HiveConf.getTimeVar(hiveConf,
+        HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
     int retryLimit = HiveConf.getIntVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 

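The switch from getIntVar to getTimeVar (also applied in RetryingMetaStoreClient below, and for METASTORE_EVENT_EXPIRY_DURATION and HIVE_TXN_TIMEOUT elsewhere in this commit) lets the caller name the unit it wants back instead of relying on a unit implied by the variable name. A sketch of the pattern, assuming a HiveConf conf (illustrative class name):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class RetryIntervalSketch {
      // getTimeVar resolves the configured value into the unit the caller asks for,
      // here milliseconds, instead of returning a bare int.
      static long retryIntervalMillis(HiveConf conf) {
        return HiveConf.getTimeVar(conf,
            HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
      }
    }
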
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java Mon Sep  8 04:38:17 2014
@@ -24,6 +24,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,18 +49,18 @@ public class RetryingMetaStoreClient imp
 
   private final IMetaStoreClient base;
   private final int retryLimit;
-  private final int retryDelaySeconds;
+  private final long retryDelaySeconds;
 
 
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
     this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
-    this.retryDelaySeconds =
-        hiveConf.getIntVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY);
+    this.retryDelaySeconds = hiveConf.getTimeVar(
+        HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
 
     reloginExpiringKeytabUser();
-    this.base = (IMetaStoreClient) MetaStoreUtils.newInstance(msClientClass, new Class[] {
+    this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
         HiveConf.class, HiveMetaHookLoader.class}, new Object[] {hiveConf, hookLoader});
   }
 

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java Mon Sep  8 04:38:17 2014
@@ -65,20 +65,20 @@ public class StatObjectConverter {
      if (statsObj.getStatsData().isSetBooleanStats()) {
        BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
        mColStats.setBooleanStats(
-           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
            boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
            boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
      } else if (statsObj.getStatsData().isSetLongStats()) {
        LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
        mColStats.setLongStats(
-           longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+           longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
            longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
            longStats.isSetLowValue() ? longStats.getLowValue() : null,
            longStats.isSetHighValue() ? longStats.getHighValue() : null);
      } else if (statsObj.getStatsData().isSetDoubleStats()) {
        DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
        mColStats.setDoubleStats(
-           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
            doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
            doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
            doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -87,20 +87,20 @@ public class StatObjectConverter {
        String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
        String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
        mColStats.setDecimalStats(
-           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
                low, high);
      } else if (statsObj.getStatsData().isSetStringStats()) {
        StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
        mColStats.setStringStats(
-           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
            stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
            stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
      } else if (statsObj.getStatsData().isSetBinaryStats()) {
        BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
        mColStats.setBinaryStats(
-           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
            binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
            binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
      }
@@ -109,9 +109,9 @@ public class StatObjectConverter {
 
   public static void setFieldsIntoOldStats(
       MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) {
-	  if (mStatsObj.getAvgColLen() != null) {
-	    oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    if (mStatsObj.getAvgColLen() != null) {
+      oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    }
     if (mStatsObj.getLongHighValue() != null) {
       oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
     }
@@ -131,19 +131,19 @@ public class StatObjectConverter {
       oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
     }
     if (mStatsObj.getMaxColLen() != null) {
-  	  oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
+      oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
     }
     if (mStatsObj.getNumDVs() != null) {
-  	  oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
+      oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
     }
     if (mStatsObj.getNumFalses() != null) {
-  	  oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
+      oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
     }
     if (mStatsObj.getNumTrues() != null) {
-  	  oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
+      oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
     }
     if (mStatsObj.getNumNulls() != null) {
-  	  oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+      oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
     }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
   }
@@ -152,13 +152,13 @@ public class StatObjectConverter {
       MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) {
     if (mStatsObj.getAvgColLen() != null) {
           oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    }
     if (mStatsObj.getLongHighValue() != null) {
-		  oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
-	  }
-	  if (mStatsObj.getDoubleHighValue() != null) {
-		  oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
-	  }
+      oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
+    }
+    if (mStatsObj.getDoubleHighValue() != null) {
+      oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
+    }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
     if (mStatsObj.getLongLowValue() != null) {
       oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue());
@@ -292,20 +292,20 @@ public class StatObjectConverter {
     if (statsObj.getStatsData().isSetBooleanStats()) {
       BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
       mColStats.setBooleanStats(
-          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
           boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
           boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
     } else if (statsObj.getStatsData().isSetLongStats()) {
       LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
       mColStats.setLongStats(
-          longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+          longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
           longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
           longStats.isSetLowValue() ? longStats.getLowValue() : null,
           longStats.isSetHighValue() ? longStats.getHighValue() : null);
     } else if (statsObj.getStatsData().isSetDoubleStats()) {
       DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
       mColStats.setDoubleStats(
-          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
           doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
           doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
           doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -314,20 +314,20 @@ public class StatObjectConverter {
       String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
       String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
       mColStats.setDecimalStats(
-          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
               low, high);
     } else if (statsObj.getStatsData().isSetStringStats()) {
       StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
       mColStats.setStringStats(
-          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
           stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
           stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
     } else if (statsObj.getStatsData().isSetBinaryStats()) {
       BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
       mColStats.setBinaryStats(
-          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
           binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
           binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
     }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Mon Sep  8 04:38:17 2014
@@ -507,8 +507,18 @@ public class Warehouse {
    */
   public FileStatus[] getFileStatusesForSD(StorageDescriptor desc)
       throws MetaException {
+    return getFileStatusesForLocation(desc.getLocation());
+  }
+
+  /**
+   * @param location
+   * @return array of FileStatus objects corresponding to the files
+   * making up the passed location
+   */
+  public FileStatus[] getFileStatusesForLocation(String location)
+      throws MetaException {
     try {
-      Path path = new Path(desc.getLocation());
+      Path path = new Path(location);
       FileSystem fileSys = path.getFileSystem(conf);
       return HiveStatsUtils.getFileStatusRecurse(path, -1, fileSys);
     } catch (IOException ioe) {

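getFileStatusesForSD is now a thin wrapper over getFileStatusesForLocation, so callers that only hold a location string (such as the PartitionIterator path in MetaStoreUtils above) can list files without building a StorageDescriptor. A small sketch, assuming a Warehouse wh (illustrative names):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class LocationSizeSketch {
      // Total bytes under a location, using the same recursive listing that
      // updatePartitionStatsFast now reaches through getFileStatusesForLocation.
      static long totalSize(Warehouse wh, String location) throws MetaException {
        long total = 0;
        for (FileStatus status : wh.getFileStatusesForLocation(location)) {
          total += status.getLen();
        }
        return total;
      }
    }
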
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java Mon Sep  8 04:38:17 2014
@@ -21,19 +21,23 @@ package org.apache.hadoop.hive.metastore
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
 public class AddPartitionEvent extends ListenerEvent {
 
   private final Table table;
   private final List<Partition> partitions;
+  private PartitionSpecProxy partitionSpecProxy;
 
   public AddPartitionEvent(Table table, List<Partition> partitions, boolean status, HMSHandler handler) {
     super(status, handler);
     this.table = table;
     this.partitions = partitions;
+    this.partitionSpecProxy = null;
   }
 
   public AddPartitionEvent(Table table, Partition partition, boolean status, HMSHandler handler) {
@@ -41,6 +45,16 @@ public class AddPartitionEvent extends L
   }
 
   /**
+   * Alternative constructor to use PartitionSpec APIs.
+   */
+  public AddPartitionEvent(Table table, PartitionSpecProxy partitionSpec, boolean status, HMSHandler handler) {
+    super(status, handler);
+    this.table = table;
+    this.partitions = null;
+    this.partitionSpecProxy = partitionSpec;
+  }
+
+  /**
    * @return The table.
    */
   public Table getTable() {
@@ -54,4 +68,11 @@ public class AddPartitionEvent extends L
     return partitions;
   }
 
+  /**
+   * @return Iterator for partitions.
+   */
+  public Iterator<Partition> getPartitionIterator() {
+    return partitionSpecProxy == null ? null : partitionSpecProxy.getPartitionIterator();
+  }
+
 }

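With the new constructor, an AddPartitionEvent can be backed either by a List<Partition> or by a PartitionSpecProxy, and only one of getPartitions()/getPartitionIterator() will be non-null. A hedged listener sketch that handles both shapes (illustrative names, not part of this commit):

    import java.util.Iterator;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;

    public class AddPartitionListenerSketch {
      // Log every partition in the event, whichever constructor produced it.
      static void logAdded(AddPartitionEvent event) {
        if (event.getPartitions() != null) {
          for (Partition p : event.getPartitions()) {
            System.out.println("added partition values=" + p.getValues());
          }
          return;
        }
        Iterator<Partition> it = event.getPartitionIterator();
        while (it != null && it.hasNext()) {
          System.out.println("added partition values=" + it.next().getValues());
        }
      }
    }
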
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreAddPartitionEvent.java Mon Sep  8 04:38:17 2014
@@ -21,19 +21,23 @@ package org.apache.hadoop.hive.metastore
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
 public class PreAddPartitionEvent extends PreEventContext {
 
   private final Table table;
   private final List<Partition> partitions;
+  private PartitionSpecProxy partitionSpecProxy;
 
   public PreAddPartitionEvent (Table table, List<Partition> partitions, HMSHandler handler) {
     super(PreEventType.ADD_PARTITION, handler);
     this.table = table;
     this.partitions = partitions;
+    this.partitionSpecProxy = null;
   }
 
   public PreAddPartitionEvent(Table table, Partition partition, HMSHandler handler) {
@@ -41,6 +45,14 @@ public class PreAddPartitionEvent extend
   }
 
   /**
+   * Alternative constructor, using PartitionSpecProxy.
+   */
+  public PreAddPartitionEvent(Table table, PartitionSpecProxy partitionSpecProxy, HMSHandler handler) {
+    this(table, (List<Partition>)null, handler);
+    this.partitionSpecProxy = partitionSpecProxy;
+  }
+
+  /**
    * @return the partitions
    */
   public List<Partition> getPartitions() {
@@ -53,4 +65,11 @@ public class PreAddPartitionEvent extend
   public Table getTable() {
     return table ;
   }
+
+  /**
+   * @return Iterator over partition-list.
+   */
+  public Iterator<Partition> getPartitionIterator() {
+    return partitionSpecProxy == null ? null : partitionSpecProxy.getPartitionIterator();
+  }
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java Mon Sep  8 04:38:17 2014
@@ -34,9 +34,17 @@ public class CompactionInfo {
   private String fullPartitionName = null;
   private String fullTableName = null;
 
+  public CompactionInfo(String dbname, String tableName, String partName, CompactionType type) {
+    this.dbname = dbname;
+    this.tableName = tableName;
+    this.partName = partName;
+    this.type = type;
+  }
+  CompactionInfo() {}
+  
   public String getFullPartitionName() {
     if (fullPartitionName == null) {
-      StringBuffer buf = new StringBuffer(dbname);
+      StringBuilder buf = new StringBuilder(dbname);
       buf.append('.');
       buf.append(tableName);
       if (partName != null) {
@@ -50,11 +58,14 @@ public class CompactionInfo {
 
   public String getFullTableName() {
     if (fullTableName == null) {
-      StringBuffer buf = new StringBuffer(dbname);
+      StringBuilder buf = new StringBuilder(dbname);
       buf.append('.');
       buf.append(tableName);
       fullTableName = buf.toString();
     }
     return fullTableName;
   }
+  public boolean isMajorCompaction() {
+    return CompactionType.MAJOR == type;
+  }
 }

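CompactionInfo gains a public constructor and an isMajorCompaction() helper alongside the StringBuffer-to-StringBuilder cleanup. A small sketch with illustrative values:

    import org.apache.hadoop.hive.metastore.api.CompactionType;
    import org.apache.hadoop.hive.metastore.txn.CompactionInfo;

    public class CompactionInfoSketch {
      static void example() {
        CompactionInfo ci =
            new CompactionInfo("default", "sales", "ds=2014-09-08", CompactionType.MAJOR);
        System.out.println(ci.getFullPartitionName()); // e.g. "default.sales.ds=2014-09-08"
        System.out.println(ci.isMajorCompaction());    // true
      }
    }
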
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java Mon Sep  8 04:38:17 2014
@@ -535,6 +535,46 @@ public class CompactionTxnHandler extend
       deadlockCnt = 0;
     }
   }
+
+  /**
+   * Queries metastore DB directly to find columns in the table which have statistics information.
+   * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+   * table level stats are examined.
+   * @throws MetaException
+   */
+  public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException {
+    Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+    Statement stmt = null;
+    ResultSet rs = null;
+    try {
+      stmt = dbConn.createStatement();
+      String s = "SELECT COLUMN_NAME FROM " + (ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS")
+         + " WHERE DB_NAME='" + ci.dbname + "' AND TABLE_NAME='" + ci.tableName + "'"
+        + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");
+      LOG.debug("Going to execute <" + s + ">");
+      rs = stmt.executeQuery(s);
+      List<String> columns = new ArrayList<String>();
+      while(rs.next()) {
+        columns.add(rs.getString(1));
+      }
+      LOG.debug("Found columns to update stats: " + columns + " on " + ci.tableName +
+        (ci.partName == null ? "" : "/" + ci.partName));
+      dbConn.commit();
+      return columns;
+    } catch (SQLException e) {
+      try {
+        LOG.error("Failed to find columns to analyze stats on for " + ci.tableName +
+            (ci.partName == null ? "" : "/" + ci.partName), e);
+        dbConn.rollback();
+      } catch (SQLException e1) {
+        //nothing we can do here
+      }
+      throw new MetaException("Unable to connect to transaction database " +
+        StringUtils.stringifyException(e));
+    } finally {
+      close(rs, stmt, dbConn);
+    }
+  }
 }
 
 

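findColumnsWithStats queries TAB_COL_STATS or PART_COL_STATS directly, depending on whether the CompactionInfo carries a partition name, so a compaction worker can learn which column statistics are worth regenerating. A caller sketch, assuming an already-constructed CompactionTxnHandler (illustrative values, not part of this commit):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.CompactionType;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
    import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;

    public class StatsColumnsSketch {
      // Because partName is non-null here, PART_COL_STATS is queried
      // rather than TAB_COL_STATS.
      static List<String> columnsToReanalyze(CompactionTxnHandler handler)
          throws MetaException {
        CompactionInfo ci =
            new CompactionInfo("default", "sales", "ds=2014-09-08", CompactionType.MAJOR);
        return handler.findColumnsWithStats(ci);
      }
    }
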
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Mon Sep  8 04:38:17 2014
@@ -40,6 +40,7 @@ import javax.sql.DataSource;
 import java.io.IOException;
 import java.sql.*;
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 
 /**
  * A handler to answer transaction related calls that come into the metastore
@@ -119,7 +120,7 @@ public class TxnHandler {
       throw new RuntimeException(e);
     }
 
-    timeout = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT) * 1000;
+    timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
     deadlockCnt = 0;
     buildJumpTable();
   }
@@ -859,6 +860,29 @@ public class TxnHandler {
   }
 
   /**
+   * Close the ResultSet.
+   * @param rs may be {@code null}
+   */
+  void close(ResultSet rs) {
+    try {
+      if (rs != null && !rs.isClosed()) {
+        rs.close();
+      }
+    }
+    catch(SQLException ex) {
+      LOG.warn("Failed to close statement " + ex.getMessage());
+    }
+  }
+
+  /**
+   * Close all 3 JDBC artifacts in order: {@code rs stmt dbConn}
+   */
+  void close(ResultSet rs, Statement stmt, Connection dbConn) {
+    close(rs);
+    closeStmt(stmt);
+    closeDbConn(dbConn);
+  }
+  /**
    * Determine if an exception was a deadlock.  Unfortunately there is no standard way to do
    * this, so we have to inspect the error messages and catch the telltale signs for each
    * different database.

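The close(ResultSet) and close(rs, stmt, dbConn) helpers are package-private and are what CompactionTxnHandler.findColumnsWithStats above uses in its finally block. For reference, the same cleanup pattern written against plain JDBC looks like the sketch below (not part of this commit):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class JdbcCleanupSketch {
      // Mirrors TxnHandler.close(rs, stmt, dbConn): release in reverse acquisition
      // order, and never let one failed close() prevent the others from running.
      static void closeQuietly(ResultSet rs, Statement stmt, Connection conn) {
        try { if (rs != null) rs.close(); } catch (SQLException e) { /* log and continue */ }
        try { if (stmt != null) stmt.close(); } catch (SQLException e) { /* log and continue */ }
        try { if (conn != null) conn.close(); } catch (SQLException e) { /* log and continue */ }
      }
    }
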
Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Mon Sep  8 04:38:17 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 /**
@@ -676,6 +678,11 @@ public class DummyRawStoreControlledComm
   }
 
   @Override
+  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+    return false;
+  }
+
+  @Override
   public void dropPartitions(String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException {
     objectStore.dropPartitions(dbName, tblName, partNames);
@@ -719,5 +726,4 @@ public class DummyRawStoreControlledComm
     return null;
   }
 
-
 }

Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Mon Sep  8 04:38:17 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 /**
@@ -698,6 +700,11 @@ public class DummyRawStoreForJdoConnecti
   }
 
   @Override
+  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+    return false;
+  }
+
+  @Override
   public void dropPartitions(String dbName, String tblName, List<String> partNames) {
   }
 
@@ -735,6 +742,7 @@ public class DummyRawStoreForJdoConnecti
       throws MetaException {
     return null;
   }
+  
 }
 
 

Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java Mon Sep  8 04:38:17 2014
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
-import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -26,11 +25,11 @@ import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static junit.framework.Assert.*;
 
@@ -868,7 +867,7 @@ public class TestTxnHandler {
 
   @Test
   public void testHeartbeatLock() throws Exception {
-    conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1);
+    conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
     HeartbeatRequest h = new HeartbeatRequest();
     LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
     comp.setTablename("mytable");

Modified: hive/branches/spark/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/pom.xml?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/pom.xml (original)
+++ hive/branches/spark/pom.xml Mon Sep  8 04:38:17 2014
@@ -60,6 +60,7 @@
     <maven.repo.local>${settings.localRepository}</maven.repo.local>
     <hive.path.to.root>.</hive.path.to.root>
     <hive.jdbc.driver.classifier>standalone</hive.jdbc.driver.classifier>
+    <checkstyle.conf.dir>${hive.path.to.root}/checkstyle</checkstyle.conf.dir>
 
     <!-- Test Properties -->
     <test.extra.path></test.extra.path>
@@ -75,6 +76,7 @@
     <datanucleus.maven.plugin.version>3.3.0-release</datanucleus.maven.plugin.version>
     <maven.antrun.plugin.version>1.7</maven.antrun.plugin.version>
     <maven.assembly.plugin.version>2.3</maven.assembly.plugin.version>
+    <maven.checkstyle.plugin.version>2.12.1</maven.checkstyle.plugin.version>
     <maven.compiler.plugin.version>3.1</maven.compiler.plugin.version>
     <maven.enforcer.plugin.version>1.3.1</maven.enforcer.plugin.version>
     <maven.install.plugin.version>2.4</maven.install.plugin.version>
@@ -148,8 +150,8 @@
     <stax.version>1.0.1</stax.version>
     <slf4j.version>1.7.5</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
+    <tez.version>0.5.0</tez.version>
     <super-csv.version>2.2.0</super-csv.version>
-    <tez.version>0.4.1-incubating</tez.version>
     <spark.version>1.1.0-SNAPSHOT</spark.version>
     <scala.binary.version>2.10</scala.binary.version>
     <scala.version>2.10.4</scala.version>
@@ -629,6 +631,11 @@
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-checkstyle-plugin</artifactId>
+          <version>${maven.checkstyle.plugin.version}</version>
+        </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-enforcer-plugin</artifactId>
           <version>${maven.enforcer.plugin.version}</version>
         </plugin>
@@ -758,6 +765,13 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <configLocation>${checkstyle.conf.dir}/checkstyle.xml</configLocation>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <excludes>

Modified: hive/branches/spark/ql/if/queryplan.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/if/queryplan.thrift?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/if/queryplan.thrift (original)
+++ hive/branches/spark/ql/if/queryplan.thrift Mon Sep  8 04:38:17 2014
@@ -56,6 +56,7 @@ enum OperatorType {
   PTF,
   MUX,
   DEMUX,
+  EVENT,
 }
 
 struct Operator {

Modified: hive/branches/spark/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/pom.xml?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/pom.xml (original)
+++ hive/branches/spark/ql/pom.xml Mon Sep  8 04:38:17 2014
@@ -297,6 +297,38 @@
     </dependency>
     <dependency>
       <groupId>org.apache.tez</groupId>
+      <artifactId>tez-runtime-internals</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </exclusion>
+       <exclusion>
+         <groupId>org.apache.hadoop</groupId>
+         <artifactId>hadoop-yarn-client</artifactId>
+       </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
       <artifactId>tez-mapreduce</artifactId>
       <version>${tez.version}</version>
       <optional>true</optional>

Modified: hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp Mon Sep  8 04:38:17 2014
@@ -51,7 +51,8 @@ int _kOperatorTypeValues[] = {
   OperatorType::HASHTABLEDUMMY,
   OperatorType::PTF,
   OperatorType::MUX,
-  OperatorType::DEMUX
+  OperatorType::DEMUX,
+  OperatorType::EVENT
 };
 const char* _kOperatorTypeNames[] = {
   "JOIN",
@@ -74,9 +75,10 @@ const char* _kOperatorTypeNames[] = {
   "HASHTABLEDUMMY",
   "PTF",
   "MUX",
-  "DEMUX"
+  "DEMUX",
+  "EVENT"
 };
-const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(21, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(22, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
 
 int _kTaskTypeValues[] = {
   TaskType::MAP,

Modified: hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h Mon Sep  8 04:38:17 2014
@@ -56,7 +56,8 @@ struct OperatorType {
     HASHTABLEDUMMY = 17,
     PTF = 18,
     MUX = 19,
-    DEMUX = 20
+    DEMUX = 20,
+    EVENT = 21
   };
 };
 

Modified: hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java Mon Sep  8 04:38:17 2014
@@ -32,7 +32,8 @@ public enum OperatorType implements org.
   HASHTABLEDUMMY(17),
   PTF(18),
   MUX(19),
-  DEMUX(20);
+  DEMUX(20),
+  EVENT(21);
 
   private final int value;
 
@@ -95,6 +96,8 @@ public enum OperatorType implements org.
         return MUX;
       case 20:
         return DEMUX;
+      case 21:
+        return EVENT;
       default:
         return null;
     }

Modified: hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php Mon Sep  8 04:38:17 2014
@@ -56,6 +56,7 @@ final class OperatorType {
   const PTF = 18;
   const MUX = 19;
   const DEMUX = 20;
+  const EVENT = 21;
   static public $__names = array(
     0 => 'JOIN',
     1 => 'MAPJOIN',
@@ -78,6 +79,7 @@ final class OperatorType {
     18 => 'PTF',
     19 => 'MUX',
     20 => 'DEMUX',
+    21 => 'EVENT',
   );
 }
 

Modified: hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py Mon Sep  8 04:38:17 2014
@@ -66,6 +66,7 @@ class OperatorType:
   PTF = 18
   MUX = 19
   DEMUX = 20
+  EVENT = 21
 
   _VALUES_TO_NAMES = {
     0: "JOIN",
@@ -89,6 +90,7 @@ class OperatorType:
     18: "PTF",
     19: "MUX",
     20: "DEMUX",
+    21: "EVENT",
   }
 
   _NAMES_TO_VALUES = {
@@ -113,6 +115,7 @@ class OperatorType:
     "PTF": 18,
     "MUX": 19,
     "DEMUX": 20,
+    "EVENT": 21,
   }
 
 class TaskType:

Modified: hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb Mon Sep  8 04:38:17 2014
@@ -42,8 +42,9 @@ module OperatorType
   PTF = 18
   MUX = 19
   DEMUX = 20
-  VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX"}
-  VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX]).freeze
+  EVENT = 21
+  VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT"}
+  VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT]).freeze
 end
 
 module TaskType

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt Mon Sep  8 04:38:17 2014
@@ -163,8 +163,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.PROJECTION)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.COLUMN).build();

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt Mon Sep  8 04:38:17 2014
@@ -155,8 +155,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.PROJECTION)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR).build();

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt Mon Sep  8 04:38:17 2014
@@ -128,7 +128,7 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.PROJECTION)
         .setNumArguments(1)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN).build();
   }

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt Mon Sep  8 04:38:17 2014
@@ -188,8 +188,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.FILTER)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.COLUMN).build();

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt Mon Sep  8 04:38:17 2014
@@ -164,8 +164,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.FILTER)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR).build();

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt Mon Sep  8 04:38:17 2014
@@ -164,8 +164,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.FILTER)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.SCALAR,
             VectorExpressionDescriptor.InputExpressionType.COLUMN).build();

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt Mon Sep  8 04:38:17 2014
@@ -184,9 +184,9 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.FILTER)
         .setNumArguments(3)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("string"),
-            VectorExpressionDescriptor.ArgumentType.getType("string"),
-            VectorExpressionDescriptor.ArgumentType.getType("string"))
+            VectorExpressionDescriptor.ArgumentType.STRING,
+            VectorExpressionDescriptor.ArgumentType.STRING,
+            VectorExpressionDescriptor.ArgumentType.STRING)
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR,

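[Editor's illustration, not part of this commit] Unlike the templates around it, this one has fixed string-typed arguments, so the patch references the ArgumentType.STRING constant directly instead of resolving it by name. A small sketch, assuming the descriptor class is org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor, of why the two forms should be interchangeable here:

    import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;

    public class ArgTypeEquivalence {
      public static void main(String[] args) {
        // Resolving by name and referencing the constant should yield the same enum value;
        // the direct reference simply skips the string lookup for a type that never varies.
        VectorExpressionDescriptor.ArgumentType byName =
            VectorExpressionDescriptor.ArgumentType.getType("string");
        System.out.println(byName == VectorExpressionDescriptor.ArgumentType.STRING); // true
      }
    }
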
Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnColumn.txt Mon Sep  8 04:38:17 2014
@@ -176,8 +176,8 @@ public class <ClassName> extends VectorE
         .setNumArguments(3)
         .setArgumentTypes(
             VectorExpressionDescriptor.ArgumentType.getType("long"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.COLUMN,

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt Mon Sep  8 04:38:17 2014
@@ -167,8 +167,8 @@ public class <ClassName> extends VectorE
         .setNumArguments(3)
         .setArgumentTypes(
             VectorExpressionDescriptor.ArgumentType.getType("long"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType3>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType3>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.COLUMN,

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt Mon Sep  8 04:38:17 2014
@@ -169,8 +169,8 @@ public class <ClassName> extends VectorE
         .setNumArguments(3)
         .setArgumentTypes(
             VectorExpressionDescriptor.ArgumentType.getType("long"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType3>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType3>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR,

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt Mon Sep  8 04:38:17 2014
@@ -154,8 +154,8 @@ public class <ClassName> extends VectorE
         .setNumArguments(3)
         .setArgumentTypes(
             VectorExpressionDescriptor.ArgumentType.getType("long"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType3>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType3>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR,

Modified: hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt (original)
+++ hive/branches/spark/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt Mon Sep  8 04:38:17 2014
@@ -155,8 +155,8 @@ public class <ClassName> extends VectorE
             VectorExpressionDescriptor.Mode.PROJECTION)
         .setNumArguments(2)
         .setArgumentTypes(
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
-            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<VectorExprArgType2>"))
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.SCALAR,
             VectorExpressionDescriptor.InputExpressionType.COLUMN).build();

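[Editor's illustration, not part of this commit] The vectorization code generator expands these templates into concrete expression classes, so after this rename the argument types registered in each generated descriptor come from the <VectorExprArgTypeN> fields rather than <OperandTypeN>. A rough sketch of what one expansion might look like for a long/long comparison; the class name and the builder prologue are illustrative, taken from the template structure rather than from these hunks.

    import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;

    // Roughly what ColumnCompareColumn.txt could expand to for a hypothetical
    // long==long comparison once both <VectorExprArgType> placeholders become "long".
    public class DescriptorSketch {
      public static VectorExpressionDescriptor.Descriptor descriptor() {
        return (new VectorExpressionDescriptor.Builder())
            .setMode(
                VectorExpressionDescriptor.Mode.PROJECTION)
            .setNumArguments(2)
            .setArgumentTypes(
                VectorExpressionDescriptor.ArgumentType.getType("long"),
                VectorExpressionDescriptor.ArgumentType.getType("long"))
            .setInputExpressionTypes(
                VectorExpressionDescriptor.InputExpressionType.COLUMN,
                VectorExpressionDescriptor.InputExpressionType.COLUMN)
            .build();
      }
    }
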
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java Mon Sep  8 04:38:17 2014
@@ -18,6 +18,18 @@
 
 package org.apache.hadoop.hive.ql;
 
+import java.io.DataInput;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+
 import org.antlr.runtime.TokenRewriteStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,7 +42,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
@@ -41,18 +52,6 @@ import org.apache.hadoop.hive.ql.session
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 
-import java.io.DataInput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.ConcurrentHashMap;
-
 /**
  * Context for Semantic Analyzers. Usage: not reusable - construct a new one for
  * each query should call clear() at end of use to remove temporary folders
@@ -191,7 +190,7 @@ public class Context {
    * @param scratchDir path of tmp directory
    */
   private Path getScratchDir(String scheme, String authority,
-                               boolean mkdir, String scratchDir) {
+      boolean mkdir, String scratchDir) {
 
     String fileSystem =  scheme + ":" + authority;
     Path dir = fsScratchDirs.get(fileSystem + "-" + TaskRunner.getTaskRunnerID());
@@ -203,11 +202,11 @@ public class Context {
         try {
           FileSystem fs = dirPath.getFileSystem(conf);
           dirPath = new Path(fs.makeQualified(dirPath).toString());
-          FsPermission fsPermission = new FsPermission(Short.parseShort(scratchDirPermission.trim(), 8));
+          FsPermission fsPermission = new FsPermission(scratchDirPermission);
 
-          if (!Utilities.createDirsWithPermission(conf, dirPath, fsPermission)) {
+          if (!fs.mkdirs(dirPath, fsPermission)) {
             throw new RuntimeException("Cannot make directory: "
-                                       + dirPath.toString());
+                + dirPath.toString());
           }
           if (isHDFSCleanup) {
             fs.deleteOnExit(dirPath);
@@ -233,7 +232,7 @@ public class Context {
       FileSystem fs = FileSystem.getLocal(conf);
       URI uri = fs.getUri();
       return getScratchDir(uri.getScheme(), uri.getAuthority(),
-                           mkdir, localScratchDir);
+          mkdir, localScratchDir);
     } catch (IOException e) {
       throw new RuntimeException (e);
     }
@@ -257,7 +256,7 @@ public class Context {
       URI uri = dir.toUri();
 
       Path newScratchDir = getScratchDir(uri.getScheme(), uri.getAuthority(),
-                           !explain, uri.getPath());
+          !explain, uri.getPath());
       LOG.info("New scratch dir is " + newScratchDir);
       return newScratchDir;
     } catch (IOException e) {
@@ -270,7 +269,7 @@ public class Context {
 
   private Path getExternalScratchDir(URI extURI) {
     return getScratchDir(extURI.getScheme(), extURI.getAuthority(),
-                         !explain, nonLocalScratchPath.toUri().getPath());
+        !explain, nonLocalScratchPath.toUri().getPath());
   }
 
   /**
@@ -283,7 +282,7 @@ public class Context {
         p.getFileSystem(conf).delete(p, true);
       } catch (Exception e) {
         LOG.warn("Error Removing Scratch: "
-                 + StringUtils.stringifyException(e));
+            + StringUtils.stringifyException(e));
       }
     }
     fsScratchDirs.clear();
@@ -305,7 +304,7 @@ public class Context {
    */
   public boolean isMRTmpFileURI(String uriStr) {
     return (uriStr.indexOf(executionId) != -1) &&
-      (uriStr.indexOf(MR_PREFIX) != -1);
+        (uriStr.indexOf(MR_PREFIX) != -1);
   }
 
   /**
@@ -315,7 +314,7 @@ public class Context {
    */
   public Path getMRTmpPath() {
     return new Path(getMRScratchDir(), MR_PREFIX +
-      nextPathId());
+        nextPathId());
   }
 
   /**
@@ -343,7 +342,7 @@ public class Context {
       return getExtTmpPathRelTo(path.getParent());
     }
     return new Path(getExternalScratchDir(extURI), EXT_PREFIX +
-      nextPathId());
+        nextPathId());
   }
 
   /**
@@ -353,8 +352,8 @@ public class Context {
    */
   public Path getExtTmpPathRelTo(Path path) {
     URI uri = path.toUri();
-    return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain, 
-    uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId());
+    return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain,
+        uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId());
   }
 
   /**

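[Editor's illustration, not part of this commit] The scratch-dir change above stops parsing the permission string by hand and creates the directory through the plain FileSystem API. A minimal, self-contained sketch of the old and new permission construction side by side; the "700" value stands in for whatever the scratch-dir permission setting holds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class ScratchDirSketch {
      public static void main(String[] args) throws Exception {
        String scratchDirPermission = "700";   // hypothetical configured value

        // Old style: parse the octal string manually, then wrap the short.
        FsPermission parsed = new FsPermission(Short.parseShort(scratchDirPermission.trim(), 8));

        // New style: FsPermission's String constructor parses the mode itself.
        FsPermission direct = new FsPermission(scratchDirPermission);
        System.out.println(parsed.equals(direct));   // should print true for a plain octal string

        // The directory is now created with the ordinary FileSystem call instead of a helper.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path dir = new Path(System.getProperty("java.io.tmpdir"), "hive_scratch_sketch");
        System.out.println(fs.mkdirs(dir, direct));
      }
    }
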
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Sep  8 04:38:17 2014
@@ -135,7 +135,6 @@ public class Driver implements CommandPr
   private String errorMessage;
   private String SQLState;
   private Throwable downstreamError;
-  private HiveTxnManager txnMgr;
 
   // A limit on the number of threads that can be launched
   private int maxthreads;
@@ -145,16 +144,6 @@ public class Driver implements CommandPr
 
   private String userName;
 
-  private void createTxnManager() throws SemanticException {
-    if (txnMgr == null) {
-      try {
-        txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
-      } catch (LockException e) {
-        throw new SemanticException(e.getMessage(), e);
-      }
-    }
-  }
-
   private boolean checkConcurrency() throws SemanticException {
     boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
     if (!supportConcurrency) {
@@ -868,7 +857,7 @@ public class Driver implements CommandPr
   // the input format.
   private int recordValidTxns() {
     try {
-      ValidTxnList txns = txnMgr.getValidTxns();
+      ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns();
       conf.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
       return 0;
     } catch (LockException e) {
@@ -893,7 +882,7 @@ public class Driver implements CommandPr
 
 
     try {
-      txnMgr.acquireLocks(plan, ctx, userName);
+      SessionState.get().getTxnMgr().acquireLocks(plan, ctx, userName);
       return 0;
     } catch (LockException e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
@@ -917,7 +906,7 @@ public class Driver implements CommandPr
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
 
     if (hiveLocks != null) {
-      ctx.getHiveTxnManager().getLockManager().releaseLocks(hiveLocks);
+      SessionState.get().getTxnMgr().getLockManager().releaseLocks(hiveLocks);
     }
     ctx.setHiveLocks(null);
 
@@ -1048,9 +1037,14 @@ public class Driver implements CommandPr
 
     boolean requireLock = false;
     boolean ckLock = false;
+    SessionState ss = SessionState.get();
     try {
       ckLock = checkConcurrency();
-      createTxnManager();
+      try {
+        ss.initTxnMgr(conf);
+      } catch (LockException e) {
+        throw new SemanticException(e.getMessage(), e);
+      }
     } catch (SemanticException e) {
       errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1074,7 +1068,7 @@ public class Driver implements CommandPr
     // the reason that we set the txn manager for the cxt here is because each
     // query has its own ctx object. The txn mgr is shared across the
     // same instance of Driver, which can run multiple queries.
-    ctx.setHiveTxnManager(txnMgr);
+    ctx.setHiveTxnManager(ss.getTxnMgr());
 
     if (ckLock) {
       boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
@@ -1671,9 +1665,6 @@ public class Driver implements CommandPr
             e.getMessage());
       }
     }
-    if (txnMgr != null) {
-      txnMgr.closeTxnManager();
-    }
   }
 
   public org.apache.hadoop.hive.ql.plan.api.Query getQueryPlan() throws IOException {

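[Editor's illustration, not part of this commit] With this change the Driver no longer owns a HiveTxnManager; the manager is initialized on and fetched from the SessionState, so a single instance is shared by every Driver in the session, and the Context is handed that same instance via ctx.setHiveTxnManager(ss.getTxnMgr()). A hedged sketch of the calling pattern, using only the accessors visible in this patch (initTxnMgr and getTxnMgr); whether initTxnMgr is a no-op when already initialized is not shown here.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
    import org.apache.hadoop.hive.ql.lockmgr.LockException;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class TxnMgrSketch {
      // Mirrors how the Driver now reaches the session-level transaction manager.
      static HiveTxnManager sessionTxnManager(HiveConf conf) throws LockException {
        SessionState ss = SessionState.get();   // current thread's session state
        ss.initTxnMgr(conf);                     // initialize the session's manager from the conf
        return ss.getTxnMgr();                   // shared across Drivers in this session
      }
    }
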
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java Mon Sep  8 04:38:17 2014
@@ -44,7 +44,7 @@ public class AutoProgressor {
   // Name of the class to report for
   String logClassName = null;
   int notificationInterval;
-  int timeout;
+  long timeout;
   Reporter reporter;
 
   class ReporterTask extends TimerTask {
@@ -116,7 +116,7 @@ public class AutoProgressor {
    * @param timeout - when the autoprogressor should stop reporting (in ms)
    */
   AutoProgressor(String logClassName, Reporter reporter,
-      int notificationInterval, int timeout) {
+      int notificationInterval, long timeout) {
     this.logClassName = logClassName;
     this.reporter = reporter;
     this.notificationInterval = notificationInterval;

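[Editor's illustration, not part of this commit] The commit does not say why the AutoProgressor timeout widens from int to long; one plausible motivation is simply range, since a millisecond timeout held in an int caps out below a month. A tiny arithmetic check of that assumption:

    public class TimeoutWidth {
      public static void main(String[] args) {
        // Integer.MAX_VALUE milliseconds is only about 24.8 days.
        System.out.println(Integer.MAX_VALUE / (1000L * 60 * 60 * 24) + " days"); // 24 days
        long thirtyDaysMs = 30L * 24 * 60 * 60 * 1000;   // needs a long to represent exactly
        System.out.println(thirtyDaysMs > Integer.MAX_VALUE);                     // true
      }
    }
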
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Sep  8 04:38:17 2014
@@ -507,21 +507,19 @@ public class DDLTask extends Task<DDLWor
       throw new HiveException("invalid configuration name " + showConf.getConfName());
     }
     String description = conf.getDescription();
-    String defaltValue = conf.getDefaultValue();
+    String defaultValue = conf.getDefaultValue();
     DataOutputStream output = getOutputStream(showConf.getResFile());
     try {
+      if (defaultValue != null) {
+        output.write(defaultValue.getBytes());
+      }
+      output.write(separator);
+      output.write(conf.typeString().getBytes());
+      output.write(separator);
       if (description != null) {
-        if (defaltValue != null) {
-          output.write(defaltValue.getBytes());
-        }
-        output.write(separator);
-        output.write(conf.typeString().getBytes());
-        output.write(separator);
-        if (description != null) {
-          output.write(description.replaceAll(" *\n *", " ").getBytes());
-        }
-        output.write(terminator);
+        output.write(description.replaceAll(" *\n *", " ").getBytes());
       }
+      output.write(terminator);
     } finally {
       output.close();
     }
@@ -4019,6 +4017,9 @@ public class DDLTask extends Task<DDLWor
       tbl.setDbName(names[0]);
       tbl.setTableName(names[1]);
 
+      // using old table object, hence reset the owner to current user for new table.
+      tbl.setOwner(SessionState.getUserFromAuthenticator());
+
       if (crtTbl.getLocation() != null) {
         tbl.setDataLocation(new Path(crtTbl.getLocation()));
       } else {

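[Editor's illustration, not part of this commit] The rewritten SHOW CONF block above always emits the default value, the type, both separators, and the terminator, appending the description only when one exists; the old code skipped the entire row (terminator included) for properties without a description. A minimal sketch of the resulting row for that case; the separator and terminator characters and the sample values are hypothetical, since the real ones are DDLTask fields not shown in this hunk.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;

    public class ShowConfRowSketch {
      public static void main(String[] args) throws Exception {
        int separator = '\t';              // hypothetical separator byte
        int terminator = '\n';             // hypothetical terminator byte
        String defaultValue = "10000";     // illustrative default
        String type = "INT";               // illustrative type string
        String description = null;         // the case the old code dropped entirely

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(buf);
        // New ordering: value and type always written, description only when present,
        // terminator always closes the row.
        if (defaultValue != null) {
          output.write(defaultValue.getBytes());
        }
        output.write(separator);
        output.write(type.getBytes());
        output.write(separator);
        if (description != null) {
          output.write(description.replaceAll(" *\n *", " ").getBytes());
        }
        output.write(terminator);
        output.close();
        System.out.print(buf.toString());  // 10000<TAB>INT<TAB><newline>
      }
    }
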
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java Mon Sep  8 04:38:17 2014
@@ -53,7 +53,7 @@ public class DefaultFetchFormatter<T> im
   private SerDe initializeSerde(Configuration conf, Properties props) throws Exception {
     String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE);
     Class<? extends SerDe> serdeClass = Class.forName(serdeName, true,
-        JavaUtils.getClassLoader()).asSubclass(SerDe.class);
+        Utilities.getSessionSpecifiedClassLoader()).asSubclass(SerDe.class);
     // cast only needed for Hadoop 0.17 compatibility
     SerDe serde = ReflectionUtils.newInstance(serdeClass, null);