Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 05:44:26 UTC

svn commit: r1629562 [3/38] - in /hive/branches/spark: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ contrib/src...

Modified: hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java (original)
+++ hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java Mon Oct  6 03:44:13 2014
@@ -23,14 +23,10 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 import java.sql.SQLWarning;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.hive.service.cli.RowSet;
-import org.apache.hive.service.cli.RowSetFactory;
 import org.apache.hive.service.cli.thrift.TCLIService;
 import org.apache.hive.service.cli.thrift.TCancelOperationReq;
 import org.apache.hive.service.cli.thrift.TCancelOperationResp;
@@ -42,9 +38,6 @@ import org.apache.hive.service.cli.thrif
 import org.apache.hive.service.cli.thrift.TGetOperationStatusResp;
 import org.apache.hive.service.cli.thrift.TOperationHandle;
 import org.apache.hive.service.cli.thrift.TSessionHandle;
-import org.apache.hive.service.cli.thrift.TFetchResultsReq;
-import org.apache.hive.service.cli.thrift.TFetchResultsResp;
-import org.apache.hive.service.cli.thrift.TFetchOrientation;
 
 /**
  * HiveStatement.
@@ -84,27 +77,6 @@ public class HiveStatement implements ja
    */
   private boolean isClosed = false;
 
-  /**
-   * Keep state so we can fail certain calls made after cancel().
-   */
-  private boolean isCancelled = false;
-
-  /**
-   * Keep this state so we can know whether the query in this statement is closed.
-   */
-  private boolean isQueryClosed = false;
-
-  /**
-   * Keep this state so we can know whether the query logs are being generated in HS2.
-   */
-  private boolean isLogBeingGenerated = true;
-
-  /**
-   * Keep this state so we can know whether the statement is submitted to HS2 and start execution
-   * successfully.
-   */
-  private boolean isExecuteStatementFailed = false;
-
   // A fair reentrant lock
   private ReentrantLock transportLock = new ReentrantLock(true);
 
@@ -141,9 +113,6 @@ public class HiveStatement implements ja
   @Override
   public void cancel() throws SQLException {
     checkConnection("cancel");
-    if (isCancelled) {
-      return;
-    }
 
     transportLock.lock();
     try {
@@ -159,7 +128,6 @@ public class HiveStatement implements ja
     } finally {
       transportLock.unlock();
     }
-    isCancelled = true;
   }
 
   /*
@@ -199,8 +167,6 @@ public class HiveStatement implements ja
     } finally {
       transportLock.unlock();
     }
-    isQueryClosed = true;
-    isExecuteStatementFailed = false;
     stmtHandle = null;
   }
 
@@ -236,7 +202,6 @@ public class HiveStatement implements ja
     checkConnection("execute");
 
     closeClientOperation();
-    initFlags();
 
     TExecuteStatementReq execReq = new TExecuteStatementReq(sessHandle, sql);
     /**
@@ -253,12 +218,9 @@ public class HiveStatement implements ja
       TExecuteStatementResp execResp = client.ExecuteStatement(execReq);
       Utils.verifySuccessWithInfo(execResp.getStatus());
       stmtHandle = execResp.getOperationHandle();
-      isExecuteStatementFailed = false;
     } catch (SQLException eS) {
-      isExecuteStatementFailed = true;
       throw eS;
     } catch (Exception ex) {
-      isExecuteStatementFailed = true;
       throw new SQLException(ex.toString(), "08S01", ex);
     } finally {
       transportLock.unlock();
@@ -304,14 +266,11 @@ public class HiveStatement implements ja
           }
         }
       } catch (SQLException e) {
-        isLogBeingGenerated = false;
         throw e;
       } catch (Exception e) {
-        isLogBeingGenerated = false;
         throw new SQLException(e.toString(), "08S01", e);
       }
     }
-    isLogBeingGenerated = false;
 
     // The query should be completed by now
     if (!stmtHandle.isHasResultSet()) {
@@ -319,7 +278,7 @@ public class HiveStatement implements ja
     }
     resultSet =  new HiveQueryResultSet.Builder(this).setClient(client).setSessionHandle(sessHandle)
         .setStmtHandle(stmtHandle).setMaxRows(maxRows).setFetchSize(fetchSize)
-        .setScrollable(isScrollableResultset).setTransportLock(transportLock)
+        .setScrollable(isScrollableResultset)
         .build();
     return true;
   }
@@ -330,13 +289,6 @@ public class HiveStatement implements ja
     }
   }
 
-  private void initFlags() {
-    isCancelled = false;
-    isQueryClosed = false;
-    isLogBeingGenerated = true;
-    isExecuteStatementFailed = false;
-  }
-
   /*
    * (non-Javadoc)
    *
@@ -761,93 +713,4 @@ public class HiveStatement implements ja
     throw new SQLException("Cannot unwrap to " + iface);
   }
 
-  /**
-   * Check whether query execution might be producing more logs to be fetched.
-   * This method is a public API for usage outside of Hive, although it is not part of the
-   * interface java.sql.Statement.
-   * @return true if query execution might be producing more logs. It does not indicate if last
-   *         log lines have been fetched by getQueryLog.
-   */
-  public boolean hasMoreLogs() {
-    return isLogBeingGenerated;
-  }
-
-  /**
-   * Get the execution logs of the given SQL statement.
-   * This method is a public API for usage outside of Hive, although it is not part of the
-   * interface java.sql.Statement.
-   * This method gets the incremental logs during SQL execution, and uses fetchSize holden by
-   * HiveStatement object.
-   * @return a list of logs. It can be empty if there are no new logs to be retrieved at that time.
-   * @throws SQLException
-   * @throws ClosedOrCancelledStatementException if statement has been cancelled or closed
-   */
-  public List<String> getQueryLog() throws SQLException, ClosedOrCancelledStatementException {
-    return getQueryLog(true, fetchSize);
-  }
-
-  /**
-   * Get the execution logs of the given SQL statement.
-   * This method is a public API for usage outside of Hive, although it is not part of the
-   * interface java.sql.Statement.
-   * @param incremental indicate getting logs either incrementally or from the beginning,
-   *                    when it is true or false.
-   * @param fetchSize the number of lines to fetch
-   * @return a list of logs. It can be empty if there are no new logs to be retrieved at that time.
-   * @throws SQLException
-   * @throws ClosedOrCancelledStatementException if statement has been cancelled or closed
-   */
-  public List<String> getQueryLog(boolean incremental, int fetchSize)
-      throws SQLException, ClosedOrCancelledStatementException {
-    checkConnection("getQueryLog");
-    if (isCancelled) {
-      throw new ClosedOrCancelledStatementException("Method getQueryLog() failed. The " +
-          "statement has been closed or cancelled.");
-    }
-
-    List<String> logs = new ArrayList<String>();
-    TFetchResultsResp tFetchResultsResp = null;
-    transportLock.lock();
-    try {
-      if (stmtHandle != null) {
-        TFetchResultsReq tFetchResultsReq = new TFetchResultsReq(stmtHandle,
-            getFetchOrientation(incremental), fetchSize);
-        tFetchResultsReq.setFetchType((short)1);
-        tFetchResultsResp = client.FetchResults(tFetchResultsReq);
-        Utils.verifySuccessWithInfo(tFetchResultsResp.getStatus());
-      } else {
-        if (isQueryClosed) {
-          throw new ClosedOrCancelledStatementException("Method getQueryLog() failed. The " +
-              "statement has been closed or cancelled.");
-        }
-        if (isExecuteStatementFailed) {
-          throw new SQLException("Method getQueryLog() failed. Because the stmtHandle in " +
-              "HiveStatement is null and the statement execution might fail.");
-        } else {
-          return logs;
-        }
-      }
-    } catch (SQLException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SQLException("Error when getting query log: " + e, e);
-    } finally {
-      transportLock.unlock();
-    }
-
-    RowSet rowSet = RowSetFactory.create(tFetchResultsResp.getResults(),
-        connection.getProtocol());
-    for (Object[] row : rowSet) {
-      logs.add((String)row[0]);
-    }
-    return logs;
-  }
-
-  private TFetchOrientation getFetchOrientation(boolean incremental) {
-    if (incremental) {
-      return TFetchOrientation.FETCH_NEXT;
-    } else {
-      return TFetchOrientation.FETCH_FIRST;
-    }
-  }
 }

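The hunk above removes the Hive-specific hasMoreLogs()/getQueryLog() extension from HiveStatement, together with the TFetchResults plumbing and state flags it relied on. For readers tracking what client code loses here, a polling loop along the following lines is the usual consumer of that API; this is a hedged sketch only, with the JDBC URL, credentials and query purely illustrative.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    import org.apache.hive.jdbc.HiveStatement;

    public class QueryLogTailer {
      public static void main(String[] args) throws Exception {
        // Illustrative connection settings; substitute a real HiveServer2 URL, user and query.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "hive", "");
             Statement stmt = conn.createStatement()) {

          // Run the statement in a helper thread so the main thread can poll for logs
          // while the query is still executing on HiveServer2.
          Thread runner = new Thread(() -> {
            try {
              stmt.execute("SELECT count(*) FROM some_table");
            } catch (Exception e) {
              e.printStackTrace();
            }
          });
          runner.start();

          HiveStatement hiveStmt = (HiveStatement) stmt;
          // hasMoreLogs() and getQueryLog() are exactly the methods the hunk above removes.
          while (hiveStmt.hasMoreLogs()) {
            for (String line : hiveStmt.getQueryLog()) {
              System.out.println(line);
            }
            Thread.sleep(500L);
          }
          runner.join();
        }
      }
    }
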
Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql Mon Oct  6 03:44:13 2014
@@ -836,14 +836,14 @@ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_P
 -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
 -- -----------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE COMPACTION_QUEUE(
-	CQ_ID bigint NOT NULL,
+	CQ_ID int NOT NULL,
 	CQ_DATABASE varchar(128) NOT NULL,
 	CQ_TABLE varchar(128) NOT NULL,
 	CQ_PARTITION varchar(767) NULL,
 	CQ_STATE char(1) NOT NULL,
 	CQ_TYPE char(1) NOT NULL,
 	CQ_WORKER_ID varchar(128) NULL,
-	CQ_START bigint NULL,
+	CQ_START int NULL,
 	CQ_RUN_AS varchar(128) NULL,
 PRIMARY KEY CLUSTERED 
 (
@@ -852,23 +852,23 @@ PRIMARY KEY CLUSTERED 
 );
 
 CREATE TABLE COMPLETED_TXN_COMPONENTS(
-	CTC_TXNID bigint NULL,
+	CTC_TXNID int NULL,
 	CTC_DATABASE varchar(128) NOT NULL,
 	CTC_TABLE varchar(128) NULL,
 	CTC_PARTITION varchar(767) NULL
 );
 
 CREATE TABLE HIVE_LOCKS(
-	HL_LOCK_EXT_ID bigint NOT NULL,
-	HL_LOCK_INT_ID bigint NOT NULL,
-	HL_TXNID bigint NULL,
+	HL_LOCK_EXT_ID int NOT NULL,
+	HL_LOCK_INT_ID int NOT NULL,
+	HL_TXNID int NULL,
 	HL_DB varchar(128) NOT NULL,
 	HL_TABLE varchar(128) NULL,
 	HL_PARTITION varchar(767) NULL,
 	HL_LOCK_STATE char(1) NOT NULL,
 	HL_LOCK_TYPE char(1) NOT NULL,
-	HL_LAST_HEARTBEAT bigint NOT NULL,
-	HL_ACQUIRED_AT bigint NULL,
+	HL_LAST_HEARTBEAT int NOT NULL,
+	HL_ACQUIRED_AT int NULL,
 	HL_USER varchar(128) NOT NULL,
 	HL_HOST varchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
@@ -879,28 +879,28 @@ PRIMARY KEY CLUSTERED 
 );
 
 CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
-	NCQ_NEXT bigint NOT NULL
+	NCQ_NEXT int NOT NULL
 );
 
 INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
 
 CREATE TABLE NEXT_LOCK_ID(
-	NL_NEXT bigint NOT NULL
+	NL_NEXT int NOT NULL
 );
 
 INSERT INTO NEXT_LOCK_ID VALUES(1);
 
 CREATE TABLE NEXT_TXN_ID(
-	NTXN_NEXT bigint NOT NULL
+	NTXN_NEXT int NOT NULL
 );
 
 INSERT INTO NEXT_TXN_ID VALUES(1);
 
 CREATE TABLE TXNS(
-	TXN_ID bigint NOT NULL,
+	TXN_ID int NOT NULL,
 	TXN_STATE char(1) NOT NULL,
-	TXN_STARTED bigint NOT NULL,
-	TXN_LAST_HEARTBEAT bigint NOT NULL,
+	TXN_STARTED int NOT NULL,
+	TXN_LAST_HEARTBEAT int NOT NULL,
 	TXN_USER varchar(128) NOT NULL,
 	TXN_HOST varchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
@@ -910,7 +910,7 @@ PRIMARY KEY CLUSTERED 
 );
 
 CREATE TABLE TXN_COMPONENTS(
-	TC_TXNID bigint NULL,
+	TC_TXNID int NULL,
 	TC_DATABASE varchar(128) NOT NULL,
 	TC_TABLE varchar(128) NULL,
 	TC_PARTITION varchar(767) NULL

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql Mon Oct  6 03:44:13 2014
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE;
 
 :r 002-HIVE-7784.mssql.sql;
-:r 003-HIVE-8239.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE;

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql Mon Oct  6 03:44:13 2014
@@ -1,6 +1,5 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual;
 
-@019-HIVE-7118.oracle.sql;
 @020-HIVE-7784.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java Mon Oct  6 03:44:13 2014
@@ -135,9 +135,9 @@ public class FieldSchema implements org.
     String comment)
   {
     this();
-    this.name = name;
-    this.type = type;
-    this.comment = comment;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
+    this.type = org.apache.hive.common.util.HiveStringUtils.intern(type);
+    this.comment = org.apache.hive.common.util.HiveStringUtils.intern(comment);
   }
 
   /**
@@ -145,13 +145,13 @@ public class FieldSchema implements org.
    */
   public FieldSchema(FieldSchema other) {
     if (other.isSetName()) {
-      this.name = other.name;
+      this.name = org.apache.hive.common.util.HiveStringUtils.intern(other.name);
     }
     if (other.isSetType()) {
-      this.type = other.type;
+      this.type = org.apache.hive.common.util.HiveStringUtils.intern(other.type);
     }
     if (other.isSetComment()) {
-      this.comment = other.comment;
+      this.comment = org.apache.hive.common.util.HiveStringUtils.intern(other.comment);
     }
   }
 
@@ -171,7 +171,7 @@ public class FieldSchema implements org.
   }
 
   public void setName(String name) {
-    this.name = name;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
   }
 
   public void unsetName() {
@@ -194,7 +194,7 @@ public class FieldSchema implements org.
   }
 
   public void setType(String type) {
-    this.type = type;
+    this.type = org.apache.hive.common.util.HiveStringUtils.intern(type);
   }
 
   public void unsetType() {
@@ -217,7 +217,7 @@ public class FieldSchema implements org.
   }
 
   public void setComment(String comment) {
-    this.comment = comment;
+    this.comment = org.apache.hive.common.util.HiveStringUtils.intern(comment);
   }
 
   public void unsetComment() {

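The FieldSchema change above (and the matching edits to Partition, SerDeInfo and StorageDescriptor below) routes every stored string through org.apache.hive.common.util.HiveStringUtils.intern(), so the many identical column names, type strings and parameter keys held by a busy metastore share one String instance instead of one copy per object. The snippet below illustrates the idea using Guava's Interners (assumed to be on the classpath) rather than Hive's own helper; treat it as a sketch of the technique, not the HiveStringUtils implementation.

    import com.google.common.collect.Interner;
    import com.google.common.collect.Interners;

    public class InternDemo {
      // A weak interner keeps one canonical copy of each distinct string and lets it be
      // garbage-collected once nothing else references it.
      private static final Interner<String> INTERNER = Interners.newWeakInterner();

      public static void main(String[] args) {
        // Simulate two objects deserialized from Thrift: equal but distinct String instances.
        String typeA = new String("string");
        String typeB = new String("string");
        System.out.println(typeA == typeB);          // false: two copies in memory

        String internedA = INTERNER.intern(typeA);
        String internedB = INTERNER.intern(typeB);
        System.out.println(internedA == internedB);  // true: one shared copy
      }
    }
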
Modified: hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java Mon Oct  6 03:44:13 2014
@@ -182,14 +182,14 @@ public class Partition implements org.ap
   {
     this();
     this.values = values;
-    this.dbName = dbName;
-    this.tableName = tableName;
+    this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(dbName);
+    this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(tableName);
     this.createTime = createTime;
     setCreateTimeIsSet(true);
     this.lastAccessTime = lastAccessTime;
     setLastAccessTimeIsSet(true);
     this.sd = sd;
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -205,10 +205,10 @@ public class Partition implements org.ap
       this.values = __this__values;
     }
     if (other.isSetDbName()) {
-      this.dbName = other.dbName;
+      this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(other.dbName);
     }
     if (other.isSetTableName()) {
-      this.tableName = other.tableName;
+      this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(other.tableName);
     }
     this.createTime = other.createTime;
     this.lastAccessTime = other.lastAccessTime;
@@ -222,9 +222,9 @@ public class Partition implements org.ap
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -296,7 +296,7 @@ public class Partition implements org.ap
   }
 
   public void setDbName(String dbName) {
-    this.dbName = dbName;
+    this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(dbName);
   }
 
   public void unsetDbName() {
@@ -319,7 +319,7 @@ public class Partition implements org.ap
   }
 
   public void setTableName(String tableName) {
-    this.tableName = tableName;
+    this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(tableName);
   }
 
   public void unsetTableName() {
@@ -420,7 +420,7 @@ public class Partition implements org.ap
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java Mon Oct  6 03:44:13 2014
@@ -137,9 +137,9 @@ public class SerDeInfo implements org.ap
     Map<String,String> parameters)
   {
     this();
-    this.name = name;
-    this.serializationLib = serializationLib;
-    this.parameters = parameters;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
+    this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(serializationLib);
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -147,10 +147,10 @@ public class SerDeInfo implements org.ap
    */
   public SerDeInfo(SerDeInfo other) {
     if (other.isSetName()) {
-      this.name = other.name;
+      this.name = org.apache.hive.common.util.HiveStringUtils.intern(other.name);
     }
     if (other.isSetSerializationLib()) {
-      this.serializationLib = other.serializationLib;
+      this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(other.serializationLib);
     }
     if (other.isSetParameters()) {
       Map<String,String> __this__parameters = new HashMap<String,String>();
@@ -159,9 +159,9 @@ public class SerDeInfo implements org.ap
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -185,7 +185,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setName(String name) {
-    this.name = name;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
   }
 
   public void unsetName() {
@@ -208,7 +208,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setSerializationLib(String serializationLib) {
-    this.serializationLib = serializationLib;
+    this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(serializationLib);
   }
 
   public void unsetSerializationLib() {
@@ -242,7 +242,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java Mon Oct  6 03:44:13 2014
@@ -216,17 +216,17 @@ public class StorageDescriptor implement
   {
     this();
     this.cols = cols;
-    this.location = location;
-    this.inputFormat = inputFormat;
-    this.outputFormat = outputFormat;
+    this.location = org.apache.hive.common.util.HiveStringUtils.intern(location);
+    this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(inputFormat);
+    this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(outputFormat);
     this.compressed = compressed;
     setCompressedIsSet(true);
     this.numBuckets = numBuckets;
     setNumBucketsIsSet(true);
     this.serdeInfo = serdeInfo;
-    this.bucketCols = bucketCols;
+    this.bucketCols = org.apache.hive.common.util.HiveStringUtils.intern(bucketCols);
     this.sortCols = sortCols;
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -242,13 +242,13 @@ public class StorageDescriptor implement
       this.cols = __this__cols;
     }
     if (other.isSetLocation()) {
-      this.location = other.location;
+      this.location = org.apache.hive.common.util.HiveStringUtils.intern(other.location);
     }
     if (other.isSetInputFormat()) {
-      this.inputFormat = other.inputFormat;
+      this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(other.inputFormat);
     }
     if (other.isSetOutputFormat()) {
-      this.outputFormat = other.outputFormat;
+      this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(other.outputFormat);
     }
     this.compressed = other.compressed;
     this.numBuckets = other.numBuckets;
@@ -276,9 +276,9 @@ public class StorageDescriptor implement
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -356,7 +356,7 @@ public class StorageDescriptor implement
   }
 
   public void setLocation(String location) {
-    this.location = location;
+    this.location = org.apache.hive.common.util.HiveStringUtils.intern(location);
   }
 
   public void unsetLocation() {
@@ -379,7 +379,7 @@ public class StorageDescriptor implement
   }
 
   public void setInputFormat(String inputFormat) {
-    this.inputFormat = inputFormat;
+    this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(inputFormat);
   }
 
   public void unsetInputFormat() {
@@ -402,7 +402,7 @@ public class StorageDescriptor implement
   }
 
   public void setOutputFormat(String outputFormat) {
-    this.outputFormat = outputFormat;
+    this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(outputFormat);
   }
 
   public void unsetOutputFormat() {
@@ -507,7 +507,7 @@ public class StorageDescriptor implement
   }
 
   public void setBucketCols(List<String> bucketCols) {
-    this.bucketCols = bucketCols;
+    this.bucketCols = org.apache.hive.common.util.HiveStringUtils.intern(bucketCols);
   }
 
   public void unsetBucketCols() {
@@ -579,7 +579,7 @@ public class StorageDescriptor implement
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Mon Oct  6 03:44:13 2014
@@ -1424,14 +1424,10 @@ public class HiveMetaStore extends Thrif
         if (!success) {
           ms.rollbackTransaction();
         } else if (deleteData && !isExternal) {
-          boolean ifPurge = false;
-          if (envContext != null){
-            ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
-          }
           // Delete the data in the partitions which have other locations
-          deletePartitionData(partPaths, ifPurge);
+          deletePartitionData(partPaths);
           // Delete the data in the table
-          deleteTableData(tblPath, ifPurge);
+          deleteTableData(tblPath);
           // ok even if the data is not deleted
         }
         for (MetaStoreEventListener listener : listeners) {
@@ -1448,21 +1444,9 @@ public class HiveMetaStore extends Thrif
      * @param tablePath
      */
     private void deleteTableData(Path tablePath) {
-      deleteTableData(tablePath, false);
-    }
-
-    /**
-     * Deletes the data in a table's location, if it fails logs an error
-     *
-     * @param tablePath
-     * @param ifPurge completely purge the table (skipping trash) while removing
-     *                data from warehouse
-     */
-    private void deleteTableData(Path tablePath, boolean ifPurge) {
-
       if (tablePath != null) {
         try {
-          wh.deleteDir(tablePath, true, ifPurge);
+          wh.deleteDir(tablePath, true);
         } catch (Exception e) {
           LOG.error("Failed to delete table directory: " + tablePath +
               " " + e.getMessage());
@@ -1477,22 +1461,10 @@ public class HiveMetaStore extends Thrif
      * @param partPaths
      */
     private void deletePartitionData(List<Path> partPaths) {
-      deletePartitionData(partPaths, false);
-    }
-
-    /**
-    * Give a list of partitions' locations, tries to delete each one
-    * and for each that fails logs an error.
-    *
-    * @param partPaths
-    * @param ifPurge completely purge the partition (skipping trash) while
-    *                removing data from warehouse
-    */
-    private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
       if (partPaths != null && !partPaths.isEmpty()) {
         for (Path partPath : partPaths) {
           try {
-            wh.deleteDir(partPath, true, ifPurge);
+            wh.deleteDir(partPath, true);
           } catch (Exception e) {
             LOG.error("Failed to delete partition directory: " + partPath +
                 " " + e.getMessage());

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Mon Oct  6 03:44:13 2014
@@ -763,34 +763,18 @@ public class HiveMetaStoreClient impleme
   }
 
   /**
-   * {@inheritDoc}
-   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   * @param name
+   * @param dbname
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+   *      java.lang.String, boolean)
    */
   @Override
-  public void dropTable(String dbname, String name, boolean deleteData,
-      boolean ignoreUnknownTab) throws MetaException, TException,
-      NoSuchObjectException, UnsupportedOperationException {
-    dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
-  }
-
-  /**
-   * Drop the table and choose whether to save the data in the trash.
-   * @param ifPurge completely purge the table (skipping trash) while removing
-   *                data from warehouse
-   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
-   */
-  public void dropTable(String dbname, String name, boolean deleteData,
-      boolean ignoreUnknownTab, boolean ifPurge)
-      throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
-    //build new environmentContext with ifPurge;
-    EnvironmentContext envContext = null;
-    if(ifPurge){
-      Map<String, String> warehouseOptions = null;
-      warehouseOptions = new HashMap<String, String>();
-      warehouseOptions.put("ifPurge", "TRUE");
-      envContext = new EnvironmentContext(warehouseOptions);
-    }
-    dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+  public void dropTable(String dbname, String name)
+      throws NoSuchObjectException, MetaException, TException {
+    dropTable(dbname, name, true, true, null);
   }
 
   /** {@inheritDoc} */
@@ -802,37 +786,23 @@ public class HiveMetaStoreClient impleme
   }
 
   /**
-   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
-   */
-  @Override
-  public void dropTable(String dbname, String name)
-      throws NoSuchObjectException, MetaException, TException {
-    dropTable(dbname, name, true, true, null);
-  }
-
-  /**
-   * Drop the table and choose whether to: delete the underlying table data;
-   * throw if the table doesn't exist; save the data in the trash.
-   *
    * @param dbname
    * @param name
    * @param deleteData
    *          delete the underlying data or just delete the table in metadata
-   * @param ignoreUnknownTab
-   *          don't throw if the requested table doesn't exist
-   * @param envContext
-   *          for communicating with thrift
-   * @throws MetaException
-   *           could not drop table properly
    * @throws NoSuchObjectException
-   *           the table wasn't found
+   * @throws MetaException
    * @throws TException
-   *           a thrift communication error occurred
-   * @throws UnsupportedOperationException
-   *           dropping an index table is not allowed
    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
    *      java.lang.String, boolean)
    */
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+  }
+
   public void dropTable(String dbname, String name, boolean deleteData,
       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
       NoSuchObjectException, UnsupportedOperationException {
@@ -1689,12 +1659,7 @@ public class HiveMetaStoreClient impleme
 
   @Override
   public ValidTxnList getValidTxns() throws TException {
-    return TxnHandler.createValidTxnList(client.get_open_txns(), 0);
-  }
-
-  @Override
-  public ValidTxnList getValidTxns(long currentTxn) throws TException {
-    return TxnHandler.createValidTxnList(client.get_open_txns(), currentTxn);
+    return TxnHandler.createValidTxnList(client.get_open_txns());
   }
 
   @Override

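The HiveMetaStoreClient hunk above drops the dropTable(..., boolean ifPurge) overload, and its removed body shows how the purge flag was communicated: it was packed into an EnvironmentContext under the key "ifPurge" and passed down to the Thrift call. A condensed sketch of that removed pattern is kept here for reference; the class name and the final call in the comment are illustrative.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    public class PurgeDropExample {

      // Mirrors the overload removed above: when ifPurge is set, the flag travels to the
      // metastore server inside an EnvironmentContext rather than as a Thrift argument.
      static EnvironmentContext buildPurgeContext(boolean ifPurge) {
        if (!ifPurge) {
          return null;
        }
        Map<String, String> warehouseOptions = new HashMap<String, String>();
        warehouseOptions.put("ifPurge", "TRUE");
        return new EnvironmentContext(warehouseOptions);
      }

      public static void main(String[] args) {
        EnvironmentContext ctx = buildPurgeContext(true);
        // A caller would then invoke:
        //   client.dropTable(dbname, name, deleteData, ignoreUnknownTab, ctx);
        System.out.println(ctx);
      }
    }
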
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java Mon Oct  6 03:44:13 2014
@@ -37,14 +37,12 @@ public class HiveMetaStoreFsImpl impleme
 
   @Override
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      boolean ifPurge, Configuration conf) throws MetaException {
+      Configuration conf) throws MetaException {
     LOG.info("deleting  " + f);
     HadoopShims hadoopShim = ShimLoader.getHadoopShims();
 
     try {
-      if (ifPurge) {
-        LOG.info("Not moving "+ f +" to trash");
-      } else if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
+      if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
         LOG.info("Moved to trash: " + f);
         return true;
       }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Mon Oct  6 03:44:13 2014
@@ -192,10 +192,6 @@ public interface IMetaStoreClient {
    *          The database for this table
    * @param tableName
    *          The table to drop
-   * @param deleteData
-   *          Should we delete the underlying data
-   * @param ignoreUnknownTab
-   *          don't throw if the requested table doesn't exist
    * @throws MetaException
    *           Could not drop table properly.
    * @throws NoSuchObjectException
@@ -204,16 +200,7 @@ public interface IMetaStoreClient {
    *           A thrift communication error occurred
    */
   void dropTable(String dbname, String tableName, boolean deleteData,
-      boolean ignoreUnknownTab) throws MetaException, TException,
-      NoSuchObjectException;
-
-  /**
-   * @param ifPurge
-   *          completely purge the table (skipping trash) while removing data from warehouse
-   * @see #dropTable(String, String, boolean, boolean)
-   */
-  public void dropTable(String dbname, String tableName, boolean deleteData,
-      boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+      boolean ignoreUknownTab) throws MetaException, TException,
       NoSuchObjectException;
 
   /**
@@ -239,9 +226,6 @@ public interface IMetaStoreClient {
   void dropTable(String tableName, boolean deleteData)
       throws MetaException, UnknownTableException, TException, NoSuchObjectException;
 
-  /**
-   * @see #dropTable(String, String, boolean, boolean)
-   */
   void dropTable(String dbname, String tableName)
       throws MetaException, TException, NoSuchObjectException;
 
@@ -1086,15 +1070,6 @@ public interface IMetaStoreClient {
   ValidTxnList getValidTxns() throws TException;
 
   /**
-   * Get a structure that details valid transactions.
-   * @param currentTxn The current transaction of the caller.  This will be removed from the
-   *                   exceptions list so that the caller sees records from his own transaction.
-   * @return list of valid transactions
-   * @throws TException
-   */
-  ValidTxnList getValidTxns(long currentTxn) throws TException;
-
-  /**
    * Initiate a transaction.
    * @param user User who is opening this transaction.  This is the Hive user,
    *             not necessarily the OS user.  It is assumed that this user has already been

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java Mon Oct  6 03:44:13 2014
@@ -32,12 +32,11 @@ public interface MetaStoreFS {
    * delete a directory
    *
    * @param f
-   * @param ifPurge
    * @param recursive
    * @return true on success
    * @throws MetaException
    */
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      boolean ifPurge, Configuration conf) throws MetaException;
+      Configuration conf) throws MetaException;
 
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java Mon Oct  6 03:44:13 2014
@@ -57,6 +57,6 @@ public class TSetIpAddressProcessor<I ex
   }
 
   protected void setIpAddress(final Socket inSocket) {
-    HMSHandler.setIpAddress(inSocket.getInetAddress().getHostAddress());
+    HMSHandler.setIpAddress(inSocket.getInetAddress().toString());
   }
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Mon Oct  6 03:44:13 2014
@@ -224,12 +224,8 @@ public class Warehouse {
   }
 
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
-    return deleteDir(f, recursive, false);
-  }
-
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
     FileSystem fs = getFs(f);
-    return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
+    return fsHandler.deleteDir(fs, f, recursive, conf);
   }
 
   public boolean isEmpty(Path path) throws IOException, MetaException {

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Mon Oct  6 03:44:13 2014
@@ -233,22 +233,12 @@ public class TxnHandler {
     }
   }
 
-  /**
-   * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
-   * {@link org.apache.hadoop.hive.common.ValidTxnList}.
-   * @param txns txn list from the metastore
-   * @param currentTxn Current transaction that the user has open.  If this is greater than 0 it
-   *                   will be removed from the exceptions list so that the user sees his own
-   *                   transaction as valid.
-   * @return a valid txn list.
-   */
-  public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns, long currentTxn) {
+  public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns) {
     long highWater = txns.getTxn_high_water_mark();
     Set<Long> open = txns.getOpen_txns();
-    long[] exceptions = new long[open.size() - (currentTxn > 0 ? 1 : 0)];
+    long[] exceptions = new long[open.size()];
     int i = 0;
     for(long txn: open) {
-      if (currentTxn > 0 && currentTxn == txn) continue;
       exceptions[i++] = txn;
     }
     return new ValidTxnListImpl(exceptions, highWater);

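The TxnHandler hunk above reverts createValidTxnList() to its single-argument form. The removed two-argument variant filtered the caller's own transaction out of the exceptions array, so a reader would treat its own writes as valid. A minimal standalone illustration of that filtering, using plain Java and arbitrarily chosen transaction ids:

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class ValidTxnFilterDemo {
      // Same filtering the removed two-argument createValidTxnList performed:
      // drop currentTxn (if > 0) from the open-transaction exception list.
      static long[] exceptionsExcluding(Set<Long> openTxns, long currentTxn) {
        long[] exceptions = new long[openTxns.size() - (currentTxn > 0 ? 1 : 0)];
        int i = 0;
        for (long txn : openTxns) {
          if (currentTxn > 0 && currentTxn == txn) {
            continue;
          }
          exceptions[i++] = txn;
        }
        return exceptions;
      }

      public static void main(String[] args) {
        Set<Long> open = new LinkedHashSet<Long>(Arrays.asList(7L, 9L, 12L));
        System.out.println(Arrays.toString(exceptionsExcluding(open, 9L)));  // [7, 12]
        System.out.println(Arrays.toString(exceptionsExcluding(open, 0L)));  // [7, 9, 12]
      }
    }
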
Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java Mon Oct  6 03:44:13 2014
@@ -47,10 +47,15 @@ public class IpAddressListener extends M
     super(config);
   }
 
+  private String getIpFromInetAddress(String addr) {
+    return addr.substring(addr.indexOf('/') + 1);
+  }
+
   private void checkIpAddress() {
     try {
-      String localhostIp = InetAddress.getByName(LOCAL_HOST).getHostAddress();
-      Assert.assertEquals(localhostIp, HMSHandler.getIpAddress());
+      String localhostIp = InetAddress.getByName(LOCAL_HOST).toString();
+      Assert.assertEquals(getIpFromInetAddress(localhostIp),
+          getIpFromInetAddress(HMSHandler.getIpAddress()));
     } catch (UnknownHostException e) {
       Assert.assertTrue("InetAddress.getLocalHost threw an exception: " + e.getMessage(), false);
     }

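The two hunks above switch the metastore from InetAddress.getHostAddress() to InetAddress.toString(), which is why the test now strips everything up to the '/' character: toString() returns the "hostname/literal-IP" form, while getHostAddress() returns only the bare IP. A quick standalone check of that JDK behaviour:

    import java.net.InetAddress;

    public class InetAddressFormats {
      public static void main(String[] args) throws Exception {
        InetAddress addr = InetAddress.getByName("localhost");
        // Typically prints something like "localhost/127.0.0.1".
        System.out.println("toString():       " + addr);
        // Prints just the literal address, e.g. "127.0.0.1".
        System.out.println("getHostAddress(): " + addr.getHostAddress());
        // The test helper strips the leading "hostname/" part to compare only the IP.
        String s = addr.toString();
        System.out.println("stripped:         " + s.substring(s.indexOf('/') + 1));
      }
    }
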
Modified: hive/branches/spark/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/pom.xml?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/pom.xml (original)
+++ hive/branches/spark/pom.xml Mon Oct  6 03:44:13 2014
@@ -982,11 +982,6 @@
         <dependencies>
           <dependency>
             <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-            <version>${hadoop-20S.version}</version>
-          </dependency>
-          <dependency>
-            <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-core</artifactId>
             <version>${hadoop-20S.version}</version>
           </dependency>
@@ -1029,11 +1024,6 @@
         <dependencies>
           <dependency>
             <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-            <version>${hadoop-23.version}</version>
-          </dependency>
-          <dependency>
-            <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-common</artifactId>
             <version>${hadoop-23.version}</version>
           </dependency>

Modified: hive/branches/spark/ql/if/queryplan.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/if/queryplan.thrift?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/if/queryplan.thrift (original)
+++ hive/branches/spark/ql/if/queryplan.thrift Mon Oct  6 03:44:13 2014
@@ -59,7 +59,6 @@ enum OperatorType {
   EVENT,
   ORCFILEMERGE,
   RCFILEMERGE,
-  MERGEJOIN,
 }
 
 struct Operator {

Modified: hive/branches/spark/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/pom.xml?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/pom.xml (original)
+++ hive/branches/spark/ql/pom.xml Mon Oct  6 03:44:13 2014
@@ -28,7 +28,6 @@
   <name>Hive Query Language</name>
 
   <properties>
-    <optiq.version>0.9.1-incubating-SNAPSHOT</optiq.version>
     <hive.path.to.root>..</hive.path.to.root>
   </properties>
 
@@ -183,42 +182,6 @@
       <version>${datanucleus-core.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.optiq</groupId>
-      <artifactId>optiq-core</artifactId>
-      <version>${optiq.version}</version>
-      <exclusions>
-        <!-- hsqldb interferes with the use of derby as the default db
-          in hive's use of datanucleus. 
-        -->
-        <exclusion>
-          <groupId>org.hsqldb</groupId>
-          <artifactId>hsqldb</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-databind</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>   
-    <dependency>
-      <groupId>org.apache.optiq</groupId>
-      <artifactId>optiq-avatica</artifactId>
-      <version>${optiq.version}</version>
-      <exclusions>
-        <!-- hsqldb interferes with the use of derby as the default db
-          in hive's use of datanucleus. 
-        -->
-        <exclusion>
-          <groupId>org.hsqldb</groupId>
-          <artifactId>hsqldb</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-databind</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
       <version>${guava.version}</version>

Modified: hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp Mon Oct  6 03:44:13 2014
@@ -54,8 +54,7 @@ int _kOperatorTypeValues[] = {
   OperatorType::DEMUX,
   OperatorType::EVENT,
   OperatorType::ORCFILEMERGE,
-  OperatorType::RCFILEMERGE,
-  OperatorType::MERGEJOIN
+  OperatorType::RCFILEMERGE
 };
 const char* _kOperatorTypeNames[] = {
   "JOIN",
@@ -81,10 +80,9 @@ const char* _kOperatorTypeNames[] = {
   "DEMUX",
   "EVENT",
   "ORCFILEMERGE",
-  "RCFILEMERGE",
-  "MERGEJOIN"
+  "RCFILEMERGE"
 };
-const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(25, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(24, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
 
 int _kTaskTypeValues[] = {
   TaskType::MAP,

Modified: hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-cpp/queryplan_types.h Mon Oct  6 03:44:13 2014
@@ -59,8 +59,7 @@ struct OperatorType {
     DEMUX = 20,
     EVENT = 21,
     ORCFILEMERGE = 22,
-    RCFILEMERGE = 23,
-    MERGEJOIN = 24
+    RCFILEMERGE = 23
   };
 };
 

Modified: hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java Mon Oct  6 03:44:13 2014
@@ -35,8 +35,7 @@ public enum OperatorType implements org.
   DEMUX(20),
   EVENT(21),
   ORCFILEMERGE(22),
-  RCFILEMERGE(23),
-  MERGEJOIN(24);
+  RCFILEMERGE(23);
 
   private final int value;
 
@@ -105,8 +104,6 @@ public enum OperatorType implements org.
         return ORCFILEMERGE;
       case 23:
         return RCFILEMERGE;
-      case 24:
-        return MERGEJOIN;
       default:
         return null;
     }

Modified: hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-php/Types.php Mon Oct  6 03:44:13 2014
@@ -59,7 +59,6 @@ final class OperatorType {
   const EVENT = 21;
   const ORCFILEMERGE = 22;
   const RCFILEMERGE = 23;
-  const MERGEJOIN = 24;
   static public $__names = array(
     0 => 'JOIN',
     1 => 'MAPJOIN',
@@ -85,7 +84,6 @@ final class OperatorType {
     21 => 'EVENT',
     22 => 'ORCFILEMERGE',
     23 => 'RCFILEMERGE',
-    24 => 'MERGEJOIN',
   );
 }
 

Modified: hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-py/queryplan/ttypes.py Mon Oct  6 03:44:13 2014
@@ -69,7 +69,6 @@ class OperatorType:
   EVENT = 21
   ORCFILEMERGE = 22
   RCFILEMERGE = 23
-  MERGEJOIN = 24
 
   _VALUES_TO_NAMES = {
     0: "JOIN",
@@ -96,7 +95,6 @@ class OperatorType:
     21: "EVENT",
     22: "ORCFILEMERGE",
     23: "RCFILEMERGE",
-    24: "MERGEJOIN",
   }
 
   _NAMES_TO_VALUES = {
@@ -124,7 +122,6 @@ class OperatorType:
     "EVENT": 21,
     "ORCFILEMERGE": 22,
     "RCFILEMERGE": 23,
-    "MERGEJOIN": 24,
   }
 
 class TaskType:

Modified: hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb (original)
+++ hive/branches/spark/ql/src/gen/thrift/gen-rb/queryplan_types.rb Mon Oct  6 03:44:13 2014
@@ -45,9 +45,8 @@ module OperatorType
   EVENT = 21
   ORCFILEMERGE = 22
   RCFILEMERGE = 23
-  MERGEJOIN = 24
-  VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE", 24 => "MERGEJOIN"}
-  VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN]).freeze
+  VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE"}
+  VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE]).freeze
 end
 
 module TaskType

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Oct  6 03:44:13 2014
@@ -390,9 +390,6 @@ public class Driver implements CommandPr
       tree = ParseUtils.findRootNonNullToken(tree);
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
 
-      // Initialize the transaction manager.  This must be done before analyze is called
-      SessionState.get().initTxnMgr(conf);
-
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
       BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
       List<HiveSemanticAnalyzerHook> saHooks =
@@ -892,12 +889,9 @@ public class Driver implements CommandPr
 
   /**
    * Acquire read and write locks needed by the statement. The list of objects to be locked is
-   * obtained from the inputs and outputs populated by the compiler. The lock acuisition scheme is
+   * obtained from the inputs and outputs populated by the compiler. The lock acquisition scheme is
    * pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making
    * sure that the locks are lexicographically sorted.
-   *
-   * This method also records the list of valid transactions.  This must be done after any
-   * transactions have been opened and locks acquired.
    **/
   private int acquireLocksAndOpenTxn() {
     PerfLogger perfLogger = PerfLogger.getPerfLogger();
@@ -937,7 +931,7 @@ public class Driver implements CommandPr
 
       txnMgr.acquireLocks(plan, ctx, userFromUGI);
 
-      return recordValidTxns();
+      return 0;
     } catch (LockException e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1114,6 +1108,11 @@ public class Driver implements CommandPr
     SessionState ss = SessionState.get();
     try {
       ckLock = checkConcurrency();
+      try {
+        ss.initTxnMgr(conf);
+      } catch (LockException e) {
+        throw new SemanticException(e.getMessage(), e);
+      }
     } catch (SemanticException e) {
       errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1122,8 +1121,11 @@ public class Driver implements CommandPr
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return createProcessorResponse(10);
     }
+    int ret = recordValidTxns();
+    if (ret != 0) {
+      return createProcessorResponse(ret);
+    }
 
-    int ret;
     if (!alreadyCompiled) {
       ret = compileInternal(command);
       if (ret != 0) {

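Net effect of the Driver.java hunks above: the transaction manager is no longer initialized inside compile(); instead runInternal() initializes it and records the valid-transaction list up front, before compilation, and acquireLocksAndOpenTxn() simply returns 0 on success. A minimal sketch of that ordering, with simplified, hypothetical names (this is not the real Driver API):

    public class RunOrderSketch {
        interface TxnManager {
            void open();
            String validTxnSnapshot();
        }

        static int runInternal(TxnManager txnMgr, boolean alreadyCompiled) {
            txnMgr.open();                                // previously done during compile()
            String snapshot = txnMgr.validTxnSnapshot();  // valid txns recorded before compiling now
            if (snapshot == null) {
                return 10;                                // fail early, as the new code does
            }
            int ret = alreadyCompiled ? 0 : compile();
            if (ret != 0) {
                return ret;
            }
            return acquireLocks();                        // no longer records valid txns itself
        }

        static int compile() { return 0; }
        static int acquireLocks() { return 0; }

        public static void main(String[] args) {
            TxnManager mgr = new TxnManager() {
                public void open() { }
                public String validTxnSnapshot() { return "100:"; }
            };
            System.out.println(runInternal(mgr, false)); // 0
        }
    }
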
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Mon Oct  6 03:44:13 2014
@@ -417,8 +417,6 @@ public enum ErrorMsg {
       "that implements AcidOutputFormat while transaction manager that supports ACID is in use"),
   VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
       "Values clause with table constructor not yet supported"),
-  ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that does not use " +
-      "an AcidOutputFormat", true),
 
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),

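For reference, ErrorMsg entries that carry a "{0}" placeholder and a trailing boolean (like the removed 10297 message above) are presumably filled in with MessageFormat-style argument substitution; the sketch below only shows the plain JDK call with the removed message text, not the ErrorMsg API itself, and the table name is invented:

    import java.text.MessageFormat;

    public class ErrorMessageFormatSketch {
        public static void main(String[] args) {
            String template =
                "Attempt to do update or delete on table {0} that does not use an AcidOutputFormat";
            // "sales_txn" is a made-up table name for illustration.
            System.out.println(MessageFormat.format(template, "sales_txn"));
        }
    }
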
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java Mon Oct  6 03:44:13 2014
@@ -48,37 +48,12 @@ public class QueryProperties {
   boolean mapJoinRemoved = false;
   boolean hasMapGroupBy = false;
 
-  private int noOfJoins = 0;
-  private int noOfOuterJoins = 0;
-  private boolean hasLateralViews;
-  
-  private boolean multiDestQuery;
-  private boolean filterWithSubQuery;
-  
   public boolean hasJoin() {
-    return (noOfJoins > 0);
+    return hasJoin;
   }
 
-  public void incrementJoinCount(boolean outerJoin) {
-    noOfJoins++;
-    if (outerJoin)
-      noOfOuterJoins++;
-  }
-
-  public int getJoinCount() {
-    return noOfJoins;
-  }
-
-  public int getOuterJoinCount() {
-    return noOfOuterJoins;
-  }
-
-  public void setHasLateralViews(boolean hasLateralViews) {
-    this.hasLateralViews = hasLateralViews;
-  }
-
-  public boolean hasLateralViews() {
-    return hasLateralViews;
+  public void setHasJoin(boolean hasJoin) {
+    this.hasJoin = hasJoin;
   }
 
   public boolean hasGroupBy() {
@@ -169,22 +144,6 @@ public class QueryProperties {
     this.hasMapGroupBy = hasMapGroupBy;
   }
 
-  public boolean hasMultiDestQuery() {
-    return this.multiDestQuery;
-  }
-
-  public void setMultiDestQuery(boolean multiDestQuery) {
-    this.multiDestQuery = multiDestQuery;
-  }
-
-  public void setFilterWithSubQuery(boolean filterWithSubQuery) {
-    this.filterWithSubQuery = filterWithSubQuery;
-  }
-
-  public boolean hasFilterWithSubQuery() {
-    return this.filterWithSubQuery;
-  }
-
   public void clear() {
     hasJoin = false;
     hasGroupBy = false;
@@ -201,11 +160,5 @@ public class QueryProperties {
     hasClusterBy = false;
     mapJoinRemoved = false;
     hasMapGroupBy = false;
-
-    noOfJoins = 0;
-    noOfOuterJoins = 0;
-    
-    multiDestQuery = false;
-    filterWithSubQuery = false;
   }
 }

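The QueryProperties hunks above revert the class to one boolean per property: the join and outer-join counters, lateral-view, multi-destination and filter-with-subquery tracking are gone, and hasJoin() reads a plain flag again. A minimal sketch of that flag-based shape (not the real class, just the pattern):

    public class QueryPropertiesSketch {
        private boolean hasJoin;
        private boolean hasGroupBy;

        public boolean hasJoin() { return hasJoin; }
        public void setHasJoin(boolean hasJoin) { this.hasJoin = hasJoin; }

        public boolean hasGroupBy() { return hasGroupBy; }
        public void setHasGroupBy(boolean hasGroupBy) { this.hasGroupBy = hasGroupBy; }

        // clear() resets every flag so the holder can be reused across queries.
        public void clear() {
            hasJoin = false;
            hasGroupBy = false;
        }

        public static void main(String[] args) {
            QueryPropertiesSketch props = new QueryPropertiesSketch();
            props.setHasJoin(true);
            System.out.println(props.hasJoin()); // true
            props.clear();
            System.out.println(props.hasJoin()); // false
        }
    }
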
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java Mon Oct  6 03:44:13 2014
@@ -61,13 +61,13 @@ public abstract class AbstractMapJoinOpe
   @Override
   @SuppressWarnings("unchecked")
   protected void initializeOp(Configuration hconf) throws HiveException {
-    if (conf.getGenJoinKeys()) {
-      int tagLen = conf.getTagLength();
-      joinKeys = new List[tagLen];
-      JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE);
-      joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys,
-          inputObjInspectors,NOTSKIPBIGTABLE, tagLen);
-    }
+    int tagLen = conf.getTagLength();
+
+    joinKeys = new List[tagLen];
+
+    JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE);
+    joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys,
+        inputObjInspectors,NOTSKIPBIGTABLE, tagLen);
 
     super.initializeOp(hconf);
 

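With the guard on conf.getGenJoinKeys() removed, initializeOp() above always builds the per-tag join-key evaluators and their object inspectors. The sketch below only illustrates the per-tag array layout that code sizes from the tag length; the names are invented and none of the JoinUtil calls are reproduced:

    import java.util.ArrayList;
    import java.util.List;

    public class PerTagKeySketch {
        public static void main(String[] args) {
            int tagLen = 2;                             // two join inputs (big table + one small table)
            @SuppressWarnings("unchecked")
            List<String>[] joinKeys = new List[tagLen]; // one slot per tag, as in the operator
            for (int tag = 0; tag < tagLen; tag++) {
                joinKeys[tag] = new ArrayList<>();
                joinKeys[tag].add("key_expr_for_tag_" + tag); // placeholder key expression
            }
            System.out.println(joinKeys[0] + " / " + joinKeys[1]);
        }
    }
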
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java Mon Oct  6 03:44:13 2014
@@ -44,10 +44,10 @@ import org.apache.tez.runtime.api.events
 @SuppressWarnings({ "deprecation", "serial" })
 public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
 
-  protected transient Serializer serializer;
-  protected transient DataOutputBuffer buffer;
-  protected transient boolean hasReachedMaxSize = false;
-  protected transient long MAX_SIZE;
+  private transient Serializer serializer;
+  private transient DataOutputBuffer buffer;
+  private transient boolean hasReachedMaxSize = false;
+  private transient long MAX_SIZE;
 
   @Override
   public void initializeOp(Configuration hconf) throws HiveException {
@@ -57,7 +57,7 @@ public class AppMasterEventOperator exte
     initDataBuffer(false);
   }
 
-  protected void initDataBuffer(boolean skipPruning) throws HiveException {
+  private void initDataBuffer(boolean skipPruning) throws HiveException {
     buffer = new DataOutputBuffer();
     try {
       // where does this go to?

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Mon Oct  6 03:44:13 2014
@@ -323,6 +323,7 @@ public abstract class CommonJoinOperator
 
   @Override
   public void startGroup() throws HiveException {
+    LOG.trace("Join: Starting new group");
     newGroupStarted = true;
     for (AbstractRowContainer<List<Object>> alw : storage) {
       alw.clearRows();
@@ -631,6 +632,8 @@ public abstract class CommonJoinOperator
    */
   @Override
   public void endGroup() throws HiveException {
+    LOG.trace("Join Op: endGroup called: numValues=" + numAliases);
+
     checkAndGenObject();
   }
 
@@ -716,6 +719,7 @@ public abstract class CommonJoinOperator
 
         if (noOuterJoin) {
           if (alw.rowCount() == 0) {
+            LOG.trace("No data for alias=" + i);
             return;
           } else if (alw.rowCount() > 1) {
             mayHasMoreThanOne = true;
@@ -772,6 +776,7 @@ public abstract class CommonJoinOperator
    */
   @Override
   public void closeOp(boolean abort) throws HiveException {
+    LOG.trace("Join Op close");
     for (AbstractRowContainer<List<Object>> alw : storage) {
       if (alw != null) {
         alw.clearRows(); // clean up the temp files

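A side note on the trace calls added to CommonJoinOperator above: an unguarded LOG.trace(...) still pays for the argument string concatenation even when TRACE is disabled. The sketch below merely contrasts the two forms and is not part of the patch; elsewhere in this commit, GroupByOperator drops exactly such a guard:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class TraceGuardSketch {
        private static final Log LOG = LogFactory.getLog(TraceGuardSketch.class);

        static void endGroup(int numAliases) {
            // Unguarded: the message string is built on every call.
            LOG.trace("Join Op: endGroup called: numValues=" + numAliases);

            // Guarded: concatenation is skipped entirely when TRACE is off.
            if (LOG.isTraceEnabled()) {
                LOG.trace("Join Op: endGroup called: numValues=" + numAliases);
            }
        }

        public static void main(String[] args) {
            endGroup(3);
        }
    }
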
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Oct  6 03:44:13 2014
@@ -3275,21 +3275,19 @@ public class DDLTask extends Task<DDLWor
     }
 
     Table oldTbl = tbl.copy();
-    List<FieldSchema> oldCols = (part == null ? tbl.getCols() : part.getCols());
-    StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
 
     if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
       tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
       tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
       List<FieldSchema> newCols = alterTbl.getNewCols();
-      String serializationLib = sd.getSerdeInfo().getSerializationLib();
-      if (serializationLib.equals(
+      List<FieldSchema> oldCols = tbl.getCols();
+      if (tbl.getSerializationLib().equals(
           "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
         console
         .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
-        sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-        sd.setCols(newCols);
+        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
+        tbl.getTTable().getSd().setCols(newCols);
       } else {
         // make sure the columns do not already exist
         Iterator<FieldSchema> iterNewCols = newCols.iterator();
@@ -3305,9 +3303,10 @@ public class DDLTask extends Task<DDLWor
           }
           oldCols.add(newCol);
         }
-        sd.setCols(oldCols);
+        tbl.getTTable().getSd().setCols(oldCols);
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
+      List<FieldSchema> oldCols = tbl.getCols();
       List<FieldSchema> newCols = new ArrayList<FieldSchema>();
       Iterator<FieldSchema> iterOldCols = oldCols.iterator();
       String oldName = alterTbl.getOldColName();
@@ -3368,24 +3367,24 @@ public class DDLTask extends Task<DDLWor
         newCols.add(position, column);
       }
 
-      sd.setCols(newCols);
+      tbl.getTTable().getSd().setCols(newCols);
+
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
       // change SerDe to LazySimpleSerDe if it is columnsetSerDe
-      String serializationLib = sd.getSerdeInfo().getSerializationLib();
-      if (serializationLib.equals(
+      if (tbl.getSerializationLib().equals(
           "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
         console
         .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
-        sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-      } else if (!serializationLib.equals(
+        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
+      } else if (!tbl.getSerializationLib().equals(
           MetadataTypedColumnsetSerDe.class.getName())
-          && !serializationLib.equals(LazySimpleSerDe.class.getName())
-          && !serializationLib.equals(ColumnarSerDe.class.getName())
-          && !serializationLib.equals(DynamicSerDe.class.getName())
-          && !serializationLib.equals(ParquetHiveSerDe.class.getName())) {
+          && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
+          && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
+          && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())
+          && !tbl.getSerializationLib().equals(ParquetHiveSerDe.class.getName())) {
         throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName());
       }
-      sd.setCols(alterTbl.getNewCols());
+      tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
       tbl.getTTable().getParameters().putAll(alterTbl.getProps());
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
@@ -3394,26 +3393,47 @@ public class DDLTask extends Task<DDLWor
         tbl.getTTable().getParameters().remove(keyItr.next());
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
-      sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
+      if (part != null) {
+        part.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
+            alterTbl.getProps());
+      } else {
+        tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
+            alterTbl.getProps());
+      }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
       String serdeName = alterTbl.getSerdeName();
-      sd.getSerdeInfo().setSerializationLib(serdeName);
-      if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
-        sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
-      }
       if (part != null) {
+        part.getTPartition().getSd().getSerdeInfo().setSerializationLib(serdeName);
+        if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
+          part.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
+              alterTbl.getProps());
+        }
         part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols());
       } else {
+        tbl.setSerializationLib(alterTbl.getSerdeName());
+        if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
+          tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
+              alterTbl.getProps());
+        }
         if (!Table.hasMetastoreBasedSchema(conf, serdeName)) {
           tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl.
               getDeserializer()));
         }
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
-      sd.setInputFormat(alterTbl.getInputFormat());
-      sd.setOutputFormat(alterTbl.getOutputFormat());
-      if (alterTbl.getSerdeName() != null) {
-        sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
+      if(part != null) {
+        part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
+        part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
+        if (alterTbl.getSerdeName() != null) {
+          part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
+              alterTbl.getSerdeName());
+        }
+      } else {
+        tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
+        tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
+        if (alterTbl.getSerdeName() != null) {
+          tbl.setSerializationLib(alterTbl.getSerdeName());
+        }
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
       boolean protectModeEnable = alterTbl.isProtectModeEnable();
@@ -3443,6 +3463,8 @@ public class DDLTask extends Task<DDLWor
             .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
       }
 
+      StorageDescriptor sd = part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd();
+
       if (alterTbl.isTurnOffSorting()) {
         sd.setSortCols(new ArrayList<Order>());
       } else if (alterTbl.getNumberBuckets() == -1) {
@@ -3463,7 +3485,11 @@ public class DDLTask extends Task<DDLWor
             || locUri.getScheme().trim().equals("")) {
           throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
         }
-        sd.setLocation(newLocation);
+        if (part != null) {
+          part.setLocation(newLocation);
+        } else {
+          tbl.setDataLocation(new Path(locUri));
+        }
       } catch (URISyntaxException e) {
         throw new HiveException(e);
       }
@@ -3663,7 +3689,7 @@ public class DDLTask extends Task<DDLWor
     }
 
     // drop the table
-    db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
+    db.dropTable(dropTbl.getTableName());
     if (tbl != null) {
       // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
       work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));

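The DDLTask hunks above move alterTable() back from resolving the target StorageDescriptor once (the removed local 'sd') to re-checking partition-versus-table inside each ALTER branch. A simplified sketch of the two shapes, using stand-in classes rather than the real Hive metadata types:

    public class AlterTargetSketch {
        static class StorageDescriptor { String serdeLib; }
        static class TableLike { StorageDescriptor sd = new StorageDescriptor(); }
        static class PartitionLike { StorageDescriptor sd = new StorageDescriptor(); }

        // Removed style: resolve the target once, every branch then works on 'sd'.
        static void alterResolvedOnce(TableLike tbl, PartitionLike part, String serde) {
            StorageDescriptor sd = (part == null) ? tbl.sd : part.sd;
            sd.serdeLib = serde;
        }

        // Restored style: each branch repeats the partition/table distinction.
        static void alterPerBranch(TableLike tbl, PartitionLike part, String serde) {
            if (part != null) {
                part.sd.serdeLib = serde;
            } else {
                tbl.sd.serdeLib = serde;
            }
        }

        public static void main(String[] args) {
            TableLike tbl = new TableLike();
            alterResolvedOnce(tbl, null, "LazySimpleSerDe");
            alterPerBranch(tbl, null, "LazySimpleSerDe");
            System.out.println(tbl.sd.serdeLib);
        }
    }
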
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java Mon Oct  6 03:44:13 2014
@@ -65,7 +65,7 @@ import org.apache.hadoop.hive.serde2.obj
  */
 public class DummyStoreOperator extends Operator<DummyStoreDesc> implements Serializable {
 
-  protected transient InspectableObject result;
+  private transient InspectableObject result;
 
   public DummyStoreOperator() {
     super();

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java Mon Oct  6 03:44:13 2014
@@ -45,7 +45,6 @@ public class ExprNodeGenericFuncEvaluato
   transient ExprNodeEvaluator[] children;
   transient GenericUDF.DeferredObject[] deferredChildren;
   transient boolean isEager;
-  transient boolean isConstant = false;
 
   /**
    * Class to allow deferred evaluation for GenericUDF.
@@ -125,10 +124,7 @@ public class ExprNodeGenericFuncEvaluato
     if (context != null) {
       context.setup(genericUDF);
     }
-    outputOI = genericUDF.initializeAndFoldConstants(childrenOIs);
-    isConstant = ObjectInspectorUtils.isConstantObjectInspector(outputOI)
-        && isDeterministic();
-    return outputOI;
+    return outputOI = genericUDF.initializeAndFoldConstants(childrenOIs);
   }
 
   @Override
@@ -158,11 +154,12 @@ public class ExprNodeGenericFuncEvaluato
 
   @Override
   protected Object _evaluate(Object row, int version) throws HiveException {
-    if (isConstant) {
+    rowObject = row;
+    if (ObjectInspectorUtils.isConstantObjectInspector(outputOI) &&
+        isDeterministic()) {
       // The output of this UDF is constant, so don't even bother evaluating.
-      return ((ConstantObjectInspector) outputOI).getWritableConstantValue();
+      return ((ConstantObjectInspector)outputOI).getWritableConstantValue();
     }
-    rowObject = row;
     for (int i = 0; i < deferredChildren.length; i++) {
       deferredChildren[i].prepare(version);
     }

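The ExprNodeGenericFuncEvaluator hunks above drop the cached isConstant flag: the restored _evaluate() re-checks "constant output and deterministic UDF" on every row instead of once at initialize() time. A small sketch of that trade-off, with hypothetical names (the boolean check below merely stands in for isConstantObjectInspector(outputOI) && isDeterministic()):

    public class ConstantFoldCheckSketch {
        private boolean isConstant;              // cached at init time in the removed variant

        boolean constantAndDeterministic() {     // stand-in for the real predicate
            return true;
        }

        void initialize() {
            isConstant = constantAndDeterministic();
        }

        Object evaluateCached(Object row) {
            if (isConstant) {                    // one boolean read per row
                return "folded-constant";
            }
            return row;
        }

        Object evaluatePerRow(Object row) {
            if (constantAndDeterministic()) {    // predicate re-evaluated per row
                return "folded-constant";
            }
            return row;
        }

        public static void main(String[] args) {
            ConstantFoldCheckSketch s = new ConstantFoldCheckSketch();
            s.initialize();
            System.out.println(s.evaluateCached("row1"));
            System.out.println(s.evaluatePerRow("row1"));
        }
    }
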
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Oct  6 03:44:13 2014
@@ -165,7 +165,7 @@ public class FetchOperator implements Se
 
   private void setupExecContext() {
     if (hasVC || work.getSplitSample() != null) {
-      context = new ExecMapperContext(job);
+      context = new ExecMapperContext();
       if (operator != null) {
         operator.setExecContext(context);
       }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java Mon Oct  6 03:44:13 2014
@@ -76,7 +76,7 @@ public class FilterOperator extends Oper
       statsMap.put(Counter.FILTERED, filtered_count);
       statsMap.put(Counter.PASSED, passed_count);
       conditionInspector = null;
-      ioContext = IOContext.get(hconf.get(Utilities.INPUT_NAME));
+      ioContext = IOContext.get();
     } catch (Throwable e) {
       throw new HiveException(e);
     }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Mon Oct  6 03:44:13 2014
@@ -639,14 +639,6 @@ public final class FunctionRegistry {
     }
   }
 
-  public static String getNormalizedFunctionName(String fn) {
-    // Does the same thing as getFunctionInfo, except for getting the function info.
-    fn = fn.toLowerCase();
-    return (FunctionUtils.isQualifiedFunctionName(fn) || mFunctions.get(fn) != null) ? fn
-        : FunctionUtils.qualifyFunctionName(
-            fn, SessionState.get().getCurrentDatabase().toLowerCase());
-  }
-
   private static <T extends CommonFunctionInfo> T getFunctionInfo(
       Map<String, T> mFunctions, String functionName) {
     functionName = functionName.toLowerCase();
@@ -869,7 +861,15 @@ public final class FunctionRegistry {
             TypeInfoUtils.getCharacterLengthForType(b));
         return TypeInfoFactory.getVarcharTypeInfo(maxLength);
       case DECIMAL:
-      return HiveDecimalUtils.getDecimalTypeForPrimitiveCategories(a, b);
+          int prec1 = HiveDecimalUtils.getPrecisionForType(a);
+          int prec2 = HiveDecimalUtils.getPrecisionForType(b);
+          int scale1 = HiveDecimalUtils.getScaleForType(a);
+          int scale2 = HiveDecimalUtils.getScaleForType(b);
+          int intPart = Math.max(prec1 - scale1, prec2 - scale2);
+          int decPart = Math.max(scale1, scale2);
+          int prec =  Math.min(intPart + decPart, HiveDecimal.MAX_PRECISION);
+          int scale = Math.min(decPart, HiveDecimal.MAX_PRECISION - intPart);
+          return TypeInfoFactory.getDecimalTypeInfo(prec, scale);
       default:
         // Type doesn't require any qualifiers.
         return TypeInfoFactory.getPrimitiveTypeInfo(

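The FunctionRegistry hunk above inlines the arithmetic for the common decimal type of two operands: take the larger integer-digit requirement and the larger scale separately, then clamp to the maximum precision, letting scale give way so integer digits are preserved. A worked example, assuming the usual Hive maximum precision of 38 for HiveDecimal.MAX_PRECISION:

    import java.util.Arrays;

    public class DecimalCommonTypeSketch {
        private static final int MAX_PRECISION = 38;   // assumed value of HiveDecimal.MAX_PRECISION

        static int[] commonType(int prec1, int scale1, int prec2, int scale2) {
            int intPart = Math.max(prec1 - scale1, prec2 - scale2); // integer digits needed
            int decPart = Math.max(scale1, scale2);                 // fractional digits needed
            int prec = Math.min(intPart + decPart, MAX_PRECISION);
            int scale = Math.min(decPart, MAX_PRECISION - intPart);
            return new int[] { prec, scale };
        }

        public static void main(String[] args) {
            // decimal(10,2) vs decimal(14,6): 8 integer digits, 6 fractional -> decimal(14,6)
            System.out.println(Arrays.toString(commonType(10, 2, 14, 6)));
            // decimal(38,10) vs decimal(20,18): 28 integer digits push against the 38-digit
            // ceiling, so the scale is trimmed to 10 -> decimal(38,10)
            System.out.println(Arrays.toString(commonType(38, 10, 20, 18)));
        }
    }
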
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java Mon Oct  6 03:44:13 2014
@@ -18,7 +18,22 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.io.Serializable;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.lang.reflect.Field;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import javolution.util.FastBitSet;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -54,20 +69,6 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
-import java.io.Serializable;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
-import java.lang.reflect.Field;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 /**
  * GroupBy operator implementation.
  */
@@ -76,7 +77,6 @@ public class GroupByOperator extends Ope
 
   private static final Log LOG = LogFactory.getLog(GroupByOperator.class
       .getName());
-  private static final boolean isTraceEnabled = LOG.isTraceEnabled();
   private static final long serialVersionUID = 1L;
   private static final int NUMROWSESTIMATESIZE = 1000;
 
@@ -101,7 +101,6 @@ public class GroupByOperator extends Ope
   transient ExprNodeEvaluator unionExprEval = null;
 
   transient GenericUDAFEvaluator[] aggregationEvaluators;
-  transient boolean[] estimableAggregationEvaluators;
 
   protected transient ArrayList<ObjectInspector> objectInspectors;
   transient ArrayList<String> fieldNames;
@@ -443,10 +442,10 @@ public class GroupByOperator extends Ope
     estimateRowSize();
   }
 
-  public static final int javaObjectOverHead = 64;
-  public static final int javaHashEntryOverHead = 64;
-  public static final int javaSizePrimitiveType = 16;
-  public static final int javaSizeUnknownType = 256;
+  private static final int javaObjectOverHead = 64;
+  private static final int javaHashEntryOverHead = 64;
+  private static final int javaSizePrimitiveType = 16;
+  private static final int javaSizeUnknownType = 256;
 
   /**
    * The size of the element at position 'pos' is returned, if possible. If the
@@ -558,13 +557,11 @@ public class GroupByOperator extends Ope
     // Go over all the aggregation classes and get the size of the fields of
     // fixed length. Keep track of the variable length
     // fields in these aggregation classes.
-    estimableAggregationEvaluators = new boolean[aggregationEvaluators.length];
     for (int i = 0; i < aggregationEvaluators.length; i++) {
 
       fixedRowSize += javaObjectOverHead;
       AggregationBuffer agg = aggregationEvaluators[i].getNewAggregationBuffer();
       if (GenericUDAFEvaluator.isEstimable(agg)) {
-        estimableAggregationEvaluators[i] = true;
         continue;
       }
       Field[] fArr = ObjectInspectorUtils.getDeclaredNonStaticFields(agg.getClass());
@@ -768,12 +765,10 @@ public class GroupByOperator extends Ope
           flushHashTable(true);
           hashAggr = false;
         } else {
-          if (isTraceEnabled) {
-            LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl
-                + " #total = " + numRowsInput + " reduction = " + 1.0
-                * (numRowsHashTbl / numRowsInput) + " minReduction = "
-                + minReductionHashAggr);
-          }
+          LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl
+              + " #total = " + numRowsInput + " reduction = " + 1.0
+              * (numRowsHashTbl / numRowsInput) + " minReduction = "
+              + minReductionHashAggr);
         }
       }
     }
@@ -957,7 +952,7 @@ public class GroupByOperator extends Ope
       AggregationBuffer[] aggs = hashAggregations.get(newKeys);
       for (int i = 0; i < aggs.length; i++) {
         AggregationBuffer agg = aggs[i];
-        if (estimableAggregationEvaluators[i]) {
+        if (GenericUDAFEvaluator.isEstimable(agg)) {
           totalVariableSize += ((GenericUDAFEvaluator.AbstractAggregationBuffer)agg).estimate();
           continue;
         }
@@ -971,10 +966,8 @@ public class GroupByOperator extends Ope
       // Update the number of entries that can fit in the hash table
       numEntriesHashTable =
           (int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize)));
-      if (isTraceEnabled) {
-        LOG.trace("Hash Aggr: #hash table = " + numEntries
-            + " #max in hash table = " + numEntriesHashTable);
-      }
+      LOG.trace("Hash Aggr: #hash table = " + numEntries
+          + " #max in hash table = " + numEntriesHashTable);
     }
 
     // flush if necessary

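Among the GroupByOperator changes above, the hash-table sizing line is worth spelling out: the number of entries the in-memory aggregation map may hold is the memory budget divided by (fixed bytes per row plus the observed average of the variable-length part). The numbers below are invented purely for illustration:

    public class HashAggrSizingSketch {
        public static void main(String[] args) {
            long maxHashTblMemory = 256L * 1024 * 1024;  // memory budget for the hash map
            int fixedRowSize = 64 + 16 + 16;             // e.g. object overhead + two primitive fields
            long totalVariableSize = 4_000_000L;         // variable-length bytes seen so far
            int numEntriesVarSize = 10_000;              // entries those bytes were measured over

            int numEntriesHashTable =
                (int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize)));

            // 268435456 / (96 + 400) bytes per entry, roughly 541,000 entries.
            System.out.println("estimated max entries = " + numEntriesHashTable);
        }
    }
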
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Mon Oct  6 03:44:13 2014
@@ -171,9 +171,8 @@ public class MapJoinOperator extends Abs
 
   private void loadHashTable() throws HiveException {
 
-    if ((this.getExecContext() != null)
-        && ((this.getExecContext().getLocalWork() == null) || (!this.getExecContext()
-            .getLocalWork().getInputFileChangeSensitive()))) {
+    if (this.getExecContext().getLocalWork() == null
+        || !this.getExecContext().getLocalWork().getInputFileChangeSensitive()) {
       if (hashTblInitedOnce) {
         return;
       } else {
@@ -314,8 +313,8 @@ public class MapJoinOperator extends Abs
         tableContainer.dumpMetrics();
       }
     }
-    if ((this.getExecContext() != null) && (this.getExecContext().getLocalWork() != null)
-        && (this.getExecContext().getLocalWork().getInputFileChangeSensitive())
+    if ((this.getExecContext().getLocalWork() != null
+        && this.getExecContext().getLocalWork().getInputFileChangeSensitive())
         && mapJoinTables != null) {
       for (MapJoinTableContainer tableContainer : mapJoinTables) {
         if (tableContainer != null) {