Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 00:26:58 UTC
svn commit: r1629544 [5/33] - in /hive/branches/spark-new: ./
accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/
bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/
common/src/test/org/apache/hadoop/hive/common/type/ co...
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sun Oct 5 22:26:43 2014
@@ -48,9 +48,6 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableListMultimap;
-import com.google.common.collect.Multimaps;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -171,6 +168,8 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
import org.apache.hadoop.hive.metastore.events.PreEventContext;
import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
@@ -203,7 +202,10 @@ import org.apache.thrift.transport.TTran
import com.facebook.fb303.FacebookBase;
import com.facebook.fb303.fb_status;
import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
/**
* TODO:pc remove application logic to a separate interface.
@@ -803,7 +805,7 @@ public class HiveMetaStore extends Thrif
Exception ex = null;
try {
try {
- if (null != get_database(db.getName())) {
+ if (null != get_database_core(db.getName())) {
throw new AlreadyExistsException("Database " + db.getName() + " already exists");
}
} catch (NoSuchObjectException e) {
@@ -829,25 +831,45 @@ public class HiveMetaStore extends Thrif
}
@Override
- public Database get_database(final String name) throws NoSuchObjectException,
- MetaException {
+ public Database get_database(final String name) throws NoSuchObjectException, MetaException {
startFunction("get_database", ": " + name);
Database db = null;
Exception ex = null;
try {
- db = getMS().getDatabase(name);
+ db = get_database_core(name);
+ firePreEvent(new PreReadDatabaseEvent(db, this));
} catch (MetaException e) {
ex = e;
throw e;
} catch (NoSuchObjectException e) {
ex = e;
throw e;
+ } finally {
+ endFunction("get_database", db != null, ex);
+ }
+ return db;
+ }
+
+ /**
+ * Equivalent to get_database, but does not write to audit logs or fire pre-event listeners.
+ * Meant to be used by internal Hive classes that don't use the Thrift interface.
+ * @param name database name
+ * @return the Database object for the given name
+ * @throws NoSuchObjectException
+ * @throws MetaException
+ */
+ public Database get_database_core(final String name) throws NoSuchObjectException,
+ MetaException {
+ Database db = null;
+ try {
+ db = getMS().getDatabase(name);
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
} catch (Exception e) {
- ex = e;
assert (e instanceof RuntimeException);
throw (RuntimeException) e;
- } finally {
- endFunction("get_database", db != null, ex);
}
return db;
}
@@ -1373,7 +1395,7 @@ public class HiveMetaStore extends Thrif
try {
ms.openTransaction();
// drop any partitions
- tbl = get_table(dbname, name);
+ tbl = get_table_core(dbname, name);
if (tbl == null) {
throw new NoSuchObjectException(name + " doesn't exist");
}
@@ -1424,10 +1446,14 @@ public class HiveMetaStore extends Thrif
if (!success) {
ms.rollbackTransaction();
} else if (deleteData && !isExternal) {
+ boolean ifPurge = false;
+ if (envContext != null){
+ ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+ }
// Delete the data in the partitions which have other locations
- deletePartitionData(partPaths);
+ deletePartitionData(partPaths, ifPurge);
// Delete the data in the table
- deleteTableData(tblPath);
+ deleteTableData(tblPath, ifPurge);
// ok even if the data is not deleted
}
for (MetaStoreEventListener listener : listeners) {
@@ -1444,9 +1470,21 @@ public class HiveMetaStore extends Thrif
* @param tablePath
*/
private void deleteTableData(Path tablePath) {
+ deleteTableData(tablePath, false);
+ }
+
+ /**
+ * Deletes the data in a table's location; if the delete fails, logs an error.
+ *
+ * @param tablePath
+ * @param ifPurge if true, completely purge the table (skipping the trash) while
+ * removing data from the warehouse
+ */
+ private void deleteTableData(Path tablePath, boolean ifPurge) {
+
if (tablePath != null) {
try {
- wh.deleteDir(tablePath, true);
+ wh.deleteDir(tablePath, true, ifPurge);
} catch (Exception e) {
LOG.error("Failed to delete table directory: " + tablePath +
" " + e.getMessage());
@@ -1461,10 +1499,22 @@ public class HiveMetaStore extends Thrif
* @param partPaths
*/
private void deletePartitionData(List<Path> partPaths) {
+ deletePartitionData(partPaths, false);
+ }
+
+ /**
+ * Given a list of partition locations, tries to delete each one,
+ * logging an error for each delete that fails.
+ *
+ * @param partPaths
+ * @param ifPurge if true, completely purge the partition (skipping the trash) while
+ * removing data from the warehouse
+ */
+ private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
if (partPaths != null && !partPaths.isEmpty()) {
for (Path partPath : partPaths) {
try {
- wh.deleteDir(partPath, true);
+ wh.deleteDir(partPath, true, ifPurge);
} catch (Exception e) {
LOG.error("Failed to delete partition directory: " + partPath +
" " + e.getMessage());
@@ -1597,13 +1647,40 @@ public class HiveMetaStore extends Thrif
startTableFunction("get_table", dbname, name);
Exception ex = null;
try {
+ t = get_table_core(dbname, name);
+ firePreEvent(new PreReadTableEvent(t, this));
+ } catch (MetaException e) {
+ ex = e;
+ throw e;
+ } catch (NoSuchObjectException e) {
+ ex = e;
+ throw e;
+ } finally {
+ endFunction("get_table", t != null, ex, name);
+ }
+ return t;
+ }
+
+ /**
+ * Equivalent to get_table, but does not log audits or fire the pre-event listener.
+ * Meant to be used for calls made by other Hive classes that do not use the
+ * Thrift interface.
+ * @param dbname database the table lives in
+ * @param name table name
+ * @return Table object
+ * @throws MetaException
+ * @throws NoSuchObjectException
+ */
+ public Table get_table_core(final String dbname, final String name) throws MetaException,
+ NoSuchObjectException {
+ Table t;
+ try {
t = getMS().getTable(dbname, name);
if (t == null) {
throw new NoSuchObjectException(dbname + "." + name
+ " table not found");
}
} catch (Exception e) {
- ex = e;
if (e instanceof MetaException) {
throw (MetaException) e;
} else if (e instanceof NoSuchObjectException) {
@@ -1611,8 +1688,6 @@ public class HiveMetaStore extends Thrif
} else {
throw newMetaException(e);
}
- } finally {
- endFunction("get_table", t != null, ex, name);
}
return t;
}
@@ -2390,7 +2465,7 @@ public class HiveMetaStore extends Thrif
try {
ms.openTransaction();
part = ms.getPartition(db_name, tbl_name, part_vals);
- tbl = get_table(db_name, tbl_name);
+ tbl = get_table_core(db_name, tbl_name);
firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
if (part == null) {
@@ -2484,7 +2559,7 @@ public class HiveMetaStore extends Thrif
try {
// We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
// Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
- tbl = get_table(dbName, tblName);
+ tbl = get_table_core(dbName, tblName);
int minCount = 0;
RequestPartsSpec spec = request.getParts();
List<String> partNames = null;
@@ -2643,6 +2718,7 @@ public class HiveMetaStore extends Thrif
Partition ret = null;
Exception ex = null;
try {
+ fireReadTablePreEvent(db_name, tbl_name);
ret = getMS().getPartition(db_name, tbl_name, part_vals);
} catch (Exception e) {
ex = e;
@@ -2659,6 +2735,28 @@ public class HiveMetaStore extends Thrif
return ret;
}
+ /**
+ * Fires a pre-event for a read-table operation, if any
+ * pre-event listeners are registered.
+ *
+ * @param dbName
+ * @param tblName
+ * @throws MetaException
+ * @throws NoSuchObjectException
+ */
+ private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException {
+ if(preListeners.size() > 0) {
+ // do this only if there is a pre-event listener registered (avoids an unnecessary
+ // metastore API call)
+ Table t = getMS().getTable(dbName, tblName);
+ if (t == null) {
+ throw new NoSuchObjectException(dbName + "." + tblName
+ + " table not found");
+ }
+ firePreEvent(new PreReadTableEvent(t, this));
+ }
+ }
+
@Override
public Partition get_partition_with_auth(final String db_name,
final String tbl_name, final List<String> part_vals,
@@ -2666,7 +2764,7 @@ public class HiveMetaStore extends Thrif
throws MetaException, NoSuchObjectException, TException {
startPartitionFunction("get_partition_with_auth", db_name, tbl_name,
part_vals);
-
+ fireReadTablePreEvent(db_name, tbl_name);
Partition ret = null;
Exception ex = null;
try {
@@ -2688,7 +2786,7 @@ public class HiveMetaStore extends Thrif
public List<Partition> get_partitions(final String db_name, final String tbl_name,
final short max_parts) throws NoSuchObjectException, MetaException {
startTableFunction("get_partitions", db_name, tbl_name);
-
+ fireReadTablePreEvent(db_name, tbl_name);
List<Partition> ret = null;
Exception ex = null;
try {
@@ -2745,7 +2843,7 @@ public class HiveMetaStore extends Thrif
List<PartitionSpec> partitionSpecs = null;
try {
- Table table = get_table(dbName, tableName);
+ Table table = get_table_core(dbName, tableName);
List<Partition> partitions = get_partitions(dbName, tableName, (short) max_parts);
if (is_partition_spec_grouping_enabled(table)) {
@@ -2769,7 +2867,7 @@ public class HiveMetaStore extends Thrif
private static class StorageDescriptorKey {
- private StorageDescriptor sd;
+ private final StorageDescriptor sd;
StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; }
@@ -2891,9 +2989,9 @@ public class HiveMetaStore extends Thrif
@Override
public List<String> get_partition_names(final String db_name, final String tbl_name,
- final short max_parts) throws MetaException {
+ final short max_parts) throws MetaException, NoSuchObjectException {
startTableFunction("get_partition_names", db_name, tbl_name);
-
+ fireReadTablePreEvent(db_name, tbl_name);
List<String> ret = null;
Exception ex = null;
try {
@@ -3010,14 +3108,7 @@ public class HiveMetaStore extends Thrif
Exception ex = null;
try {
for (Partition tmpPart : new_parts) {
- try {
- for (MetaStorePreEventListener listener : preListeners) {
- listener.onEvent(
- new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
- }
- } catch (NoSuchObjectException e) {
- throw new MetaException(e.getMessage());
- }
+ firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
}
oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts);
@@ -3122,7 +3213,7 @@ public class HiveMetaStore extends Thrif
boolean success = false;
Exception ex = null;
try {
- Table oldt = get_table(dbname, name);
+ Table oldt = get_table_core(dbname, name);
firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
alterHandler.alterTable(getMS(), wh, dbname, name, newTable);
success = true;
@@ -3206,7 +3297,7 @@ public class HiveMetaStore extends Thrif
Exception ex = null;
try {
try {
- tbl = get_table(db, base_table_name);
+ tbl = get_table_core(db, base_table_name);
} catch (NoSuchObjectException e) {
throw new UnknownTableException(e.getMessage());
}
@@ -3266,7 +3357,7 @@ public class HiveMetaStore extends Thrif
Table tbl;
try {
- tbl = get_table(db, base_table_name);
+ tbl = get_table_core(db, base_table_name);
} catch (NoSuchObjectException e) {
throw new UnknownTableException(e.getMessage());
}
@@ -3385,6 +3476,7 @@ public class HiveMetaStore extends Thrif
private Partition get_partition_by_name_core(final RawStore ms, final String db_name,
final String tbl_name, final String part_name)
throws MetaException, NoSuchObjectException, TException {
+ fireReadTablePreEvent(db_name, tbl_name);
List<String> partVals = null;
try {
partVals = getPartValsFromName(ms, db_name, tbl_name, part_name);
@@ -3406,7 +3498,6 @@ public class HiveMetaStore extends Thrif
startFunction("get_partition_by_name", ": db=" + db_name + " tbl="
+ tbl_name + " part=" + part_name);
-
Partition ret = null;
Exception ex = null;
try {
@@ -3536,6 +3627,7 @@ public class HiveMetaStore extends Thrif
final List<String> groupNames) throws MetaException, TException, NoSuchObjectException {
startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name,
part_vals);
+ fireReadTablePreEvent(db_name, tbl_name);
List<Partition> ret = null;
Exception ex = null;
try {
@@ -3558,6 +3650,7 @@ public class HiveMetaStore extends Thrif
final String tbl_name, final List<String> part_vals, final short max_parts)
throws MetaException, TException, NoSuchObjectException {
startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
+ fireReadTablePreEvent(db_name, tbl_name);
List<String> ret = null;
Exception ex = null;
try {
@@ -3726,7 +3819,7 @@ public class HiveMetaStore extends Thrif
String idxTblName = index.getIndexTableName();
if (idxTblName != null) {
String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName);
- Table tbl = get_table(qualified[0], qualified[1]);
+ Table tbl = get_table_core(qualified[0], qualified[1]);
if (tbl.getSd() == null) {
throw new MetaException("Table metadata is corrupted");
}
@@ -4028,7 +4121,7 @@ public class HiveMetaStore extends Thrif
} finally {
endFunction("write_partition_column_statistics: ", ret != false, null, tableName);
}
- }
+ }
@Override
public boolean delete_partition_column_statistics(String dbName, String tableName,
@@ -4083,7 +4176,7 @@ public class HiveMetaStore extends Thrif
final String tblName, final String filter, final short maxParts)
throws MetaException, NoSuchObjectException, TException {
startTableFunction("get_partitions_by_filter", dbName, tblName);
-
+ fireReadTablePreEvent(dbName, tblName);
List<Partition> ret = null;
Exception ex = null;
try {
@@ -4106,7 +4199,7 @@ public class HiveMetaStore extends Thrif
List<PartitionSpec> partitionSpecs = null;
try {
- Table table = get_table(dbName, tblName);
+ Table table = get_table_core(dbName, tblName);
List<Partition> partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts);
if (is_partition_spec_grouping_enabled(table)) {
@@ -4133,6 +4226,7 @@ public class HiveMetaStore extends Thrif
PartitionsByExprRequest req) throws TException {
String dbName = req.getDbName(), tblName = req.getTblName();
startTableFunction("get_partitions_by_expr", dbName, tblName);
+ fireReadTablePreEvent(dbName, tblName);
PartitionsByExprResult ret = null;
Exception ex = null;
try {
@@ -4169,7 +4263,7 @@ public class HiveMetaStore extends Thrif
throws MetaException, NoSuchObjectException, TException {
startTableFunction("get_partitions_by_names", dbName, tblName);
-
+ fireReadTablePreEvent(dbName, tblName);
List<Partition> ret = null;
Exception ex = null;
try {
@@ -4214,7 +4308,7 @@ public class HiveMetaStore extends Thrif
List<String> partValue = hiveObject.getPartValues();
if (partValue != null && partValue.size() > 0) {
try {
- Table table = get_table(hiveObject.getDbName(), hiveObject
+ Table table = get_table_core(hiveObject.getDbName(), hiveObject
.getObjectName());
partName = Warehouse
.makePartName(table.getPartitionKeys(), partValue);
@@ -4658,7 +4752,7 @@ public class HiveMetaStore extends Thrif
if (dbName == null) {
return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType);
}
- Table tbl = get_table(dbName, tableName);
+ Table tbl = get_table_core(dbName, tableName);
String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
if (principalName == null) {
return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName);
@@ -4736,7 +4830,7 @@ public class HiveMetaStore extends Thrif
if (dbName == null) {
return getMS().listPrincipalPartitionGrantsAll(principalName, principalType);
}
- Table tbl = get_table(dbName, tableName);
+ Table tbl = get_table_core(dbName, tableName);
String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
if (principalName == null) {
return getMS().listPartitionGrantsAll(dbName, tableName, partName);
@@ -5394,7 +5488,7 @@ public class HiveMetaStore extends Thrif
}
}
-
+
public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
return newHMSHandler(name, hiveConf, false);
}
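
The ifPurge handoff in drop_table above is string-typed: the client stores
"TRUE" under the "ifPurge" key in the EnvironmentContext properties, and
HMSHandler decodes it with Boolean.parseBoolean. A minimal, self-contained
sketch of that round trip (the class below is illustrative, not part of the
patch):

    import java.util.HashMap;
    import java.util.Map;

    public class IfPurgeRoundTrip {
      public static void main(String[] args) {
        // Client side: encode the flag as a string property, as dropTable does.
        Map<String, String> props = new HashMap<String, String>();
        props.put("ifPurge", "TRUE");

        // Server side: HMSHandler-style decoding. Boolean.parseBoolean is
        // case-insensitive and null-safe, so "TRUE" decodes to true and a
        // missing key decodes to false.
        boolean ifPurge = Boolean.parseBoolean(props.get("ifPurge"));
        System.out.println(ifPurge); // true
      }
    }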
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Sun Oct 5 22:26:43 2014
@@ -28,7 +28,6 @@ import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.net.URI;
-import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -98,7 +97,6 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
@@ -122,7 +120,6 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.metastore.api.UnlockRequest;
-import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.txn.TxnHandler;
import org.apache.hadoop.hive.shims.HadoopShims;
@@ -763,18 +760,35 @@ public class HiveMetaStoreClient impleme
}
/**
- * @param name
- * @param dbname
- * @throws NoSuchObjectException
- * @throws MetaException
- * @throws TException
- * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
- * java.lang.String, boolean)
+ * {@inheritDoc}
+ * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
*/
@Override
- public void dropTable(String dbname, String name)
- throws NoSuchObjectException, MetaException, TException {
- dropTable(dbname, name, true, true, null);
+ public void dropTable(String dbname, String name, boolean deleteData,
+ boolean ignoreUnknownTab) throws MetaException, TException,
+ NoSuchObjectException, UnsupportedOperationException {
+ dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+ }
+
+ /**
+ * Drop the table and choose whether to save the data in the trash.
+ * @param ifPurge if true, completely purge the table (skipping the trash) while
+ * removing data from the warehouse
+ * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+ */
+ @Override
+ public void dropTable(String dbname, String name, boolean deleteData,
+ boolean ignoreUnknownTab, boolean ifPurge)
+ throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+ // build a new EnvironmentContext carrying the ifPurge flag
+ EnvironmentContext envContext = null;
+ if (ifPurge) {
+ Map<String, String> warehouseOptions = new HashMap<String, String>();
+ warehouseOptions.put("ifPurge", "TRUE");
+ envContext = new EnvironmentContext(warehouseOptions);
+ }
+ dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
}
/** {@inheritDoc} */
@@ -786,23 +800,37 @@ public class HiveMetaStoreClient impleme
}
/**
+ * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+ */
+ @Override
+ public void dropTable(String dbname, String name)
+ throws NoSuchObjectException, MetaException, TException {
+ dropTable(dbname, name, true, true, null);
+ }
+
+ /**
+ * Drop the table and choose whether to: delete the underlying table data;
+ * throw if the table doesn't exist; save the data in the trash.
+ *
* @param dbname
* @param name
* @param deleteData
* delete the underlying data or just delete the table in metadata
- * @throws NoSuchObjectException
+ * @param ignoreUnknownTab
+ * don't throw if the requested table doesn't exist
+ * @param envContext
+ * for communicating with thrift
* @throws MetaException
+ * could not drop table properly
+ * @throws NoSuchObjectException
+ * the table wasn't found
* @throws TException
+ * a thrift communication error occurred
+ * @throws UnsupportedOperationException
+ * dropping an index table is not allowed
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
* java.lang.String, boolean)
*/
- @Override
- public void dropTable(String dbname, String name, boolean deleteData,
- boolean ignoreUnknownTab) throws MetaException, TException,
- NoSuchObjectException, UnsupportedOperationException {
- dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
- }
-
public void dropTable(String dbname, String name, boolean deleteData,
boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
NoSuchObjectException, UnsupportedOperationException {
@@ -1283,6 +1311,7 @@ public class HiveMetaStoreClient impleme
}
/** {@inheritDoc} */
+ @Override
public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
InvalidInputException{
@@ -1659,7 +1688,12 @@ public class HiveMetaStoreClient impleme
@Override
public ValidTxnList getValidTxns() throws TException {
- return TxnHandler.createValidTxnList(client.get_open_txns());
+ return TxnHandler.createValidTxnList(client.get_open_txns(), 0);
+ }
+
+ @Override
+ public ValidTxnList getValidTxns(long currentTxn) throws TException {
+ return TxnHandler.createValidTxnList(client.get_open_txns(), currentTxn);
}
@Override
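
With the new overload in place, callers can bypass the trash when dropping a
table. A hypothetical usage sketch (connection setup is reduced to a default
HiveConf; the database and table names are placeholders):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    public class DropWithPurge {
      public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // deleteData=true, ignoreUnknownTab=true, ifPurge=true: the table's
        // data is removed from the warehouse immediately instead of being
        // moved to the trash.
        client.dropTable("default", "scratch_table", true, true, true);
        client.close();
      }
    }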
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java Sun Oct 5 22:26:43 2014
@@ -37,12 +37,14 @@ public class HiveMetaStoreFsImpl impleme
@Override
public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
- Configuration conf) throws MetaException {
+ boolean ifPurge, Configuration conf) throws MetaException {
LOG.info("deleting " + f);
HadoopShims hadoopShim = ShimLoader.getHadoopShims();
try {
- if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
+ if (ifPurge) {
+ LOG.info("Not moving "+ f +" to trash");
+ } else if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
LOG.info("Moved to trash: " + f);
return true;
}
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Sun Oct 5 22:26:43 2014
@@ -192,6 +192,10 @@ public interface IMetaStoreClient {
* The database for this table
* @param tableName
* The table to drop
+ * @param deleteData
+ * Should we delete the underlying data
+ * @param ignoreUnknownTab
+ * don't throw if the requested table doesn't exist
* @throws MetaException
* Could not drop table properly.
* @throws NoSuchObjectException
@@ -200,7 +204,16 @@ public interface IMetaStoreClient {
* A thrift communication error occurred
*/
void dropTable(String dbname, String tableName, boolean deleteData,
- boolean ignoreUknownTab) throws MetaException, TException,
+ boolean ignoreUnknownTab) throws MetaException, TException,
+ NoSuchObjectException;
+
+ /**
+ * @param ifPurge
+ * completely purge the table (skipping trash) while removing data from warehouse
+ * @see #dropTable(String, String, boolean, boolean)
+ */
+ public void dropTable(String dbname, String tableName, boolean deleteData,
+ boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
NoSuchObjectException;
/**
@@ -226,6 +239,9 @@ public interface IMetaStoreClient {
void dropTable(String tableName, boolean deleteData)
throws MetaException, UnknownTableException, TException, NoSuchObjectException;
+ /**
+ * @see #dropTable(String, String, boolean, boolean)
+ */
void dropTable(String dbname, String tableName)
throws MetaException, TException, NoSuchObjectException;
@@ -1070,6 +1086,15 @@ public interface IMetaStoreClient {
ValidTxnList getValidTxns() throws TException;
/**
+ * Get a structure that details valid transactions.
+ * @param currentTxn The current transaction of the caller. This will be removed from the
+ * exceptions list so that the caller sees records from their own transaction.
+ * @return list of valid transactions
+ * @throws TException
+ */
+ ValidTxnList getValidTxns(long currentTxn) throws TException;
+
+ /**
* Initiate a transaction.
* @param user User who is opening this transaction. This is the Hive user,
* not necessarily the OS user. It is assumed that this user has already been
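
A sketch of how a caller might combine openTxn (documented above) with the
new getValidTxns(long) overload so that its own in-flight writes remain
readable; the connection details and user name are placeholders:

    import org.apache.hadoop.hive.common.ValidTxnList;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    public class OwnTxnVisibility {
      public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        long myTxn = client.openTxn("hive_user");
        // Unlike getValidTxns(), this variant drops myTxn from the exceptions
        // list, so the caller sees records written in its own transaction.
        ValidTxnList valid = client.getValidTxns(myTxn);
        client.close();
      }
    }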
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java Sun Oct 5 22:26:43 2014
@@ -32,11 +32,12 @@ public interface MetaStoreFS {
* delete a directory
*
* @param f
* @param recursive
+ * @param ifPurge
* @return true on success
* @throws MetaException
*/
public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
- Configuration conf) throws MetaException;
+ boolean ifPurge, Configuration conf) throws MetaException;
}
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Sun Oct 5 22:26:43 2014
@@ -258,7 +258,7 @@ public class MetaStoreUtils {
if (oldPart.getParameters().containsKey(stat)) {
Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
Long newStat = Long.parseLong(newPart.getParameters().get(stat));
- if (oldStat != newStat) {
+ if (!oldStat.equals(newStat)) {
return true;
}
}
@@ -993,7 +993,7 @@ public class MetaStoreUtils {
partString = partString.concat(partStringSep);
partString = partString.concat(partKey.getName());
partTypesString = partTypesString.concat(partTypesStringSep);
- partTypesString = partTypesString.concat(partKey.getType());
+ partTypesString = partTypesString.concat(partKey.getType());
if (partStringSep.length() == 0) {
partStringSep = "/";
partTypesStringSep = ":";
@@ -1007,7 +1007,7 @@ public class MetaStoreUtils {
schema
.setProperty(
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES,
- partTypesString);
+ partTypesString);
}
if (parameters != null) {
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java Sun Oct 5 22:26:43 2014
@@ -57,6 +57,6 @@ public class TSetIpAddressProcessor<I ex
}
protected void setIpAddress(final Socket inSocket) {
- HMSHandler.setIpAddress(inSocket.getInetAddress().toString());
+ HMSHandler.setIpAddress(inSocket.getInetAddress().getHostAddress());
}
}
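
The switch from toString() to getHostAddress() matters because
InetAddress.toString() embeds the hostname. A small demonstration of the two
forms for a typical loopback lookup:

    import java.net.InetAddress;

    public class InetAddressForms {
      public static void main(String[] args) throws Exception {
        InetAddress addr = InetAddress.getByName("localhost");
        // toString() is "hostname/literal-IP", e.g. "localhost/127.0.0.1",
        // which is why IpAddressListener previously had to strip the prefix.
        System.out.println(addr.toString());
        // getHostAddress() is the bare IP literal, e.g. "127.0.0.1".
        System.out.println(addr.getHostAddress());
      }
    }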
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Sun Oct 5 22:26:43 2014
@@ -224,8 +224,12 @@ public class Warehouse {
}
public boolean deleteDir(Path f, boolean recursive) throws MetaException {
+ return deleteDir(f, recursive, false);
+ }
+
+ public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
FileSystem fs = getFs(f);
- return fsHandler.deleteDir(fs, f, recursive, conf);
+ return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
}
public boolean isEmpty(Path path) throws IOException, MetaException {
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java Sun Oct 5 22:26:43 2014
@@ -38,6 +38,8 @@ public abstract class PreEventContext {
DROP_DATABASE,
LOAD_PARTITION_DONE,
AUTHORIZATION_API_CALL,
+ READ_TABLE,
+ READ_DATABASE
}
private final PreEventType eventType;
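
Existing pre-event listeners are unaffected by the two new enum values; only
listeners that opt in will react. A hedged sketch of such a listener,
assuming PreEventContext exposes the usual getEventType() accessor and that
MetaStorePreEventListener mirrors the Configuration-based constructor seen on
the event listener in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.PreEventContext;

    public class ReadAuditListener extends MetaStorePreEventListener {
      public ReadAuditListener(Configuration config) {
        super(config);
      }

      @Override
      public void onEvent(PreEventContext context) throws MetaException {
        switch (context.getEventType()) {
          case READ_TABLE:
          case READ_DATABASE:
            // e.g. record who is reading which object before the call proceeds
            break;
          default:
            break; // ignore event types this listener does not care about
        }
      }
    }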
Modified: hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/spark-new/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Sun Oct 5 22:26:43 2014
@@ -233,12 +233,22 @@ public class TxnHandler {
}
}
- public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns) {
+ /**
+ * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
+ * {@link org.apache.hadoop.hive.common.ValidTxnList}.
+ * @param txns txn list from the metastore
+ * @param currentTxn Current transaction that the user has open. If this is greater than 0 it
+ * will be removed from the exceptions list so that the user sees their own
+ * transaction as valid.
+ * @return a valid txn list.
+ */
+ public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns, long currentTxn) {
long highWater = txns.getTxn_high_water_mark();
Set<Long> open = txns.getOpen_txns();
- long[] exceptions = new long[open.size()];
+ long[] exceptions = new long[open.size() - (currentTxn > 0 ? 1 : 0)];
int i = 0;
for(long txn: open) {
+ if (currentTxn > 0 && currentTxn == txn) continue;
exceptions[i++] = txn;
}
return new ValidTxnListImpl(exceptions, highWater);
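
A quick worked example of the new exclusion behavior, assuming the usual
thrift-generated all-args constructor on GetOpenTxnsResponse: with open
transactions {5, 7, 9}, a high-water mark of 10, and currentTxn = 7, the
exceptions array shrinks to [5, 9], so the caller's own transaction no longer
reads as open.

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hive.common.ValidTxnList;
    import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
    import org.apache.hadoop.hive.metastore.txn.TxnHandler;

    public class ValidTxnsExample {
      public static void main(String[] args) {
        Set<Long> open = new HashSet<Long>();
        open.add(5L);
        open.add(7L);
        open.add(9L);
        GetOpenTxnsResponse txns = new GetOpenTxnsResponse(10L, open);

        // Txn 7 is the caller's own, so it is dropped from the exceptions
        // list; only 5 and 9 remain encoded as open transactions.
        ValidTxnList valid = TxnHandler.createValidTxnList(txns, 7);
        System.out.println(valid.toString());
      }
    }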
Modified: hive/branches/spark-new/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java (original)
+++ hive/branches/spark-new/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java Sun Oct 5 22:26:43 2014
@@ -47,15 +47,10 @@ public class IpAddressListener extends M
super(config);
}
- private String getIpFromInetAddress(String addr) {
- return addr.substring(addr.indexOf('/') + 1);
- }
-
private void checkIpAddress() {
try {
- String localhostIp = InetAddress.getByName(LOCAL_HOST).toString();
- Assert.assertEquals(getIpFromInetAddress(localhostIp),
- getIpFromInetAddress(HMSHandler.getIpAddress()));
+ String localhostIp = InetAddress.getByName(LOCAL_HOST).getHostAddress();
+ Assert.assertEquals(localhostIp, HMSHandler.getIpAddress());
} catch (UnknownHostException e) {
Assert.assertTrue("InetAddress.getLocalHost threw an exception: " + e.getMessage(), false);
}
Modified: hive/branches/spark-new/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/pom.xml?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/pom.xml (original)
+++ hive/branches/spark-new/pom.xml Sun Oct 5 22:26:43 2014
@@ -115,7 +115,7 @@
<groovy.version>2.1.6</groovy.version>
<hadoop-20.version>0.20.2</hadoop-20.version>
<hadoop-20S.version>1.2.1</hadoop-20S.version>
- <hadoop-23.version>2.4.0</hadoop-23.version>
+ <hadoop-23.version>2.5.0</hadoop-23.version>
<hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
<hbase.hadoop1.version>0.98.3-hadoop1</hbase.hadoop1.version>
<hbase.hadoop2.version>0.98.3-hadoop2</hbase.hadoop2.version>
@@ -151,7 +151,7 @@
<stax.version>1.0.1</stax.version>
<slf4j.version>1.7.5</slf4j.version>
<ST4.version>4.0.4</ST4.version>
- <tez.version>0.5.0</tez.version>
+ <tez.version>0.5.1</tez.version>
<super-csv.version>2.2.0</super-csv.version>
<spark.version>1.2.0-SNAPSHOT</spark.version>
<scala.binary.version>2.10</scala.binary.version>
@@ -982,6 +982,11 @@
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-client</artifactId>
+ <version>${hadoop-20S.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop-20S.version}</version>
</dependency>
@@ -1024,6 +1029,11 @@
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-client</artifactId>
+ <version>${hadoop-23.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop-23.version}</version>
</dependency>
Modified: hive/branches/spark-new/ql/if/queryplan.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/if/queryplan.thrift?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/if/queryplan.thrift (original)
+++ hive/branches/spark-new/ql/if/queryplan.thrift Sun Oct 5 22:26:43 2014
@@ -59,6 +59,7 @@ enum OperatorType {
EVENT,
ORCFILEMERGE,
RCFILEMERGE,
+ MERGEJOIN,
}
struct Operator {
Modified: hive/branches/spark-new/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/pom.xml?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/pom.xml (original)
+++ hive/branches/spark-new/ql/pom.xml Sun Oct 5 22:26:43 2014
@@ -28,6 +28,7 @@
<name>Hive Query Language</name>
<properties>
+ <optiq.version>0.9.1-incubating-SNAPSHOT</optiq.version>
<hive.path.to.root>..</hive.path.to.root>
</properties>
@@ -182,6 +183,42 @@
<version>${datanucleus-core.version}</version>
</dependency>
<dependency>
+ <groupId>org.apache.optiq</groupId>
+ <artifactId>optiq-core</artifactId>
+ <version>${optiq.version}</version>
+ <exclusions>
+ <!-- hsqldb interferes with the use of derby as the default db
+ in hive's use of datanucleus.
+ -->
+ <exclusion>
+ <groupId>org.hsqldb</groupId>
+ <artifactId>hsqldb</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.optiq</groupId>
+ <artifactId>optiq-avatica</artifactId>
+ <version>${optiq.version}</version>
+ <exclusions>
+ <!-- hsqldb interferes with the use of derby as the default db
+ in hive's use of datanucleus.
+ -->
+ <exclusion>
+ <groupId>org.hsqldb</groupId>
+ <artifactId>hsqldb</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp Sun Oct 5 22:26:43 2014
@@ -54,7 +54,8 @@ int _kOperatorTypeValues[] = {
OperatorType::DEMUX,
OperatorType::EVENT,
OperatorType::ORCFILEMERGE,
- OperatorType::RCFILEMERGE
+ OperatorType::RCFILEMERGE,
+ OperatorType::MERGEJOIN
};
const char* _kOperatorTypeNames[] = {
"JOIN",
@@ -80,9 +81,10 @@ const char* _kOperatorTypeNames[] = {
"DEMUX",
"EVENT",
"ORCFILEMERGE",
- "RCFILEMERGE"
+ "RCFILEMERGE",
+ "MERGEJOIN"
};
-const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(24, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(25, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kTaskTypeValues[] = {
TaskType::MAP,
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.h
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.h?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.h (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-cpp/queryplan_types.h Sun Oct 5 22:26:43 2014
@@ -59,7 +59,8 @@ struct OperatorType {
DEMUX = 20,
EVENT = 21,
ORCFILEMERGE = 22,
- RCFILEMERGE = 23
+ RCFILEMERGE = 23,
+ MERGEJOIN = 24
};
};
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java Sun Oct 5 22:26:43 2014
@@ -35,7 +35,8 @@ public enum OperatorType implements org.
DEMUX(20),
EVENT(21),
ORCFILEMERGE(22),
- RCFILEMERGE(23);
+ RCFILEMERGE(23),
+ MERGEJOIN(24);
private final int value;
@@ -104,6 +105,8 @@ public enum OperatorType implements org.
return ORCFILEMERGE;
case 23:
return RCFILEMERGE;
+ case 24:
+ return MERGEJOIN;
default:
return null;
}
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-php/Types.php
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-php/Types.php?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-php/Types.php (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-php/Types.php Sun Oct 5 22:26:43 2014
@@ -59,6 +59,7 @@ final class OperatorType {
const EVENT = 21;
const ORCFILEMERGE = 22;
const RCFILEMERGE = 23;
+ const MERGEJOIN = 24;
static public $__names = array(
0 => 'JOIN',
1 => 'MAPJOIN',
@@ -84,6 +85,7 @@ final class OperatorType {
21 => 'EVENT',
22 => 'ORCFILEMERGE',
23 => 'RCFILEMERGE',
+ 24 => 'MERGEJOIN',
);
}
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-py/queryplan/ttypes.py
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-py/queryplan/ttypes.py?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-py/queryplan/ttypes.py (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-py/queryplan/ttypes.py Sun Oct 5 22:26:43 2014
@@ -69,6 +69,7 @@ class OperatorType:
EVENT = 21
ORCFILEMERGE = 22
RCFILEMERGE = 23
+ MERGEJOIN = 24
_VALUES_TO_NAMES = {
0: "JOIN",
@@ -95,6 +96,7 @@ class OperatorType:
21: "EVENT",
22: "ORCFILEMERGE",
23: "RCFILEMERGE",
+ 24: "MERGEJOIN",
}
_NAMES_TO_VALUES = {
@@ -122,6 +124,7 @@ class OperatorType:
"EVENT": 21,
"ORCFILEMERGE": 22,
"RCFILEMERGE": 23,
+ "MERGEJOIN": 24,
}
class TaskType:
Modified: hive/branches/spark-new/ql/src/gen/thrift/gen-rb/queryplan_types.rb
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/gen/thrift/gen-rb/queryplan_types.rb?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/gen/thrift/gen-rb/queryplan_types.rb (original)
+++ hive/branches/spark-new/ql/src/gen/thrift/gen-rb/queryplan_types.rb Sun Oct 5 22:26:43 2014
@@ -45,8 +45,9 @@ module OperatorType
EVENT = 21
ORCFILEMERGE = 22
RCFILEMERGE = 23
- VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE"}
- VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE]).freeze
+ MERGEJOIN = 24
+ VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE", 24 => "MERGEJOIN"}
+ VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN]).freeze
end
module TaskType
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Sun Oct 5 22:26:43 2014
@@ -390,6 +390,14 @@ public class Driver implements CommandPr
tree = ParseUtils.findRootNonNullToken(tree);
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
+ // Initialize the transaction manager and record the valid transactions for this query.
+ // This must be done before analyze is called. We have to do it at compile time because
+ // we use the information in planning the query, and we want to record it at this point
+ // so that users see data valid as of the moment they submit the query.
+ SessionState.get().initTxnMgr(conf);
+ recordValidTxns();
+
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
List<HiveSemanticAnalyzerHook> saHooks =
@@ -422,7 +430,8 @@ public class Driver implements CommandPr
sem.validate();
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
- plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId);
+ plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
+ SessionState.get().getCommandType());
String queryStr = plan.getQueryStr();
conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);
@@ -870,28 +879,24 @@ public class Driver implements CommandPr
// Write the current set of valid transactions into the conf file so that it can be read by
// the input format.
- private int recordValidTxns() {
- try {
- ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns();
- String txnStr = txns.toString();
- conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
- LOG.debug("Encoding valid txns info " + txnStr);
- return 0;
- } catch (LockException e) {
- errorMessage = "FAILED: Error in determing valid transactions: " + e.getMessage();
- SQLState = ErrorMsg.findSQLState(e.getMessage());
- downstreamError = e;
- console.printError(errorMessage, "\n"
- + org.apache.hadoop.util.StringUtils.stringifyException(e));
- return 10;
- }
+ private void recordValidTxns() throws LockException {
+ ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns();
+ String txnStr = txns.toString();
+ conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
+ LOG.debug("Encoding valid txns info " + txnStr);
+ // TODO I think when we switch to cross-query transactions we need to keep this list in
+ // session state rather than aggressively encoding it in the conf like this. We can then
+ // let the TableScanOperators encode it in the conf before calling the input formats.
}
/**
* Acquire read and write locks needed by the statement. The list of objects to be locked are
- * obtained from he inputs and outputs populated by the compiler. The lock acuisition scheme is
+ * obtained from the inputs and outputs populated by the compiler. The lock acquisition scheme is
* pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making
* sure that the locks are lexicographically sorted.
+ *
+ * This method also records the list of valid transactions. This must be done after any
+ * transactions have been opened and locks acquired.
**/
private int acquireLocksAndOpenTxn() {
PerfLogger perfLogger = PerfLogger.getPerfLogger();
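
The recorded transaction list travels through the job conf as a plain string
keyed by ValidTxnList.VALID_TXNS_KEY, so any downstream reader (such as an
input format) can reconstruct it. A sketch of the round trip; the
string-parsing ValidTxnListImpl constructor is assumed here as the inverse of
the toString() call used above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.common.ValidTxnList;
    import org.apache.hadoop.hive.common.ValidTxnListImpl;

    public class TxnConfRoundTrip {
      public static void main(String[] args) {
        // The long[]/long constructor is the one used by TxnHandler above.
        ValidTxnList txns = new ValidTxnListImpl(new long[]{5L, 9L}, 10L);

        // Writer side (Driver.recordValidTxns): encode into the conf.
        Configuration conf = new Configuration();
        conf.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());

        // Reader side: decode from the conf. ValidTxnListImpl(String) is an
        // assumption, standing in for whatever parsing the readers use.
        ValidTxnList decoded =
            new ValidTxnListImpl(conf.get(ValidTxnList.VALID_TXNS_KEY));
      }
    }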
@@ -927,6 +932,9 @@ public class Driver implements CommandPr
desc.setTransactionId(txnId);
}
}
+
+ // TODO Once we move to cross-query transactions we need to add the open transaction to
+ // our list of valid transactions. We don't have a way to do that right now.
}
txnMgr.acquireLocks(plan, ctx, userFromUGI);
@@ -1108,11 +1116,6 @@ public class Driver implements CommandPr
SessionState ss = SessionState.get();
try {
ckLock = checkConcurrency();
- try {
- ss.initTxnMgr(conf);
- } catch (LockException e) {
- throw new SemanticException(e.getMessage(), e);
- }
} catch (SemanticException e) {
errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1121,11 +1124,8 @@ public class Driver implements CommandPr
+ org.apache.hadoop.util.StringUtils.stringifyException(e));
return createProcessorResponse(10);
}
- int ret = recordValidTxns();
- if (ret != 0) {
- return createProcessorResponse(ret);
- }
+ int ret;
if (!alreadyCompiled) {
ret = compileInternal(command);
if (ret != 0) {
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Sun Oct 5 22:26:43 2014
@@ -417,6 +417,10 @@ public enum ErrorMsg {
"that implements AcidOutputFormat while transaction manager that supports ACID is in use"),
VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
"Values clause with table constructor not yet supported"),
+ ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that does not use " +
+ "an AcidOutputFormat or is not bucketed", true),
+ ACID_NO_SORTED_BUCKETS(10298, "ACID insert, update, delete not supported on tables that are " +
+ "sorted, table {0}", true),
//========================== 20000 range starts here ========================//
SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java Sun Oct 5 22:26:43 2014
@@ -104,16 +104,14 @@ public class QueryPlan implements Serial
private QueryProperties queryProperties;
private transient Long queryStartTime;
+ private String operationName;
public QueryPlan() {
this.reducerTimeStatsPerJobList = new ArrayList<ReducerTimeStatsPerJob>();
}
- public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime) {
- this(queryString, sem, startTime, null);
- }
-
- public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId) {
+ public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId,
+ String operationName) {
this.queryString = queryString;
rootTasks = new ArrayList<Task<? extends Serializable>>();
@@ -134,6 +132,7 @@ public class QueryPlan implements Serial
query.putToQueryAttributes("queryString", this.queryString);
queryProperties = sem.getQueryProperties();
queryStartTime = startTime;
+ this.operationName = operationName;
}
public String getQueryStr() {
@@ -786,4 +785,8 @@ public class QueryPlan implements Serial
public void setQueryStartTime(Long queryStartTime) {
this.queryStartTime = queryStartTime;
}
+
+ public String getOperationName() {
+ return operationName;
+ }
}
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java Sun Oct 5 22:26:43 2014
@@ -48,12 +48,37 @@ public class QueryProperties {
boolean mapJoinRemoved = false;
boolean hasMapGroupBy = false;
+ private int noOfJoins = 0;
+ private int noOfOuterJoins = 0;
+ private boolean hasLateralViews;
+
+ private boolean multiDestQuery;
+ private boolean filterWithSubQuery;
+
public boolean hasJoin() {
- return hasJoin;
+ return (noOfJoins > 0);
}
- public void setHasJoin(boolean hasJoin) {
- this.hasJoin = hasJoin;
+ public void incrementJoinCount(boolean outerJoin) {
+ noOfJoins++;
+ if (outerJoin)
+ noOfOuterJoins++;
+ }
+
+ public int getJoinCount() {
+ return noOfJoins;
+ }
+
+ public int getOuterJoinCount() {
+ return noOfOuterJoins;
+ }
+
+ public void setHasLateralViews(boolean hasLateralViews) {
+ this.hasLateralViews = hasLateralViews;
+ }
+
+ public boolean hasLateralViews() {
+ return hasLateralViews;
}
public boolean hasGroupBy() {
@@ -144,6 +169,22 @@ public class QueryProperties {
this.hasMapGroupBy = hasMapGroupBy;
}
+ public boolean hasMultiDestQuery() {
+ return this.multiDestQuery;
+ }
+
+ public void setMultiDestQuery(boolean multiDestQuery) {
+ this.multiDestQuery = multiDestQuery;
+ }
+
+ public void setFilterWithSubQuery(boolean filterWithSubQuery) {
+ this.filterWithSubQuery = filterWithSubQuery;
+ }
+
+ public boolean hasFilterWithSubQuery() {
+ return this.filterWithSubQuery;
+ }
+
public void clear() {
hasJoin = false;
hasGroupBy = false;
@@ -160,5 +201,11 @@ public class QueryProperties {
hasClusterBy = false;
mapJoinRemoved = false;
hasMapGroupBy = false;
+
+ noOfJoins = 0;
+ noOfOuterJoins = 0;
+
+ multiDestQuery = false;
+ filterWithSubQuery = false;
}
}
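
The boolean hasJoin flag gives way to a pair of counters, letting callers
distinguish inner from outer joins. A short hypothetical walk-through of the
accounting:

    import org.apache.hadoop.hive.ql.QueryProperties;

    public class JoinCounting {
      public static void main(String[] args) {
        QueryProperties props = new QueryProperties();
        props.incrementJoinCount(false); // an inner join
        props.incrementJoinCount(true);  // an outer join bumps both counters
        System.out.println(props.hasJoin());           // true (noOfJoins > 0)
        System.out.println(props.getJoinCount());      // 2
        System.out.println(props.getOuterJoinCount()); // 1
      }
    }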
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java Sun Oct 5 22:26:43 2014
@@ -61,13 +61,13 @@ public abstract class AbstractMapJoinOpe
@Override
@SuppressWarnings("unchecked")
protected void initializeOp(Configuration hconf) throws HiveException {
- int tagLen = conf.getTagLength();
-
- joinKeys = new List[tagLen];
-
- JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE);
- joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys,
- inputObjInspectors,NOTSKIPBIGTABLE, tagLen);
+ if (conf.getGenJoinKeys()) {
+ int tagLen = conf.getTagLength();
+ joinKeys = new List[tagLen];
+ JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE);
+ joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys,
+ inputObjInspectors, NOTSKIPBIGTABLE, tagLen);
+ }
super.initializeOp(hconf);
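
The new guard means join-key evaluators are only built when the descriptor
asks for them; a plan that supplies keys some other way can turn the flag off
and skip the setup. Roughly (the setter below is assumed to pair with the
getGenJoinKeys() getter used above):

    import org.apache.hadoop.hive.ql.plan.MapJoinDesc;

    MapJoinDesc desc = new MapJoinDesc();
    // Assumed setter matching conf.getGenJoinKeys() in initializeOp().
    desc.setGenJoinKeys(false);
    // With the flag off, initializeOp() no longer populates joinKeys or
    // joinKeysObjectInspectors, so keys must be provided by the caller.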
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java Sun Oct 5 22:26:43 2014
@@ -44,10 +44,10 @@ import org.apache.tez.runtime.api.events
@SuppressWarnings({ "deprecation", "serial" })
public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
- private transient Serializer serializer;
- private transient DataOutputBuffer buffer;
- private transient boolean hasReachedMaxSize = false;
- private transient long MAX_SIZE;
+ protected transient Serializer serializer;
+ protected transient DataOutputBuffer buffer;
+ protected transient boolean hasReachedMaxSize = false;
+ protected transient long MAX_SIZE;
@Override
public void initializeOp(Configuration hconf) throws HiveException {
@@ -57,12 +57,9 @@ public class AppMasterEventOperator exte
initDataBuffer(false);
}
- private void initDataBuffer(boolean skipPruning) throws HiveException {
+ protected void initDataBuffer(boolean skipPruning) throws HiveException {
buffer = new DataOutputBuffer();
try {
- // where does this go to?
- buffer.writeUTF(((TezContext) TezContext.get()).getTezProcessorContext().getTaskVertexName());
-
// add any other header info
getConf().writeEventHeader(buffer);
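
Widening these members from private to protected lets a subclass reuse the
event-buffering machinery. A minimal sketch of such a subclass (hypothetical,
assuming only the members made visible above):

    // Hypothetical specialization that checks the buffer after setup.
    public class CheckedEventOperator extends AppMasterEventOperator {
      @Override
      public void initializeOp(Configuration hconf) throws HiveException {
        super.initializeOp(hconf);
        // buffer, MAX_SIZE and hasReachedMaxSize are accessible here now.
        if (buffer.getLength() > MAX_SIZE) {
          hasReachedMaxSize = true;
        }
      }
    }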
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Sun Oct 5 22:26:43 2014
@@ -323,7 +323,6 @@ public abstract class CommonJoinOperator
@Override
public void startGroup() throws HiveException {
- LOG.trace("Join: Starting new group");
newGroupStarted = true;
for (AbstractRowContainer<List<Object>> alw : storage) {
alw.clearRows();
@@ -632,8 +631,6 @@ public abstract class CommonJoinOperator
*/
@Override
public void endGroup() throws HiveException {
- LOG.trace("Join Op: endGroup called: numValues=" + numAliases);
-
checkAndGenObject();
}
@@ -719,7 +716,6 @@ public abstract class CommonJoinOperator
if (noOuterJoin) {
if (alw.rowCount() == 0) {
- LOG.trace("No data for alias=" + i);
return;
} else if (alw.rowCount() > 1) {
mayHasMoreThanOne = true;
@@ -776,7 +772,6 @@ public abstract class CommonJoinOperator
*/
@Override
public void closeOp(boolean abort) throws HiveException {
- LOG.trace("Join Op close");
for (AbstractRowContainer<List<Object>> alw : storage) {
if (alw != null) {
alw.clearRows(); // clean up the temp files
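
These trace statements sat on per-row and per-group hot paths, where even a
disabled logger pays for the string concatenation built before the call. This
commit removes them outright; had the messages been worth keeping, the cheaper
idiom would be to guard them (illustrative alternative, not what the commit
does):

    if (LOG.isTraceEnabled()) {
      LOG.trace("Join Op: endGroup called: numValues=" + numAliases);
    }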
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sun Oct 5 22:26:43 2014
@@ -3275,19 +3275,21 @@ public class DDLTask extends Task<DDLWor
}
Table oldTbl = tbl.copy();
+ List<FieldSchema> oldCols = (part == null ? tbl.getCols() : part.getCols());
+ StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
List<FieldSchema> newCols = alterTbl.getNewCols();
- List<FieldSchema> oldCols = tbl.getCols();
- if (tbl.getSerializationLib().equals(
+ String serializationLib = sd.getSerdeInfo().getSerializationLib();
+ if (serializationLib.equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
- tbl.setSerializationLib(LazySimpleSerDe.class.getName());
- tbl.getTTable().getSd().setCols(newCols);
+ sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
+ sd.setCols(newCols);
} else {
// make sure the columns do not already exist
Iterator<FieldSchema> iterNewCols = newCols.iterator();
@@ -3303,10 +3305,9 @@ public class DDLTask extends Task<DDLWor
}
oldCols.add(newCol);
}
- tbl.getTTable().getSd().setCols(oldCols);
+ sd.setCols(oldCols);
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
- List<FieldSchema> oldCols = tbl.getCols();
List<FieldSchema> newCols = new ArrayList<FieldSchema>();
Iterator<FieldSchema> iterOldCols = oldCols.iterator();
String oldName = alterTbl.getOldColName();
@@ -3367,24 +3368,24 @@ public class DDLTask extends Task<DDLWor
newCols.add(position, column);
}
- tbl.getTTable().getSd().setCols(newCols);
-
+ sd.setCols(newCols);
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
// change SerDe to LazySimpleSerDe if it is columnsetSerDe
- if (tbl.getSerializationLib().equals(
+ String serializationLib = sd.getSerdeInfo().getSerializationLib();
+ if (serializationLib.equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
- tbl.setSerializationLib(LazySimpleSerDe.class.getName());
- } else if (!tbl.getSerializationLib().equals(
+ sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
+ } else if (!serializationLib.equals(
MetadataTypedColumnsetSerDe.class.getName())
- && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
- && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
- && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())
- && !tbl.getSerializationLib().equals(ParquetHiveSerDe.class.getName())) {
+ && !serializationLib.equals(LazySimpleSerDe.class.getName())
+ && !serializationLib.equals(ColumnarSerDe.class.getName())
+ && !serializationLib.equals(DynamicSerDe.class.getName())
+ && !serializationLib.equals(ParquetHiveSerDe.class.getName())) {
throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName());
}
- tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
+ sd.setCols(alterTbl.getNewCols());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
tbl.getTTable().getParameters().putAll(alterTbl.getProps());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
@@ -3393,47 +3394,26 @@ public class DDLTask extends Task<DDLWor
tbl.getTTable().getParameters().remove(keyItr.next());
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
- if (part != null) {
- part.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
- alterTbl.getProps());
- } else {
- tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
- alterTbl.getProps());
- }
+ sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
String serdeName = alterTbl.getSerdeName();
+ sd.getSerdeInfo().setSerializationLib(serdeName);
+ if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
+ sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
+ }
if (part != null) {
- part.getTPartition().getSd().getSerdeInfo().setSerializationLib(serdeName);
- if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
- part.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
- alterTbl.getProps());
- }
part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols());
} else {
- tbl.setSerializationLib(alterTbl.getSerdeName());
- if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
- tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
- alterTbl.getProps());
- }
if (!Table.hasMetastoreBasedSchema(conf, serdeName)) {
tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl.
getDeserializer()));
}
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
- if(part != null) {
- part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
- part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
- if (alterTbl.getSerdeName() != null) {
- part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
- alterTbl.getSerdeName());
- }
- } else {
- tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
- tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
- if (alterTbl.getSerdeName() != null) {
- tbl.setSerializationLib(alterTbl.getSerdeName());
- }
+ sd.setInputFormat(alterTbl.getInputFormat());
+ sd.setOutputFormat(alterTbl.getOutputFormat());
+ if (alterTbl.getSerdeName() != null) {
+ sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
boolean protectModeEnable = alterTbl.isProtectModeEnable();
@@ -3463,8 +3443,6 @@ public class DDLTask extends Task<DDLWor
.getColumnNamesFromSortCols(alterTbl.getSortColumns()));
}
- StorageDescriptor sd = part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd();
-
if (alterTbl.isTurnOffSorting()) {
sd.setSortCols(new ArrayList<Order>());
} else if (alterTbl.getNumberBuckets() == -1) {
@@ -3485,11 +3463,7 @@ public class DDLTask extends Task<DDLWor
|| locUri.getScheme().trim().equals("")) {
throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
}
- if (part != null) {
- part.setLocation(newLocation);
- } else {
- tbl.setDataLocation(new Path(locUri));
- }
+ sd.setLocation(newLocation);
} catch (URISyntaxException e) {
throw new HiveException(e);
}
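
The pattern behind this cluster of hunks: resolve the target
StorageDescriptor once, up front, so every ALTER TABLE branch writes through
the same sd reference instead of repeating a partition-vs-table conditional.
In outline (condensed from the changes above):

    // Chosen once at the top of the alter-table handling:
    StorageDescriptor sd = (part == null)
        ? tbl.getTTable().getSd()
        : part.getTPartition().getSd();

    // Each branch then targets sd directly, e.g.:
    sd.setInputFormat(alterTbl.getInputFormat());
    sd.setOutputFormat(alterTbl.getOutputFormat());
    sd.setLocation(newLocation);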
@@ -3689,7 +3663,7 @@ public class DDLTask extends Task<DDLWor
}
// drop the table
- db.dropTable(dropTbl.getTableName());
+ db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
if (tbl != null) {
// We have already locked the table in DDLSemanticAnalyzer, don't do it again here
work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
@@ -4233,7 +4207,7 @@ public class DDLTask extends Task<DDLWor
String statVal = props.get(stat);
if (statVal != null && Long.parseLong(statVal) > 0) {
statsPresent = true;
- props.put(statVal, "0");
+ props.put(stat, "0");
props.put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false");
}
}
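
The final hunk fixes a genuine key/value swap: the old code used the stat's
value as the map key, so it inserted a stray entry and never zeroed the
intended counter. A worked illustration (map contents hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    Map<String, String> props = new HashMap<String, String>();
    props.put("numRows", "42");
    String stat = "numRows";
    String statVal = props.get(stat);   // "42"

    // Before: props.put(statVal, "0") created a bogus "42" -> "0" entry
    // and left "numRows" at 42.
    // After the fix:
    props.put(stat, "0");               // "numRows" is correctly reset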
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java Sun Oct 5 22:26:43 2014
@@ -65,7 +65,7 @@ import org.apache.hadoop.hive.serde2.obj
*/
public class DummyStoreOperator extends Operator<DummyStoreDesc> implements Serializable {
- private transient InspectableObject result;
+ protected transient InspectableObject result;
public DummyStoreOperator() {
super();
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java Sun Oct 5 22:26:43 2014
@@ -45,6 +45,7 @@ public class ExprNodeGenericFuncEvaluato
transient ExprNodeEvaluator[] children;
transient GenericUDF.DeferredObject[] deferredChildren;
transient boolean isEager;
+ transient boolean isConstant = false;
/**
* Class to allow deferred evaluation for GenericUDF.
@@ -124,7 +125,10 @@ public class ExprNodeGenericFuncEvaluato
if (context != null) {
context.setup(genericUDF);
}
- return outputOI = genericUDF.initializeAndFoldConstants(childrenOIs);
+ outputOI = genericUDF.initializeAndFoldConstants(childrenOIs);
+ isConstant = ObjectInspectorUtils.isConstantObjectInspector(outputOI)
+ && isDeterministic();
+ return outputOI;
}
@Override
@@ -154,12 +158,11 @@ public class ExprNodeGenericFuncEvaluato
@Override
protected Object _evaluate(Object row, int version) throws HiveException {
- rowObject = row;
- if (ObjectInspectorUtils.isConstantObjectInspector(outputOI) &&
- isDeterministic()) {
+ if (isConstant) {
// The output of this UDF is constant, so don't even bother evaluating.
- return ((ConstantObjectInspector)outputOI).getWritableConstantValue();
+ return ((ConstantObjectInspector) outputOI).getWritableConstantValue();
}
+ rowObject = row;
for (int i = 0; i < deferredChildren.length; i++) {
deferredChildren[i].prepare(version);
}
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Sun Oct 5 22:26:43 2014
@@ -165,7 +165,7 @@ public class FetchOperator implements Se
private void setupExecContext() {
if (hasVC || work.getSplitSample() != null) {
- context = new ExecMapperContext();
+ context = new ExecMapperContext(job);
if (operator != null) {
operator.setExecContext(context);
}
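
ExecMapperContext is now constructed with the job configuration rather than
acquiring it later, and FetchOperator passes the JobConf it already holds.
The calling convention, as a sketch (package path per the Hive source tree):

    import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
    import org.apache.hadoop.mapred.JobConf;

    JobConf job = new JobConf();
    // The context receives its configuration at construction time.
    ExecMapperContext context = new ExecMapperContext(job);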