Posted to commits@hive.apache.org by zs...@apache.org on 2009/10/08 02:45:56 UTC
svn commit: r822976 [1/5] - in /hadoop/hive/trunk: ./ metastore/if/ metastore/src/gen-cpp/ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen-php/ metastore/src/gen-py/hive_metastore/ metastore/src/java/org/apache/hadoop...
Author: zshao
Date: Thu Oct 8 00:45:54 2009
New Revision: 822976
URL: http://svn.apache.org/viewvc?rev=822976&view=rev
Log:
HIVE-868. Add last ddl time and dml time for table/partition. (Namit Jain via zshao)
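
[Editor's note] The core of the change is a three-line stamp applied wherever a table or partition is created (see the HiveMetaStore.java hunks below). A minimal sketch using only names this patch introduces; the wrapper method stampCreateTime is illustrative, not part of the patch:

    import org.apache.hadoop.hive.metastore.api.Constants;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Illustrative wrapper around the stamp added in create_table (the same
    // three lines are also added to add_partition and add_partitions).
    static void stampCreateTime(Table tbl) {
      long time = System.currentTimeMillis() / 1000;   // epoch seconds, not millis
      tbl.setCreateTime((int) time);                   // Thrift createTime is an i32
      tbl.putToParameters(Constants.DDL_TIME, Long.toString(time));
    }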
Modified:
hadoop/hive/trunk/CHANGES.txt
hadoop/hive/trunk/metastore/if/hive_metastore.thrift
hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.cpp
hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.h
hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Constants.java
hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
hadoop/hive/trunk/metastore/src/gen-php/hive_metastore_constants.php
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/constants.py
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ttypes.py
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample5.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample6.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample7.q.out
hadoop/hive/trunk/ql/src/test/results/clientpositive/sample9.q.out
hadoop/hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/union.q.xml
Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Thu Oct 8 00:45:54 2009
@@ -59,6 +59,9 @@
HIVE-682. Add concat_ws (Jonathan Chang via namit)
+ HIVE-868. Add last ddl time and dml time for table/partition.
+ (Namit Jain via zshao)
+
IMPROVEMENTS
HIVE-760. Add version info to META-INF/MANIFEST.MF.
Modified: hadoop/hive/trunk/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/if/hive_metastore.thrift?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/if/hive_metastore.thrift (original)
+++ hadoop/hive/trunk/metastore/if/hive_metastore.thrift Thu Oct 8 00:45:54 2009
@@ -9,6 +9,8 @@
namespace php metastore
namespace cpp Apache.Hadoop.Hive
+const string DDL_TIME = "transient_lastDdlTime"
+
struct Version {
1: string version,
2: string comments
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.cpp
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.cpp?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.cpp (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.cpp Thu Oct 8 00:45:54 2009
@@ -10,6 +10,8 @@
const hive_metastoreConstants g_hive_metastore_constants;
hive_metastoreConstants::hive_metastoreConstants() {
+ DDL_TIME = "transient_lastDdlTime";
+
META_TABLE_COLUMNS = "columns";
META_TABLE_COLUMN_TYPES = "columns.types";
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.h
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.h?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.h (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/hive_metastore_constants.h Thu Oct 8 00:45:54 2009
@@ -14,6 +14,7 @@
public:
hive_metastoreConstants();
+ std::string DDL_TIME;
std::string META_TABLE_COLUMNS;
std::string META_TABLE_COLUMN_TYPES;
std::string BUCKET_FIELD_NAME;
Modified: hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Constants.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Constants.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Constants.java (original)
+++ hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Constants.java Thu Oct 8 00:45:54 2009
@@ -16,6 +16,8 @@
public class Constants {
+ public static final String DDL_TIME = "transient_lastDdlTime";
+
public static final String META_TABLE_COLUMNS = "columns";
public static final String META_TABLE_COLUMN_TYPES = "columns.types";
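
Given the generated constant, reading the stamp back out of a table's parameter map is straightforward. A sketch, assuming the standard Thrift getter getParameters() for the parameters field (that getter is not part of this commit):

    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.Constants;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Returns the last-DDL timestamp in epoch seconds, or 0 if never stamped.
    static long lastDdlTime(Table tbl) {
      Map<String, String> params = tbl.getParameters();
      String raw = (params == null) ? null : params.get(Constants.DDL_TIME);
      return (raw == null) ? 0L : Long.parseLong(raw);
    }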
Modified: hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java (original)
+++ hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java Thu Oct 8 00:45:54 2009
@@ -25,6 +25,7 @@
private static final TField TABLE_NAME_FIELD_DESC = new TField("tableName", TType.STRING, (short)3);
private static final TField DB_NAME_FIELD_DESC = new TField("dbName", TType.STRING, (short)4);
private static final TField COL_NAMES_FIELD_DESC = new TField("colNames", TType.LIST, (short)5);
+ private static final TField PART_NAME_FIELD_DESC = new TField("partName", TType.STRING, (short)6);
private String indexName;
public static final int INDEXNAME = 1;
@@ -36,6 +37,8 @@
public static final int DBNAME = 4;
private List<String> colNames;
public static final int COLNAMES = 5;
+ private String partName;
+ public static final int PARTNAME = 6;
private final Isset __isset = new Isset();
private static final class Isset implements java.io.Serializable {
@@ -54,6 +57,8 @@
put(COLNAMES, new FieldMetaData("colNames", TFieldRequirementType.DEFAULT,
new ListMetaData(TType.LIST,
new FieldValueMetaData(TType.STRING))));
+ put(PARTNAME, new FieldMetaData("partName", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
}});
static {
@@ -68,7 +73,8 @@
int indexType,
String tableName,
String dbName,
- List<String> colNames)
+ List<String> colNames,
+ String partName)
{
this();
this.indexName = indexName;
@@ -77,6 +83,7 @@
this.tableName = tableName;
this.dbName = dbName;
this.colNames = colNames;
+ this.partName = partName;
}
/**
@@ -101,6 +108,9 @@
}
this.colNames = __this__colNames;
}
+ if (other.isSetPartName()) {
+ this.partName = other.partName;
+ }
}
@Override
@@ -209,6 +219,23 @@
return this.colNames != null;
}
+ public String getPartName() {
+ return this.partName;
+ }
+
+ public void setPartName(String partName) {
+ this.partName = partName;
+ }
+
+ public void unsetPartName() {
+ this.partName = null;
+ }
+
+ // Returns true if field partName is set (has been assigned a value) and false otherwise
+ public boolean isSetPartName() {
+ return this.partName != null;
+ }
+
public void setFieldValue(int fieldID, Object value) {
switch (fieldID) {
case INDEXNAME:
@@ -251,6 +278,14 @@
}
break;
+ case PARTNAME:
+ if (value == null) {
+ unsetPartName();
+ } else {
+ setPartName((String)value);
+ }
+ break;
+
default:
throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
}
@@ -273,6 +308,9 @@
case COLNAMES:
return getColNames();
+ case PARTNAME:
+ return getPartName();
+
default:
throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
}
@@ -291,6 +329,8 @@
return isSetDbName();
case COLNAMES:
return isSetColNames();
+ case PARTNAME:
+ return isSetPartName();
default:
throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
}
@@ -354,6 +394,15 @@
return false;
}
+ boolean this_present_partName = true && this.isSetPartName();
+ boolean that_present_partName = true && that.isSetPartName();
+ if (this_present_partName || that_present_partName) {
+ if (!(this_present_partName && that_present_partName))
+ return false;
+ if (!this.partName.equals(that.partName))
+ return false;
+ }
+
return true;
}
@@ -419,6 +468,13 @@
TProtocolUtil.skip(iprot, field.type);
}
break;
+ case PARTNAME:
+ if (field.type == TType.STRING) {
+ this.partName = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
TProtocolUtil.skip(iprot, field.type);
break;
@@ -463,6 +519,11 @@
}
oprot.writeFieldEnd();
}
+ if (this.partName != null) {
+ oprot.writeFieldBegin(PART_NAME_FIELD_DESC);
+ oprot.writeString(this.partName);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -507,6 +568,14 @@
sb.append(this.colNames);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("partName:");
+ if (this.partName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.partName);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
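
For reference, the regenerated all-args constructor now carries partName as a sixth argument. A hypothetical construction (all values here are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.Index;

    Index idx = new Index("idx1", /* indexType */ 0, "tbl1", "default",
                          Arrays.asList("key"), "ds=2008-04-08");
    boolean set = idx.isSetPartName();   // true: partName was assigned non-null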
Modified: hadoop/hive/trunk/metastore/src/gen-php/hive_metastore_constants.php
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-php/hive_metastore_constants.php?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-php/hive_metastore_constants.php (original)
+++ hadoop/hive/trunk/metastore/src/gen-php/hive_metastore_constants.php Thu Oct 8 00:45:54 2009
@@ -8,6 +8,8 @@
$GLOBALS['hive_metastore_CONSTANTS'] = array();
+$GLOBALS['hive_metastore_CONSTANTS']['DDL_TIME'] = "transient_lastDdlTime";
+
$GLOBALS['hive_metastore_CONSTANTS']['META_TABLE_COLUMNS'] = "columns";
$GLOBALS['hive_metastore_CONSTANTS']['META_TABLE_COLUMN_TYPES'] = "columns.types";
Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/constants.py
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/constants.py?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/constants.py (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/constants.py Thu Oct 8 00:45:54 2009
@@ -7,6 +7,8 @@
from thrift.Thrift import *
from ttypes import *
+DDL_TIME = "transient_lastDdlTime"
+
META_TABLE_COLUMNS = "columns"
META_TABLE_COLUMN_TYPES = "columns.types"
Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ttypes.py
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ttypes.py?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ttypes.py (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ttypes.py Thu Oct 8 00:45:54 2009
@@ -1019,6 +1019,7 @@
- tableName
- dbName
- colNames
+ - partName
"""
thrift_spec = (
@@ -1028,14 +1029,16 @@
(3, TType.STRING, 'tableName', None, None, ), # 3
(4, TType.STRING, 'dbName', None, None, ), # 4
(5, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 5
+ (6, TType.STRING, 'partName', None, None, ), # 6
)
- def __init__(self, indexName=None, indexType=None, tableName=None, dbName=None, colNames=None,):
+ def __init__(self, indexName=None, indexType=None, tableName=None, dbName=None, colNames=None, partName=None,):
self.indexName = indexName
self.indexType = indexType
self.tableName = tableName
self.dbName = dbName
self.colNames = colNames
+ self.partName = partName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -1076,6 +1079,11 @@
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.partName = iprot.readString();
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -1109,6 +1117,10 @@
oprot.writeString(iter84)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.partName != None:
+ oprot.writeFieldBegin('partName', TType.STRING, 6)
+ oprot.writeString(self.partName)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Oct 8 00:45:54 2009
@@ -47,6 +47,7 @@
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hive.metastore.api.Constants;
import com.facebook.fb303.FacebookBase;
import com.facebook.fb303.FacebookService;
@@ -60,10 +61,10 @@
import org.apache.thrift.transport.TTransportFactory;
/**
- * TODO:pc remove application logic to a separate interface.
+ * TODO:pc remove application logic to a separate interface.
*/
public class HiveMetaStore extends ThriftHiveMetastore {
-
+
public static class HMSHandler extends FacebookBase implements ThriftHiveMetastore.Iface {
public static final Log LOG = LogFactory.getLog(HiveMetaStore.class.getName());
private static boolean createDefaultDB = false;
@@ -85,15 +86,15 @@
}
};
public static Integer get() {
- return threadLocalId.get();
+ return threadLocalId.get();
}
-
+
public HMSHandler(String name) throws MetaException {
super(name);
hiveConf = new HiveConf(this.getClass());
init();
}
-
+
public HMSHandler(String name, HiveConf conf) throws MetaException {
super(name);
hiveConf = conf;
@@ -108,7 +109,7 @@
classLoader = Configuration.class.getClassLoader();
}
}
-
+
private boolean init() throws MetaException {
rawStoreClassName = hiveConf.get("hive.metastore.rawstore.impl");
checkForDefaultDb = hiveConf.getBoolean("hive.metastore.checkForDefaultDb", true);
@@ -121,7 +122,7 @@
/**
* @return
- * @throws MetaException
+ * @throws MetaException
*/
private RawStore getMS() throws MetaException {
RawStore ms = threadLocalMS.get();
@@ -145,7 +146,7 @@
try {
getMS().getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
} catch (NoSuchObjectException e) {
- getMS().createDatabase(new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+ getMS().createDatabase(new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME,
wh.getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME).toString()));
}
HMSHandler.createDefaultDB = true;
@@ -158,7 +159,7 @@
throw new MetaException(rawStoreClassName + " class not found");
}
}
-
+
private void logStartFunction(String m) {
LOG.info(threadLocalId.get().toString() + ": " + m);
}
@@ -166,12 +167,12 @@
private void logStartFunction(String f, String db, String tbl) {
LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl);
}
-
+
@Override
public int getStatus() {
return fb_status.ALIVE;
}
-
+
public void shutdown() {
logStartFunction("Shutting down the object store...");
try {
@@ -259,7 +260,7 @@
public boolean drop_type(String name) throws MetaException {
this.incrementCounter("drop_type");
logStartFunction("drop_type: " + name);
- // TODO:pc validate that there are no types that refer to this
+ // TODO:pc validate that there are no types that refer to this
return getMS().dropType(name);
}
@@ -280,7 +281,7 @@
!MetaStoreUtils.validateColNames(tbl.getPartitionKeys()))) {
throw new InvalidObjectException(tbl.getTableName() + " is not a valid object name");
}
-
+
Path tblPath = null;
boolean success = false, madeDir = false;
try {
@@ -309,9 +310,14 @@
madeDir = true;
}
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ tbl.setCreateTime((int) time);
+ tbl.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
getMS().createTable(tbl);
success = getMS().commitTransaction();
-
+
} finally {
if(!success) {
getMS().rollbackTransaction();
@@ -321,7 +327,7 @@
}
}
}
-
+
public boolean is_table_exists(String dbname, String name) throws MetaException {
try {
return (get_table(dbname, name) != null);
@@ -329,7 +335,7 @@
return false;
}
}
-
+
public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException {
this.incrementCounter("drop_table");
logStartFunction("drop_table", dbname, name);
@@ -378,7 +384,7 @@
if(params == null) {
return false;
}
-
+
return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
}
@@ -392,7 +398,7 @@
return t;
}
- public boolean set_table_parameters(String dbname, String name,
+ public boolean set_table_parameters(String dbname, String name,
Map<String, String> params) throws NoSuchObjectException,
MetaException {
this.incrementCounter("set_table_parameters");
@@ -435,7 +441,7 @@
if( old_part != null) {
throw new AlreadyExistsException("Partition already exists:" + part);
}
-
+
if(!wh.isDir(partLocation)) {
if(!wh.mkdirs(partLocation)) {
throw new MetaException (partLocation + " is not a directory or unable to create one");
@@ -443,6 +449,11 @@
madeDir = true;
}
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ part.setCreateTime((int) time);
+ part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
success = getMS().addPartition(part);
if(success) {
success = getMS().commitTransaction();
@@ -457,7 +468,7 @@
}
return part;
}
-
+
public int add_partitions(List<Partition> parts) throws MetaException, InvalidObjectException, AlreadyExistsException {
this.incrementCounter("add_partition");
if(parts.size() == 0) {
@@ -504,7 +515,7 @@
// set default location if not specified
partLocation = new Path(tbl.getSd().getLocation(),
Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
-
+
} else {
partLocation = wh.getDnsPath(new Path(partLocationStr));
}
@@ -518,6 +529,11 @@
madeDir = true;
}
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ part.setCreateTime((int) time);
+ part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
success = getMS().addPartition(part) && getMS().commitTransaction();
} finally {
@@ -580,7 +596,7 @@
logStartFunction("get_partitions", db_name, tbl_name);
return getMS().getPartitions(db_name, tbl_name, max_parts);
}
-
+
public List<String> get_partition_names(String db_name, String tbl_name, short max_parts) throws MetaException {
this.incrementCounter("get_partition_names");
logStartFunction("get_partition_names", db_name, tbl_name);
@@ -594,13 +610,14 @@
logStartFunction("alter_partition", db_name, tbl_name);
LOG.info("Partition values:" + new_part.getValues());
try {
+ new_part.putToParameters(Constants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
getMS().alterPartition(db_name, tbl_name, new_part);
} catch(InvalidObjectException e) {
LOG.error(StringUtils.stringifyException(e));
throw new InvalidOperationException("alter is not possible");
}
}
-
+
public boolean create_index(Index index_def)
throws IndexAlreadyExistsException, MetaException {
this.incrementCounter("create_index");
@@ -613,11 +630,12 @@
logStartFunction("getVersion");
return "3.0";
}
-
+
public void alter_table(String dbname, String name, Table newTable) throws InvalidOperationException,
MetaException {
this.incrementCounter("alter_table");
logStartFunction("truncate_table: db=" + dbname + " tbl=" + name + " newtbl=" + newTable.getTableName());
+ newTable.putToParameters(Constants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
alterHandler.alterTable(getMS(), wh, dbname, name, newTable);
}
@@ -628,7 +646,7 @@
}
- public List<FieldSchema> get_fields(String db, String tableName)
+ public List<FieldSchema> get_fields(String db, String tableName)
throws MetaException,UnknownTableException, UnknownDBException {
this.incrementCounter("get_fields");
logStartFunction("get_fields: db=" + db + "tbl=" + tableName);
@@ -654,7 +672,7 @@
}
}
}
-
+
/**
* Return the schema of the table. This function includes partition columns
* in addition to the regular columns.
@@ -665,13 +683,13 @@
* @throws UnknownTableException
* @throws UnknownDBException
*/
- public List<FieldSchema> get_schema(String db, String tableName)
+ public List<FieldSchema> get_schema(String db, String tableName)
throws MetaException, UnknownTableException, UnknownDBException {
this.incrementCounter("get_schema");
logStartFunction("get_schema: db=" + db + "tbl=" + tableName);
String [] names = tableName.split("\\.");
String base_table_name = names[0];
-
+
Table tbl;
try {
tbl = this.get_table(db, base_table_name);
@@ -683,7 +701,7 @@
if (tbl == null || fieldSchemas == null) {
throw new UnknownTableException(tableName + " doesn't exist");
}
-
+
if (tbl.getPartitionKeys() != null) {
// Combine the column field schemas and the partition keys to create the whole schema
fieldSchemas.addAll(tbl.getPartitionKeys());
@@ -695,7 +713,7 @@
return "";
}
}
-
+
/**
* @param args
*/
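
Note the asymmetry the hunks above establish: the create paths set both createTime and the DDL_TIME parameter, while alter_table and alter_partition refresh only the parameter. A sketch of the alter-side refresh (the wrapper name touchDdlTime is illustrative):

    import org.apache.hadoop.hive.metastore.api.Constants;
    import org.apache.hadoop.hive.metastore.api.Table;

    // Alters refresh only the parameter; createTime keeps its original value.
    static void touchDdlTime(Table newTable) {
      newTable.putToParameters(Constants.DDL_TIME,
          Long.toString(System.currentTimeMillis() / 1000));
    }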
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Oct 8 00:45:54 2009
@@ -66,15 +66,15 @@
public class Hive {
static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Hive");
-
+
private HiveConf conf = null;
private IMetaStoreClient metaStoreClient;
-
+
private static ThreadLocal<Hive> hiveDB = new ThreadLocal() {
protected synchronized Object initialValue() {
return null;
}
-
+
public synchronized void remove() {
if( this.get() != null ) {
((Hive)this.get()).close();
@@ -82,9 +82,9 @@
super.remove();
}
};
-
+
/**
- * Gets hive object for the current thread. If one is not initialized then a new one is created
+ * Gets hive object for the current thread. If one is not initialized then a new one is created
* If the new configuration is different in metadata conf vars then a new one is created.
* @param c new Hive Configuration
* @return Hive object for current thread
@@ -133,11 +133,11 @@
}
return db;
}
-
+
public static void closeCurrent() {
hiveDB.remove();
}
-
+
/**
* Hive
*
@@ -148,7 +148,7 @@
private Hive(HiveConf c) throws HiveException {
this.conf = c;
}
-
+
/**
* closes the connection to metastore for the calling thread
*/
@@ -156,7 +156,7 @@
LOG.info("Closing current thread's connection to Hive Metastore.");
metaStoreClient.close();
}
-
+
/**
* Creates a table metdata and the directory for the table data
* @param tableName name of the table
@@ -212,7 +212,7 @@
/**
- * Updates the existing table metadata with the new metadata.
+ * Updates the existing table metadata with the new metadata.
* @param tblName name of the existing table
* @param newTbl new name of the table. could be the old name
* @throws InvalidOperationException if the changes in metadata is not acceptable
@@ -231,6 +231,26 @@
}
/**
+ * Updates the existing table metadata with the new metadata.
+ * @param tblName name of the existing table
+ * @param newTbl new name of the table. could be the old name
+ * @throws InvalidOperationException if the changes in metadata is not acceptable
+ * @throws TException
+ */
+ public void alterPartition(String tblName, Partition newPart)
+ throws InvalidOperationException, HiveException {
+ try {
+ getMSC().alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName,
+ newPart.getTPartition());
+
+ } catch (MetaException e) {
+ throw new HiveException("Unable to alter partition.", e);
+ } catch (TException e) {
+ throw new HiveException("Unable to alter partition.", e);
+ }
+ }
+
+ /**
* Creates the table with the give objects
* @param tbl a table object
* @throws HiveException
@@ -238,7 +258,7 @@
public void createTable(Table tbl) throws HiveException {
createTable(tbl, false);
}
-
+
/**
* Creates the table with the give objects
* @param tbl a table object
@@ -271,10 +291,10 @@
*/
public void dropTable(String dbName, String tableName) throws HiveException {
dropTable(dbName, tableName, true, true);
- }
-
+ }
+
/**
- * Drops the table.
+ * Drops the table.
* @param tableName
* @param deleteData deletes the underlying data along with metadata
* @param ignoreUnknownTab an exception if thrown if this is falser and
@@ -283,7 +303,7 @@
*/
public void dropTable(String dbName, String tableName, boolean deleteData,
boolean ignoreUnknownTab) throws HiveException {
-
+
try {
getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
} catch (NoSuchObjectException e) {
@@ -292,37 +312,37 @@
}
} catch (Exception e) {
throw new HiveException(e);
- }
+ }
}
public HiveConf getConf() {
return (conf);
}
-
+
/**
- * Returns metadata of the table.
+ * Returns metadata of the table.
* @param dbName the name of the database
* @param tableName the name of the table
* @return the table
- * @exception HiveException if there's an internal error or if the
- * table doesn't exist
+ * @exception HiveException if there's an internal error or if the
+ * table doesn't exist
*/
- public Table getTable(final String dbName, final String tableName)
+ public Table getTable(final String dbName, final String tableName)
throws HiveException {
-
+
return this.getTable(dbName, tableName, true);
- }
-
+ }
+
/**
* Returns metadata of the table
* @param dbName the name of the database
* @param tableName the name of the table
- * @param throwException controls whether an exception is thrown
+ * @param throwException controls whether an exception is thrown
* or a returns a null
* @return the table or if throwException is false a null value.
* @throws HiveException
*/
- public Table getTable(final String dbName, final String tableName,
+ public Table getTable(final String dbName, final String tableName,
boolean throwException) throws HiveException {
if(tableName == null || tableName.equals("")) {
@@ -344,14 +364,14 @@
// just a sanity check
assert(tTable != null);
try {
-
+
// Use LazySimpleSerDe for MetadataTypedColumnsetSerDe.
// NOTE: LazySimpleSerDe does not support tables with a single column of col
// of type "array<string>". This happens when the table is created using an
// earlier version of Hive.
if (tTable.getSd().getSerdeInfo().getSerializationLib().equals(
org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class.getName())
- && tTable.getSd().getColsSize() > 0
+ && tTable.getSd().getColsSize() > 0
&& tTable.getSd().getCols().get(0).getType().indexOf('<') == -1 ) {
tTable.getSd().getSerdeInfo().setSerializationLib(
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
@@ -385,7 +405,7 @@
table.checkValidity();
return table;
}
-
+
public List<String> getAllTables() throws HiveException {
return getTablesByPattern(".*");
}
@@ -393,7 +413,7 @@
/**
* returns all existing tables from default database which match the given
* pattern. The matching occurs as per Java regular expressions
- *
+ *
* @param tablePattern
* java re pattern
* @return list of table names
@@ -406,7 +426,7 @@
/**
* returns all existing tables from the given database which match the given
* pattern. The matching occurs as per Java regular expressions
- *
+ *
* @param database
* the database name
* @param tablePattern
@@ -421,7 +441,7 @@
throw new HiveException(e);
}
}
-
+
/**
* @param name
* @param locationUri
@@ -465,7 +485,7 @@
throws HiveException {
Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
try {
- /** Move files before creating the partition since down stream processes check
+ /** Move files before creating the partition since down stream processes check
* for existence of partition in metadata before accessing the data. If partition
* is created before data is moved, downstream waiting processes might move forward
* with partial data
@@ -496,7 +516,7 @@
if (part == null) {
// create the partition if it didn't exist before
- getPartition(tbl, partSpec, true);
+ part = getPartition(tbl, partSpec, true);
}
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
@@ -505,7 +525,8 @@
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
}
-}
+
+ }
/**
* Load a directory into a Hive Table.
@@ -518,10 +539,11 @@
* @param replace if true - replace files in the table, otherwise add files to table
* @param tmpDirPath The temporary directory.
*/
- public void loadTable(Path loadPath, String tableName,
+ public void loadTable(Path loadPath, String tableName,
boolean replace,
Path tmpDirPath) throws HiveException {
Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+
if(replace) {
tbl.replaceFiles(loadPath, tmpDirPath);
} else {
@@ -540,9 +562,9 @@
throws HiveException {
return createPartition(tbl, partSpec, null);
}
-
+
/**
- * Creates a partition
+ * Creates a partition
* @param tbl table for which partition needs to be created
* @param partSpec partition keys and their values
* @param location location of this partition
@@ -551,9 +573,9 @@
*/
public Partition createPartition(Table tbl, Map<String, String> partSpec,
Path location) throws HiveException {
-
+
org.apache.hadoop.hive.metastore.api.Partition partition = null;
-
+
try {
Partition tmpPart = new Partition(tbl, partSpec, location);
partition = getMSC().add_partition(tmpPart.getTPartition());
@@ -561,7 +583,7 @@
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
}
-
+
return new Partition(tbl, partition);
}
@@ -602,7 +624,7 @@
}
return new Partition(tbl, tpart);
}
-
+
public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
boolean deleteData) throws HiveException {
try {
@@ -646,7 +668,7 @@
}
return parts;
} else {
- // create an empty partition.
+ // create an empty partition.
// HACK, HACK. SemanticAnalyzer code requires that an empty partition when the table is not partitioned
org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition();
tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy
@@ -763,13 +785,13 @@
// create the parent directory otherwise rename can fail if the parent doesn't exist
if (!fs.mkdirs(destf.getParent())) {
- throw new HiveException("Unable to create destination directory: "
+ throw new HiveException("Unable to create destination directory: "
+ destf.getParent().toString());
}
-
+
b = fs.rename(tmppath, destf);
if (!b) {
- throw new HiveException("Unable to move results to destination directory: "
+ throw new HiveException("Unable to move results to destination directory: "
+ destf.getParent().toString());
}
LOG.debug("Renaming:"+tmppath.toString()+",Status:"+b);
@@ -794,9 +816,9 @@
private IMetaStoreClient createMetaStoreClient() throws MetaException {
return new HiveMetaStoreClient(this.conf);
}
-
+
/**
- *
+ *
* @return the metastore client for the current thread
* @throws MetaException
*/
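
A hypothetical caller of the new Hive.alterPartition, tying it to the Partition.setProperty accessor added in the next file (table name and parameter values are illustrative; conf and partSpec are assumed in scope):

    Hive db = Hive.get(conf);
    Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "srcpart");
    Partition part = db.getPartition(tbl, partSpec, false);
    part.setProperty("last_modified_by", "someuser");  // accessor added below
    db.alterPartition(tbl.getName(), part);            // server refreshes DDL_TIME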
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Thu Oct 8 00:45:54 2009
@@ -82,7 +82,7 @@
* @param location Location of the partition, relative to the table.
* @throws HiveException Thrown if we could not create the partition.
*/
- public Partition(Table tbl, Map<String, String> partSpec,
+ public Partition(Table tbl, Map<String, String> partSpec,
Path location) throws HiveException {
List<String> pvals = new ArrayList<String>();
@@ -93,8 +93,8 @@
}
pvals.add(val);
}
-
- org.apache.hadoop.hive.metastore.api.Partition tpart =
+
+ org.apache.hadoop.hive.metastore.api.Partition tpart =
new org.apache.hadoop.hive.metastore.api.Partition();
tpart.setDbName(tbl.getDbName());
tpart.setTableName(tbl.getName());
@@ -111,7 +111,7 @@
} catch (TException e) {
LOG.error("Could not create a copy of StorageDescription");
throw new HiveException("Could not create a copy of StorageDescription");
- }
+ }
tpart.setSd(sd);
if (location != null) {
@@ -129,8 +129,8 @@
* @param tp Thrift Partition object
* @throws HiveException Thrown if we cannot initialize the partition
*/
- private void initialize(Table tbl,
- org.apache.hadoop.hive.metastore.api.Partition tp)
+ private void initialize(Table tbl,
+ org.apache.hadoop.hive.metastore.api.Partition tp)
throws HiveException {
table = tbl;
@@ -139,7 +139,7 @@
if(tbl.isPartitioned()) {
try {
- partName = Warehouse.makePartName(tbl.getPartCols(),
+ partName = Warehouse.makePartName(tbl.getPartCols(),
tp.getValues());
if (tp.getSd().getLocation() == null) {
// set default if location is not set
@@ -153,9 +153,9 @@
e);
}
} else {
- // We are in the HACK territory.
+ // We are in the HACK territory.
// SemanticAnalyzer expects a single partition whose schema
- // is same as the table partition.
+ // is same as the table partition.
partPath = table.getPath();
}
@@ -314,8 +314,8 @@
@SuppressWarnings("nls")
@Override
- public String toString() {
- String pn = "Invalid Partition";
+ public String toString() {
+ String pn = "Invalid Partition";
try {
pn = Warehouse.makePartName(spec);
} catch (MetaException e) {
@@ -323,4 +323,20 @@
}
return table.toString() + "(" + pn + ")";
}
+
+ public void setProperty(String name, String value) {
+ getTPartition().putToParameters(name, value);
+ }
+
+ /**
+ * getProperty
+ *
+ */
+ public String getProperty(String name) {
+ Map<String,String> params = getTPartition().getParameters();
+ if (params == null)
+ return null;
+ return params.get(name);
+ }
+
}
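
The new accessors make the stamp easy to read from the ql layer; a short sketch (part is assumed in scope):

    // getProperty returns null when the parameter map is absent or the key unset.
    String raw = part.getProperty(Constants.DDL_TIME);
    long lastDdl = (raw == null) ? 0L : Long.parseLong(raw);   // epoch seconds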
Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java Thu Oct 8 00:45:54 2009
@@ -90,7 +90,7 @@
private MiniMRCluster mr = null;
private HadoopShims.MiniDFSShim dfs = null;
private boolean miniMr = false;
-
+
public boolean deleteDirectory(File path) {
if (path.exists()) {
File[] files = path.listFiles();
@@ -105,7 +105,7 @@
}
return(path.delete());
}
-
+
public void copyDirectoryToLocal(Path src, Path dest) throws Exception {
FileSystem srcFs = src.getFileSystem(conf);
@@ -134,7 +134,7 @@
}
}
}
-
+
static Pattern mapTok = Pattern.compile("(\\.?)(.*)_map_(.*)");
static Pattern reduceTok = Pattern.compile("(.*)(reduce_[^\\.]*)((\\..*)?)");
@@ -178,14 +178,14 @@
dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
FileSystem fs = dfs.getFileSystem();
mr = new MiniMRCluster(4, fs.getUri().toString(), 1);
-
+
// hive.metastore.warehouse.dir needs to be set relative to the jobtracker
String fsName = conf.get("fs.default.name");
assert fsName != null;
conf.set("hive.metastore.warehouse.dir", fsName.concat("/build/ql/test/data/warehouse/"));
-
+
conf.set("mapred.job.tracker", "localhost:" + mr.getJobTrackerPort());
- }
+ }
// System.out.println(conf.toString());
testFiles = conf.get("test.data.files").replace('\\', '/').replace("c:", "");
@@ -199,7 +199,7 @@
srcTables = new LinkedList<String>();
init();
}
-
+
public void shutdown() throws Exception {
cleanUp();
@@ -207,7 +207,7 @@
dfs.shutdown();
dfs = null;
}
-
+
if (mr != null) {
mr.shutdown();
mr = null;
@@ -226,7 +226,7 @@
BufferedInputStream bis = new BufferedInputStream(fis);
DataInputStream dis = new DataInputStream(bis);
StringBuffer qsb = new StringBuffer();
-
+
// Read the entire query
while(dis.available() != 0) {
qsb.append(dis.readLine() + "\n");
@@ -237,8 +237,8 @@
public void cleanUp() throws Exception {
String warehousePath = ((new URI(testWarehouse)).getPath());
// Drop any tables that remain due to unsuccessful runs
- for(String s: new String [] {"src", "src1", "src_json", "src_thrift", "src_sequencefile",
- "srcpart", "srcbucket", "dest1", "dest2",
+ for(String s: new String [] {"src", "src1", "src_json", "src_thrift", "src_sequencefile",
+ "srcpart", "srcbucket", "dest1", "dest2",
"dest3", "dest4", "dest4_sequencefile",
"dest_j1", "dest_j2", "dest_g1", "dest_g2",
"fetchtask_ioexception"}) {
@@ -259,7 +259,7 @@
return;
}
-
+
private void runCreateTableCmd(String createTableCmd) throws Exception {
int ecode = 0;
ecode = drv.run(createTableCmd);
@@ -276,13 +276,13 @@
LinkedList<String> cols = new LinkedList<String>();
cols.add("key");
cols.add("value");
-
+
LinkedList<String> part_cols = new LinkedList<String>();
part_cols.add("ds");
part_cols.add("hr");
db.createTable("srcpart", cols, part_cols, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
srcTables.add("srcpart");
-
+
Path fpath;
Path newfpath;
HashMap<String, String> part_spec = new HashMap<String, String>();
@@ -313,14 +313,14 @@
fs.copyFromLocalFile(false, true, fpath, newfpath);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE srcbucket");
}
-
+
for (String tname: new String [] {"src", "src1"}) {
db.createTable(tname, cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
srcTables.add(tname);
}
db.createTable("src_sequencefile", cols, null, SequenceFileInputFormat.class, SequenceFileOutputFormat.class);
srcTables.add("src_sequencefile");
-
+
Table srcThrift = new Table("src_thrift");
srcThrift.setInputFormatClass(SequenceFileInputFormat.class.getName());
srcThrift.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
@@ -329,7 +329,7 @@
srcThrift.setSerdeParam(Constants.SERIALIZATION_FORMAT, TBinaryProtocol.class.getName());
db.createTable(srcThrift);
srcTables.add("src_thrift");
-
+
LinkedList<String> json_cols = new LinkedList<String>();
json_cols.add("json");
db.createTable("src_json", json_cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
@@ -341,35 +341,35 @@
fs.copyFromLocalFile(false, true, fpath, newfpath);
//db.loadTable(newfpath, "src", false);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE src");
-
+
// load the input data into the src table
fpath = new Path(testFiles, "kv3.txt");
newfpath = new Path(tmppath, "kv3.txt");
fs.copyFromLocalFile(false, true, fpath, newfpath);
//db.loadTable(newfpath, "src1", false);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE src1");
-
+
// load the input data into the src_sequencefile table
fpath = new Path(testFiles, "kv1.seq");
newfpath = new Path(tmppath, "kv1.seq");
fs.copyFromLocalFile(false, true, fpath, newfpath);
//db.loadTable(newfpath, "src_sequencefile", true);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE src_sequencefile");
-
+
// load the input data into the src_thrift table
fpath = new Path(testFiles, "complex.seq");
newfpath = new Path(tmppath, "complex.seq");
fs.copyFromLocalFile(false, true, fpath, newfpath);
- //db.loadTable(newfpath, "src_thrift", true);
+ //db.loadTable(newfpath, "src_thrift", true);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE src_thrift");
-
+
// load the json data into the src_json table
fpath = new Path(testFiles, "json.txt");
newfpath = new Path(tmppath, "json.txt");
fs.copyFromLocalFile(false, true, fpath, newfpath);
//db.loadTable(newfpath, "src_json", false);
runLoadCmd("LOAD DATA INPATH '" + newfpath.toString() + "' INTO TABLE src_json");
-
+
}
public void init() throws Exception {
@@ -377,7 +377,7 @@
testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
// conf.logVars(System.out);
// System.out.flush();
-
+
db = Hive.get(conf);
fs = FileSystem.get(conf);
drv = new Driver(conf);
@@ -392,14 +392,14 @@
LinkedList<String> cols = new LinkedList<String>();
cols.add("key");
cols.add("value");
-
+
LinkedList<String> part_cols = new LinkedList<String>();
part_cols.add("ds");
part_cols.add("hr");
db.createTable("dest1", cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
db.createTable("dest2", cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
-
+
db.createTable("dest3", cols, part_cols, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
Table dest3 = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest3");
@@ -407,7 +407,7 @@
part_spec.put("ds", "2008-04-08");
part_spec.put("hr", "12");
db.createPartition(dest3, part_spec);
-
+
db.createTable("dest4", cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class);
db.createTable("dest4_sequencefile", cols, null, SequenceFileInputFormat.class, SequenceFileOutputFormat.class);
}
@@ -421,7 +421,7 @@
cleanUp();
createSources();
}
-
+
CliSessionState ss = new CliSessionState(conf);
assert ss!= null;
ss.in = System.in;
@@ -468,10 +468,10 @@
LinkedList<String> cols = new LinkedList<String>();
cols.add("key");
cols.add("value");
-
- // Move all data from dest4_sequencefile to dest4
+
+ // Move all data from dest4_sequencefile to dest4
drv.run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
-
+
// Drop dest4_sequencefile
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile", true, true);
}
@@ -481,7 +481,7 @@
File qf = new File(outDir, tname);
File expf = new File(outDir);
expf = new File(expf, qf.getName().concat(".out"));
-
+
File outf = null;
outf = new File(logDir);
outf = new File(outf, qf.getName().concat(".out"));
@@ -496,21 +496,21 @@
else {
throw e;
}
-
+
outfd.write(e.getMessage());
outfd.close();
-
+
String cmdLine = "diff " + outf.getPath() + " " + expf.getPath();
System.out.println(cmdLine);
-
+
Process executor = Runtime.getRuntime().exec(cmdLine);
-
+
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
-
+
int exitVal = executor.waitFor();
if(exitVal != 0 && overWrite) {
@@ -528,7 +528,7 @@
if (tree != null) {
File parseDir = new File(outDir, "parse");
File expf = new File(parseDir, tname.concat(".out"));
-
+
File outf = null;
outf = new File(logDir);
outf = new File(outf, tname.concat(".out"));
@@ -536,18 +536,18 @@
FileWriter outfd = new FileWriter(outf);
outfd.write(tree.toStringTree());
outfd.close();
-
+
String cmdLine = "diff " + outf.getPath() + " " + expf.getPath();
System.out.println(cmdLine);
-
+
Process executor = Runtime.getRuntime().exec(cmdLine);
-
+
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
-
+
int exitVal = executor.waitFor();
if(exitVal != 0 && overWrite) {
@@ -569,7 +569,7 @@
if (tasks != null) {
File planDir = new File(outDir, "plan");
File planFile = new File(planDir, tname.concat(".xml"));
-
+
File outf = null;
outf = new File(logDir);
outf = new File(outf, tname.concat(".xml"));
@@ -578,7 +578,7 @@
for(Task<? extends Serializable> plan: tasks) {
Utilities.serializeTasks(plan, ofs);
}
-
+
String [] cmdArray = new String[6];
cmdArray[0] = "diff";
cmdArray[1] = "-b";
@@ -586,6 +586,7 @@
cmdArray[3] = "\\(\\(<java version=\".*\" class=\"java.beans.XMLDecoder\">\\)" +
"\\|\\(<string>.*/tmp/.*</string>\\)" +
"\\|\\(<string>file:.*</string>\\)" +
+ "\\|\\(<string>[0-9]\\{10\\}</string>\\)" +
"\\|\\(<string>/.*/warehouse/.*</string>\\)\\)";
cmdArray[4] = outf.getPath();
cmdArray[5] = planFile.getPath();
@@ -596,12 +597,12 @@
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
-
+
int exitVal = executor.waitFor();
-
+
if(exitVal != 0 && overWrite) {
System.out.println("Overwriting results");
String cmdLine = "cp " + outf.getPath() + " " + planFile.getPath();
@@ -616,7 +617,7 @@
}
}
-
+
public int checkResults(String tname) throws Exception {
Path warehousePath = new Path(FileSystem.get(conf).getUri().getPath());
warehousePath = new Path(warehousePath, (new URI(testWarehouse)).getPath());
@@ -642,7 +643,7 @@
cmdArray[3] = "--exclude=.svn";
cmdArray[4] = localPath.toUri().getPath();
cmdArray[5] = (new File(outDir, tname)).getPath() + "/warehouse";
- System.out.println(cmdArray[0] + " " + cmdArray[1] + " " + cmdArray[2] + " " +
+ System.out.println(cmdArray[0] + " " + cmdArray[1] + " " + cmdArray[2] + " " +
cmdArray[3] + " " + cmdArray[4] + " " + cmdArray[5]);
}
else {
@@ -655,10 +656,10 @@
System.out.println(cmdArray1[0] + " " + cmdArray1[1] + " " + cmdArray1[2]);
Process executor = Runtime.getRuntime().exec(cmdArray1);
-
+
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
int exitVal = executor.waitFor();
@@ -679,10 +680,10 @@
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
-
+
int exitVal = executor.waitFor();
return exitVal;
@@ -691,7 +692,7 @@
public int checkCliDriverResults(String tname) throws Exception {
String [] cmdArray;
- cmdArray = new String[12];
+ cmdArray = new String[14];
cmdArray[0] = "diff";
cmdArray[1] = "-a";
cmdArray[2] = "-I";
@@ -702,21 +703,24 @@
cmdArray[7] = "lastAccessTime";
cmdArray[8] = "-I";
cmdArray[9] = "owner";
- cmdArray[10] = (new File(logDir, tname + ".out")).getPath();
- cmdArray[11] = (new File(outDir, tname + ".out")).getPath();
+ cmdArray[10] = "-I";
+ cmdArray[11] = "transient_lastDdlTime";
+ cmdArray[12] = (new File(logDir, tname + ".out")).getPath();
+ cmdArray[13] = (new File(outDir, tname + ".out")).getPath();
System.out.println(cmdArray[0] + " " + cmdArray[1] + " " + cmdArray[2] + " " +
cmdArray[3] + " " + cmdArray[4] + " " + cmdArray[5] + " " +
cmdArray[6] + " " + cmdArray[7] + " " + cmdArray[8] + " " +
- cmdArray[9] + " " + cmdArray[10] + " " + cmdArray[11]);
+ cmdArray[9] + " " + cmdArray[10] + " " + cmdArray[11] + " " +
+ cmdArray[12] + " " + cmdArray[13]);
Process executor = Runtime.getRuntime().exec(cmdArray);
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err);
-
+
outPrinter.start();
errPrinter.start();
-
+
int exitVal = executor.waitFor();
if(exitVal != 0 && overWrite) {
@@ -744,13 +748,13 @@
while((ast.getToken() == null) && (ast.getChildCount() > 0)) {
ast = (ASTNode)ast.getChild(0);
}
-
+
sem.analyze(ast, ctx);
ctx.clear();
return sem.getRootTasks();
}
-
+
public TreeMap<String, String> getQMap() {
return qMap;
}
@@ -758,7 +762,7 @@
/**
* QTRunner: Runnable class for running a a single query file
- *
+ *
**/
public static class QTRunner implements Runnable {
private QTestUtil qt;
@@ -768,7 +772,7 @@
this.qt = qt;
this.fname = fname;
}
-
+
public void run() {
try {
// assumption is that environment has already been cleaned once globally
@@ -792,14 +796,14 @@
* @param mt whether to run in multithreaded mode or not
* @return true if all the query files were executed successfully, else false
*
- * In multithreaded mode each query file is run in a separate thread. the caller has to
+ * In multithreaded mode each query file is run in a separate thread. the caller has to
* arrange that different query files do not collide (in terms of destination tables)
*/
public static boolean queryListRunner(File [] qfiles, String [] resDirs, String[] logDirs, boolean mt) {
assert(qfiles.length == resDirs.length);
assert(qfiles.length == logDirs.length);
- boolean failed = false;
+ boolean failed = false;
try {
QTestUtil[] qt = new QTestUtil [qfiles.length];
@@ -836,7 +840,7 @@
}
} else {
-
+
for(int i=0; i<qfiles.length && !failed; i++) {
qt[i].cliInit(qfiles[i].getName());
qt[i].executeClient(qfiles[i].getName());
Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out Thu Oct 8 00:45:54 2009
@@ -20,12 +20,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1981400750/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/1758164992/10000
POSTHOOK: query: select * from srcpart a where a.ds='2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1981400750/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/1758164992/10000
238 val_238 2008-04-08 11
86 val_86 2008-04-08 11
311 val_311 2008-04-08 11
@@ -1070,7 +1070,7 @@
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/1664046852/10001
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/1270379747/10001
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1080,10 +1080,10 @@
columns.types string:string:string:string
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [a]
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [a]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [a]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [a]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
Partition
partition values:
ds 2008-04-08
@@ -1102,10 +1102,11 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950161
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
Partition
partition values:
ds 2008-04-08
@@ -1124,7 +1125,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950161
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
@@ -1132,16 +1134,17 @@
Fetch Operator
limit: -1
+
PREHOOK: query: select * from srcpart a where a.ds='2008-04-08' and key < 200
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/200140260/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/421979472/10000
POSTHOOK: query: select * from srcpart a where a.ds='2008-04-08' and key < 200
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/200140260/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/421979472/10000
86 val_86 2008-04-08 11
27 val_27 2008-04-08 11
165 val_165 2008-04-08 11
@@ -1559,7 +1562,7 @@
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/56577408/10001
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/228653838/10001
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1569,10 +1572,10 @@
columns.types string:string:string:string
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [a]
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [a]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [a]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [a]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
Partition
partition values:
ds 2008-04-08
@@ -1591,10 +1594,11 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950161
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
Partition
partition values:
ds 2008-04-08
@@ -1613,7 +1617,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950161
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
@@ -1626,12 +1631,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1684443155/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/521327473/10000
POSTHOOK: query: select * from srcpart a where a.ds='2008-04-08' and rand(100) < 0.1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1684443155/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/521327473/10000
145 val_145 2008-04-08 11
417 val_417 2008-04-08 11
292 val_292 2008-04-08 11
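The new transient_lastDdlTime parameter visible in the hunks above stores a table or partition's last DDL time as Unix epoch seconds (for example, 1254950161). Below is a minimal sketch of how such a value can be stamped into a parameters map; the class and method names are illustrative assumptions for this mail, not the actual HIVE-868 code in HiveMetaStore.java:

    import java.util.HashMap;
    import java.util.Map;

    public class DdlTimeStamper {
        // Parameter key exactly as it appears in the .q.out diffs above.
        public static final String DDL_TIME = "transient_lastDdlTime";

        // Record the current wall-clock time, in whole seconds, as the
        // last DDL time in a table or partition parameter map.
        public static void stampDdlTime(Map<String, String> parameters) {
            long nowSeconds = System.currentTimeMillis() / 1000L;
            parameters.put(DDL_TIME, Long.toString(nowSeconds));
        }

        public static void main(String[] args) {
            Map<String, String> params = new HashMap<String, String>();
            stampDdlTime(params);
            // Prints e.g.: transient_lastDdlTime 1254950161
            System.out.println(DDL_TIME + " " + params.get(DDL_TIME));
        }
    }
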
Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out Thu Oct 8 00:45:54 2009
@@ -42,7 +42,7 @@
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/1345835275/10001
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/1121318754/10001
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -52,10 +52,10 @@
columns.types string:string:string:string
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [x]
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [x]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [x]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [x]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
Partition
partition values:
ds 2008-04-08
@@ -74,10 +74,11 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950147
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
Partition
partition values:
ds 2008-04-08
@@ -96,7 +97,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcpart
+ transient_lastDdlTime 1254950147
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcpart
@@ -104,16 +106,17 @@
Fetch Operator
limit: -1
+
PREHOOK: query: SELECT x.* FROM SRCPART x WHERE key IS NOT NULL AND ds = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1790177646/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/766346895/10000
POSTHOOK: query: SELECT x.* FROM SRCPART x WHERE key IS NOT NULL AND ds = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1790177646/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/766346895/10000
238 val_238 2008-04-08 11
86 val_86 2008-04-08 11
311 val_311 2008-04-08 11
Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out Thu Oct 8 00:45:54 2009
@@ -50,7 +50,7 @@
File Output Operator
compressed: false
GlobalTableId: 1
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -64,14 +64,15 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944305
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt [s]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt [s]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -87,7 +88,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket
+ transient_lastDdlTime 1254944304
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcbucket
@@ -97,11 +99,11 @@
Move Operator
files:
hdfs directory: true
- source: file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002
- destination: file:/data/users/njain/hive5/hive5/build/ql/tmp/462151984/10000
+ source: file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002
+ destination: file:/data/users/njain/hive3/hive3/build/ql/tmp/1726881559/10000
Map Reduce
Alias -> Map Operator Tree:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002
Reduce Output Operator
sort order:
Map-reduce partition columns:
@@ -115,9 +117,9 @@
type: string
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002 [file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002]
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002 [file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/650204509/10002
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/897548542/10002
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -132,7 +134,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944305
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
Reduce Operator Tree:
@@ -140,7 +143,7 @@
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/462151984/10000
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/1726881559/10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -153,8 +156,9 @@
bucket_count -1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ transient_lastDdlTime 1254944305
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
@@ -162,7 +166,7 @@
Move Operator
tables:
replace: true
- source: file:/data/users/njain/hive5/hive5/build/ql/tmp/462151984/10000
+ source: file:/data/users/njain/hive3/hive3/build/ql/tmp/1726881559/10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -176,10 +180,11 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944305
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
- tmp directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/462151984/10001
+ tmp directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/1726881559/10001
PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
@@ -195,11 +200,11 @@
PREHOOK: query: SELECT dest1.* FROM dest1
PREHOOK: type: QUERY
PREHOOK: Input: default@dest1
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/2048324970/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/917862347/10000
POSTHOOK: query: SELECT dest1.* FROM dest1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@dest1
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/2048324970/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/917862347/10000
474 val_475
62 val_63
468 val_469
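Each table carries its own timestamp because the value is taken when that table's DDL last ran (dest1 above shows 1254944305, srcbucket 1254944304). Decoding such a value is straightforward; a small sketch using only the standard library:

    import java.util.Date;

    public class DecodeDdlTime {
        public static void main(String[] args) {
            // transient_lastDdlTime from the dest1 parameters above.
            long ddlTimeSeconds = 1254944305L;
            // Epoch seconds -> milliseconds for java.util.Date.
            System.out.println(new Date(ddlTimeSeconds * 1000L));
            // Local-time rendering of 2009-10-07 ~19:38 UTC, shortly
            // before this commit's test run.
        }
    }
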
Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out?rev=822976&r1=822975&r2=822976&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out Thu Oct 8 00:45:54 2009
@@ -50,7 +50,7 @@
File Output Operator
compressed: false
GlobalTableId: 1
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -64,14 +64,15 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944312
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt [s]
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt [s]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt
+ file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket/srcbucket0.txt
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -87,7 +88,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcbucket
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/srcbucket
+ transient_lastDdlTime 1254944311
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: srcbucket
@@ -97,11 +99,11 @@
Move Operator
files:
hdfs directory: true
- source: file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002
- destination: file:/data/users/njain/hive5/hive5/build/ql/tmp/63857533/10000
+ source: file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002
+ destination: file:/data/users/njain/hive3/hive3/build/ql/tmp/236890045/10000
Map Reduce
Alias -> Map Operator Tree:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002
Reduce Output Operator
sort order:
Map-reduce partition columns:
@@ -115,9 +117,9 @@
type: string
Needs Tagging: false
Path -> Alias:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002 [file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002]
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002 [file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002]
Path -> Partition:
- file:/data/users/njain/hive5/hive5/build/ql/tmp/1574872160/10002
+ file:/data/users/njain/hive3/hive3/build/ql/tmp/46051233/10002
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -132,7 +134,8 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944312
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
Reduce Operator Tree:
@@ -140,7 +143,7 @@
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/63857533/10000
+ directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/236890045/10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -153,8 +156,9 @@
bucket_count -1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ transient_lastDdlTime 1254944312
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
@@ -162,7 +166,7 @@
Move Operator
tables:
replace: true
- source: file:/data/users/njain/hive5/hive5/build/ql/tmp/63857533/10000
+ source: file:/data/users/njain/hive3/hive3/build/ql/tmp/236890045/10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -176,10 +180,11 @@
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+ location file:/data/users/njain/hive3/hive3/build/ql/test/data/warehouse/dest1
+ transient_lastDdlTime 1254944312
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: dest1
- tmp directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/63857533/10001
+ tmp directory: file:/data/users/njain/hive3/hive3/build/ql/tmp/236890045/10001
PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
@@ -195,11 +200,11 @@
PREHOOK: query: SELECT dest1.* FROM dest1
PREHOOK: type: QUERY
PREHOOK: Input: default@dest1
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/77872909/10000
+PREHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/693740466/10000
POSTHOOK: query: SELECT dest1.* FROM dest1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@dest1
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/77872909/10000
+POSTHOOK: Output: file:/data/users/njain/hive3/hive3/build/ql/tmp/693740466/10000
474 val_475
62 val_63
468 val_469