Posted to commits@hive.apache.org by he...@apache.org on 2010/02/11 03:33:56 UTC
svn commit: r908809 [1/7] - in /hadoop/hive/trunk: ./
contrib/src/test/results/clientnegative/
metastore/src/java/org/apache/hadoop/hive/metastore/
ql/src/java/org/apache/hadoop/hive/ql/
ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apach...
Author: heyongqiang
Date: Thu Feb 11 02:33:54 2010
New Revision: 908809
URL: http://svn.apache.org/viewvc?rev=908809&view=rev
Log:
HIVE-1122. Make ql/metadata/Table and Partition serializable (Zheng Shao via He Yongqiang)
Modified:
hadoop/hive/trunk/CHANGES.txt
hadoop/hive/trunk/contrib/src/test/results/clientnegative/serde_regex.q.out
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java
hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
hadoop/hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf4.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
hadoop/hive/trunk/ql/src/test/results/compiler/plan/union.q.xml
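The theme of this patch: ql/metadata/Table and Partition become Serializable wrappers whose only persistent state is the underlying Thrift metadata object (org.apache.hadoop.hive.metastore.api.Table / Partition). Everything else (deserializer, input/output format classes, data location URI) is treated as a cache that is lazily re-derived, so a deserialized instance rebuilds it on first access. A minimal sketch of the pattern, with illustrative names rather than the actual Hive classes:

    import java.io.Serializable;
    import java.net.URI;

    // Sketch only: the wrapper's single source of truth is the serializable
    // metadata bean; derived fields can always be rebuilt after Java
    // deserialization because no getter assumes they were ever set.
    public class MetadataWrapper implements Serializable {
      private static final long serialVersionUID = 1L;

      private MetadataBean bean;   // persistent state
      private URI cachedLocation;  // derived; recomputed on demand

      public MetadataWrapper(MetadataBean bean) { this.bean = bean; }

      public URI getDataLocation() {
        if (cachedLocation == null) {
          cachedLocation = URI.create(bean.getLocation()); // rebuild from bean
        }
        return cachedLocation;
      }

      public void setDataLocation(URI uri) {
        cachedLocation = uri;
        bean.setLocation(uri.toString()); // keep the bean authoritative
      }

      static class MetadataBean implements Serializable {
        private String location;
        String getLocation() { return location; }
        void setLocation(String l) { this.location = l; }
      }
    }

The diffs below follow this shape throughout: setters write through to the Thrift object, getters rebuild caches, and code that used to reach into getTTable()/getTPartition() moves to wrapper methods.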
Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Thu Feb 11 02:33:54 2010
@@ -69,6 +69,9 @@
HIVE-1150. Add comment to explain why we check for dir first in add_partitions().
(Paul Yang via zshao)
+ HIVE-1122. Make ql/metadata/Table and Partition serializable
+ (Zheng Shao via He Yongqiang)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/hive/trunk/contrib/src/test/results/clientnegative/serde_regex.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/contrib/src/test/results/clientnegative/serde_regex.q.out?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/contrib/src/test/results/clientnegative/serde_regex.q.out (original)
+++ hadoop/hive/trunk/contrib/src/test/results/clientnegative/serde_regex.q.out Thu Feb 11 02:33:54 2010
@@ -78,5 +78,5 @@
)
STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-FAILED: Error in metadata: org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
+FAILED: Error in metadata: java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Feb 11 02:33:54 2010
@@ -625,16 +625,18 @@
schema.setProperty(
org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, Integer
.toString(sd.getNumBuckets()));
- if (sd.getBucketCols().size() > 0) {
+ if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
schema.setProperty(
org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME, sd
.getBucketCols().get(0));
}
- schema.putAll(sd.getSerdeInfo().getParameters());
- if (sd.getSerdeInfo().getSerializationLib() != null) {
- schema.setProperty(
- org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, sd
- .getSerdeInfo().getSerializationLib());
+ if (sd.getSerdeInfo() != null) {
+ schema.putAll(sd.getSerdeInfo().getParameters());
+ if (sd.getSerdeInfo().getSerializationLib() != null) {
+ schema.setProperty(
+ org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, sd
+ .getSerdeInfo().getSerializationLib());
+ }
}
StringBuilder colNameBuf = new StringBuilder();
StringBuilder colTypeBuf = new StringBuilder();
@@ -656,10 +658,12 @@
schema.setProperty(
org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES,
colTypes);
- schema.setProperty(
- org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL,
- getDDLFromFieldSchema(tableName, sd.getCols()));
-
+ if (sd.getCols() != null) {
+ schema.setProperty(
+ org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL,
+ getDDLFromFieldSchema(tableName, sd.getCols()));
+ }
+
String partString = "";
String partStringSep = "";
for (FieldSchema partKey : partitionKeys) {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java Thu Feb 11 02:33:54 2010
@@ -64,7 +64,14 @@
private ArrayList<Task<? extends Serializable>> rootTasks;
private FetchTask fetchTask;
+
private HashSet<ReadEntity> inputs;
+ /**
+ * Note: outputs are not all determined at compile time.
+ * Some tasks can change the outputs at run time, because only at run time
+ * do we know what the changes are. Such tasks should keep a reference
+ * to this outputs set.
+ */
private HashSet<WriteEntity> outputs;
private HashMap<String, String> idToTableNameMap;
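The new comment documents a subtle contract: QueryPlan and its tasks share one mutable outputs set, and tasks whose outputs are only known at execution time add WriteEntity objects to that same set (DDLTask's createTable below does exactly this with work.getOutputs().add(new WriteEntity(tbl))). A hedged sketch of the idea, with illustrative task and entity types rather than the real Hive API:

    import java.util.HashSet;
    import java.util.Set;

    class WriteEntity {
      final String name;
      WriteEntity(String name) { this.name = name; }
    }

    // Sketch: the task keeps a reference to the plan's outputs set and
    // records entities it only discovers while running, so post-execution
    // hooks that read the plan's outputs see the run-time additions.
    class RuntimeOutputTask {
      private final Set<WriteEntity> outputs;

      RuntimeOutputTask(Set<WriteEntity> planOutputs) {
        this.outputs = planOutputs; // shared, not copied
      }

      void execute() {
        outputs.add(new WriteEntity("default@table_created_at_run_time"));
      }
    }

    class Demo {
      public static void main(String[] args) {
        Set<WriteEntity> planOutputs = new HashSet<WriteEntity>();
        new RuntimeOutputTask(planOutputs).execute();
        System.out.println(planOutputs.size()); // 1: visible to the plan
      }
    }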
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Feb 11 02:33:54 2010
@@ -381,7 +381,7 @@
}
parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
- .getName(), Short.MAX_VALUE);
+ .getTableName(), Short.MAX_VALUE);
// write the results in the file
try {
@@ -608,15 +608,15 @@
while (iterTables.hasNext()) {
// create a row per table name
Table tbl = iterTables.next();
- String tableName = tbl.getName();
+ String tableName = tbl.getTableName();
String tblLoc = null;
String inputFormattCls = null;
String outputFormattCls = null;
if (part != null) {
if (par != null) {
tblLoc = par.getDataLocation().toString();
- inputFormattCls = par.getTPartition().getSd().getInputFormat();
- outputFormattCls = par.getTPartition().getSd().getOutputFormat();
+ inputFormattCls = par.getInputFormatClass().getName();
+ outputFormattCls = par.getOutputFormatClass().getName();
}
} else {
tblLoc = tbl.getDataLocation().toString();
@@ -734,13 +734,13 @@
try {
- LOG.info("DDLTask: got data for " + tbl.getName());
+ LOG.info("DDLTask: got data for " + tbl.getTableName());
List<FieldSchema> cols = null;
if (colPath.equals(tableName)) {
cols = tbl.getCols();
if (part != null) {
- cols = part.getTPartition().getSd().getCols();
+ cols = part.getCols();
}
} else {
cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
@@ -798,7 +798,7 @@
}
}
- LOG.info("DDLTask: written data for " + tbl.getName());
+ LOG.info("DDLTask: written data for " + tbl.getTableName());
((FSDataOutputStream) outStream).close();
} catch (FileNotFoundException e) {
@@ -958,7 +958,7 @@
Table oldTbl = tbl.copy();
if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
- tbl.getTTable().setTableName(alterTbl.getNewName());
+ tbl.setTableName(alterTbl.getNewName());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
List<FieldSchema> newCols = alterTbl.getNewCols();
List<FieldSchema> oldCols = tbl.getCols();
@@ -1080,10 +1080,7 @@
tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
alterTbl.getProps());
}
- // since serde is modified then do the appropriate things to reset columns
- // etc
- tbl.reinitSerDe();
- tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getName(), tbl
+ tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
.getDeserializer()));
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
@@ -1266,18 +1263,11 @@
private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
// create the table
Table tbl = new Table(crtTbl.getTableName());
- StorageDescriptor tblStorDesc = tbl.getTTable().getSd();
- if (crtTbl.getBucketCols() != null) {
- tblStorDesc.setBucketCols(crtTbl.getBucketCols());
- }
- if (crtTbl.getSortCols() != null) {
- tbl.setSortCols(crtTbl.getSortCols());
- }
if (crtTbl.getPartCols() != null) {
tbl.setPartCols(crtTbl.getPartCols());
}
if (crtTbl.getNumBuckets() != -1) {
- tblStorDesc.setNumBuckets(crtTbl.getNumBuckets());
+ tbl.setNumBuckets(crtTbl.getNumBuckets());
}
if (crtTbl.getSerName() != null) {
@@ -1321,19 +1311,26 @@
*/
if (crtTbl.getSerName() == null) {
LOG.info("Default to LazySimpleSerDe for table " + crtTbl.getTableName());
- tbl
- .setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class
- .getName());
+ tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
} else {
// let's validate that the serde exists
validateSerDe(crtTbl.getSerName());
}
+ if (crtTbl.getCols() != null) {
+ tbl.setFields(crtTbl.getCols());
+ }
+ if (crtTbl.getBucketCols() != null) {
+ tbl.setBucketCols(crtTbl.getBucketCols());
+ }
+ if (crtTbl.getSortCols() != null) {
+ tbl.setSortCols(crtTbl.getSortCols());
+ }
if (crtTbl.getComment() != null) {
tbl.setProperty("comment", crtTbl.getComment());
}
if (crtTbl.getLocation() != null) {
- tblStorDesc.setLocation(crtTbl.getLocation());
+ tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
}
tbl.setInputFormatClass(crtTbl.getInputFormat());
@@ -1382,10 +1379,6 @@
return rc;
}
- if (crtTbl.getCols() != null) {
- tbl.setFields(crtTbl.getCols());
- }
-
// create the table
db.createTable(tbl, crtTbl.getIfNotExists());
work.getOutputs().add(new WriteEntity(tbl));
@@ -1407,9 +1400,8 @@
// Get the existing table
Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, crtTbl
.getLikeTableName());
- StorageDescriptor tblStorDesc = tbl.getTTable().getSd();
- tbl.getTTable().setTableName(crtTbl.getTableName());
+ tbl.setTableName(crtTbl.getTableName());
if (crtTbl.isExternal()) {
tbl.setProperty("EXTERNAL", "TRUE");
@@ -1418,10 +1410,9 @@
}
if (crtTbl.getLocation() != null) {
- tblStorDesc.setLocation(crtTbl.getLocation());
+ tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
} else {
- tblStorDesc.setLocation(null);
- tblStorDesc.unsetLocation();
+ tbl.unsetDataLocation();
}
// create the table
@@ -1445,7 +1436,7 @@
Table tbl = new Table(crtView.getViewName());
tbl.setTableType(TableType.VIRTUAL_VIEW);
tbl.setSerializationLib(null);
- tbl.getTTable().getSd().getSerdeInfo().getParameters().clear();
+ tbl.clearSerDeInfo();
tbl.setViewOriginalText(crtView.getViewOriginalText());
tbl.setViewExpandedText(crtView.getViewExpandedText());
tbl.setFields(crtView.getSchema());
@@ -1472,7 +1463,7 @@
return 1;
}
// set create time
- tbl.getTTable().setCreateTime((int) (System.currentTimeMillis() / 1000));
+ tbl.setCreateTime((int) (System.currentTimeMillis() / 1000));
return 0;
}
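A pattern worth noting in the DDLTask hunks: code stops reaching through getTTable() into the Thrift StorageDescriptor and instead uses the new wrapper setters, which update the Thrift object and any cached fields together. The reordering in createTable is also deliberate: columns are now set before bucket and sort columns, since setBucketCols (shown in the Table.java diff below) validates each bucket column against getCols(). A hedged usage sketch with made-up values, assuming a surrounding method that may throw HiveException:

    // All setters below appear in this commit; the table name, columns and
    // location are illustrative only.
    Table tbl = new Table("page_views");
    tbl.setFields(Arrays.asList(
        new FieldSchema("page_url", "string", null),
        new FieldSchema("ts", "bigint", null)));      // columns first...
    tbl.setBucketCols(Arrays.asList("page_url"));     // ...so this validation passes
    tbl.setSortCols(new ArrayList<Order>());
    tbl.setNumBuckets(32);
    tbl.setDataLocation(new Path("/user/hive/warehouse/page_views").toUri());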
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java Thu Feb 11 02:33:54 2010
@@ -81,9 +81,9 @@
*/
public Map<String, String> getParameters() {
if (p != null) {
- return p.getTPartition().getParameters();
+ return p.getParameters();
} else {
- return t.getTTable().getParameters();
+ return t.getParameters();
}
}
@@ -118,10 +118,10 @@
@Override
public String toString() {
if (p != null) {
- return p.getTable().getDbName() + "@" + p.getTable().getName() + "@"
+ return p.getTable().getDbName() + "@" + p.getTable().getTableName() + "@"
+ p.getName();
} else {
- return t.getDbName() + "@" + t.getName();
+ return t.getDbName() + "@" + t.getTableName();
}
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java Thu Feb 11 02:33:54 2010
@@ -148,9 +148,9 @@
public String toString() {
switch (typ) {
case TABLE:
- return t.getDbName() + "@" + t.getName();
+ return t.getDbName() + "@" + t.getTableName();
case PARTITION:
- return t.getDbName() + "@" + t.getName() + "@" + p.getName();
+ return t.getDbName() + "@" + t.getTableName() + "@" + p.getName();
default:
return d;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Feb 11 02:33:54 2010
@@ -37,6 +37,7 @@
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -44,6 +45,7 @@
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
@@ -310,9 +312,8 @@
*/
public void createTable(Table tbl, boolean ifNotExists) throws HiveException {
try {
- tbl.initSerDe();
if (tbl.getCols().size() == 0) {
- tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getName(),
+ tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
tbl.getDeserializer()));
}
tbl.checkValidity();
@@ -404,7 +405,8 @@
if (tableName == null || tableName.equals("")) {
throw new HiveException("empty table creation??");
}
- Table table = new Table();
+
+ // Get the table from metastore
org.apache.hadoop.hive.metastore.api.Table tTable = null;
try {
tTable = getMSC().getTable(dbName, tableName);
@@ -417,18 +419,27 @@
} catch (Exception e) {
throw new HiveException("Unable to fetch table " + tableName, e);
}
- // just a sanity check
- assert (tTable != null);
- try {
-
+
+ // For non-views, we need to do some extra fixes
+ if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) {
+ // Fix the non-printable chars
+ Map<String, String> parameters = tTable.getSd().getParameters();
+ String sf = parameters.get(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
+ if (sf != null) {
+ char[] b = sf.toCharArray();
+ if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t
+ parameters.put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT,
+ Integer.toString(b[0]));
+ }
+ }
+
// Use LazySimpleSerDe for MetadataTypedColumnsetSerDe.
// NOTE: LazySimpleSerDe does not support tables with a single column
// of type "array<string>". This happens when the table is created using
// an earlier version of Hive.
- if (
- org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class
+ if (org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class
.getName().equals(
tTable.getSd().getSerdeInfo().getSerializationLib())
&& tTable.getSd().getColsSize() > 0
@@ -436,47 +447,10 @@
tTable.getSd().getSerdeInfo().setSerializationLib(
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
}
-
- // first get a schema (in key / vals)
- Properties p = MetaStoreUtils.getSchema(tTable);
- table.setSchema(p);
- table.setTTable(tTable);
-
- if (table.isView()) {
- // Skip the rest, which isn't relevant for a view.
- table.checkValidity();
- return table;
- }
-
- table
- .setInputFormatClass((Class<? extends InputFormat<WritableComparable, Writable>>) Class
- .forName(
- table
- .getSchema()
- .getProperty(
- org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
- org.apache.hadoop.mapred.SequenceFileInputFormat.class
- .getName()), true, JavaUtils.getClassLoader()));
- table.setOutputFormatClass(Class.forName(table.getSchema().getProperty(
- org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
- HiveSequenceFileOutputFormat.class.getName()), true, JavaUtils
- .getClassLoader()));
- table.setDeserializer(MetaStoreUtils.getDeserializer(getConf(), p));
- table.setDataLocation(new URI(tTable.getSd().getLocation()));
- } catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
- throw new HiveException(e);
- }
- String sf = table
- .getSerdeParam(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
- if (sf != null) {
- char[] b = sf.toCharArray();
- if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t
- table.setSerdeParam(
- org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT,
- Integer.toString(b[0]));
- }
}
+
+ Table table = new Table(tTable);
+
table.checkValidity();
return table;
}
@@ -722,11 +696,11 @@
}
org.apache.hadoop.hive.metastore.api.Partition tpart = null;
try {
- tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
+ tpart = getMSC().getPartition(tbl.getDbName(), tbl.getTableName(), pvals);
if (tpart == null && forceCreate) {
- LOG.debug("creating partition for table " + tbl.getName()
+ LOG.debug("creating partition for table " + tbl.getTableName()
+ " with partition spec : " + partSpec);
- tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+ tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
;
}
if (tpart == null) {
@@ -774,7 +748,7 @@
if (tbl.isPartitioned()) {
List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
try {
- tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(),
+ tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(),
(short) -1);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
@@ -786,12 +760,7 @@
}
return parts;
} else {
- // create an empty partition.
- // HACK, HACK. SemanticAnalyzer code requires that an empty partition when
- // the table is not partitioned
- org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition();
- tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy
- Partition part = new Partition(tbl, tPart);
+ Partition part = new Partition(tbl);
ArrayList<Partition> parts = new ArrayList<Partition>(1);
parts.add(part);
return parts;
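The getTable() rewrite keeps the legacy serialization-format fix-up but applies it directly to the Thrift parameters before the Table wrapper is built: a format historically stored as a single control character (a char below 10, e.g. ^A or \t) is rewritten as its decimal string so it round-trips cleanly through serialization and printing. A hedged, standalone sketch of just that normalization, using the literal value of SERIALIZATION_FORMAT, "serialization.format":

    import java.util.HashMap;
    import java.util.Map;

    public class FormatFixup {
      // Mirrors the fix-up in getTable() above: a single control character
      // becomes its decimal string; anything else is left untouched.
      static void fixSerializationFormat(Map<String, String> parameters) {
        String sf = parameters.get("serialization.format");
        if (sf != null && sf.length() == 1 && sf.charAt(0) < 10) {
          parameters.put("serialization.format", Integer.toString(sf.charAt(0)));
        }
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<String, String>();
        params.put("serialization.format", "\u0001"); // ^A, Hive's default delimiter
        fixSerializationFormat(params);
        System.out.println(params.get("serialization.format")); // prints: 1
      }
    }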
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Thu Feb 11 02:33:54 2010
@@ -225,7 +225,7 @@
Path tablePath = table.getPath();
FileSystem fs = tablePath.getFileSystem(conf);
if (!fs.exists(tablePath)) {
- result.getTablesNotOnFs().add(table.getName());
+ result.getTablesNotOnFs().add(table.getTableName());
return;
}
@@ -242,7 +242,7 @@
if (!fs.exists(partPath)) {
PartitionResult pr = new PartitionResult();
pr.setPartitionName(partition.getName());
- pr.setTableName(partition.getTable().getName());
+ pr.setTableName(partition.getTable().getTableName());
result.getPartitionsNotOnFs().add(pr);
}
@@ -292,7 +292,7 @@
if (partitionName != null) {
PartitionResult pr = new PartitionResult();
pr.setPartitionName(partitionName);
- pr.setTableName(table.getName());
+ pr.setTableName(table.getTableName());
result.getPartitionsNotInMs().add(pr);
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Thu Feb 11 02:33:54 2010
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.metadata;
+import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -25,8 +26,6 @@
import java.util.List;
import java.util.Map;
import java.util.Properties;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,7 +37,9 @@
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
@@ -49,9 +50,13 @@
import org.apache.thrift.transport.TMemoryBuffer;
/**
- * A Hive Table Partition: is a fundamental storage unit within a Table
+ * A Hive Table Partition: a fundamental storage unit within a Table.
+ *
+ * Please note that the ql code should always go through methods of this class to access the
+ * metadata, instead of directly accessing org.apache.hadoop.hive.metastore.api.Partition.
+ * This helps isolate the metastore code from the ql code.
*/
-public class Partition {
+public class Partition implements Serializable {
@SuppressWarnings("nls")
static final private Log LOG = LogFactory
@@ -60,20 +65,14 @@
private Table table;
private org.apache.hadoop.hive.metastore.api.Partition tPartition;
- private Deserializer deserializer;
- private Properties schema;
- private Class<? extends InputFormat> inputFormatClass;
- private Class<? extends HiveOutputFormat> outputFormatClass;
-
/**
- * @return the tPartition
+ * These fields are cached. The information comes from tPartition.
*/
- public org.apache.hadoop.hive.metastore.api.Partition getTPartition() {
- return tPartition;
- }
-
- private LinkedHashMap<String, String> spec;
-
+ private Deserializer deserializer;
+ private Class<? extends HiveOutputFormat> outputFormatClass;
+ private Class<? extends InputFormat> inputFormatClass;
+ private URI uri;
+
/**
* @return The values of the partition
* @see org.apache.hadoop.hive.metastore.api.Partition#getValues()
@@ -82,8 +81,16 @@
return tPartition.getValues();
}
- private Path partPath;
- private URI partURI;
+ /**
+ * Create an empty partition.
+ * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
+ */
+ public Partition(Table tbl) throws HiveException {
+ org.apache.hadoop.hive.metastore.api.Partition tPart =
+ new org.apache.hadoop.hive.metastore.api.Partition();
+ tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy
+ initialize(tbl, tPart);
+ }
public Partition(Table tbl, org.apache.hadoop.hive.metastore.api.Partition tp)
throws HiveException {
@@ -117,7 +124,7 @@
org.apache.hadoop.hive.metastore.api.Partition tpart = new org.apache.hadoop.hive.metastore.api.Partition();
tpart.setDbName(tbl.getDbName());
- tpart.setTableName(tbl.getName());
+ tpart.setTableName(tbl.getTableName());
tpart.setValues(pvals);
StorageDescriptor sd = new StorageDescriptor();
@@ -146,103 +153,85 @@
/**
* Initializes this object with the given variables
*
- * @param tbl
+ * @param table
* Table the partition belongs to
- * @param tp
+ * @param tPartition
* Thrift Partition object
* @throws HiveException
* Thrown if we cannot initialize the partition
*/
- private void initialize(Table tbl,
- org.apache.hadoop.hive.metastore.api.Partition tp) throws HiveException {
+ private void initialize(Table table,
+ org.apache.hadoop.hive.metastore.api.Partition tPartition) throws HiveException {
- table = tbl;
- tPartition = tp;
- partName = "";
+ this.table = table;
+ this.tPartition = tPartition;
+ String partName = "";
- if (tbl.isPartitioned()) {
+ if (table.isPartitioned()) {
try {
- partName = Warehouse.makePartName(tbl.getPartCols(), tp.getValues());
- if (tp.getSd().getLocation() == null) {
+ partName = Warehouse.makePartName(table.getPartCols(), tPartition.getValues());
+ if (tPartition.getSd().getLocation() == null) {
// set default if location is not set
- partPath = new Path(tbl.getDataLocation().toString(), partName);
- tp.getSd().setLocation(partPath.toString());
- } else {
- partPath = new Path(tp.getSd().getLocation());
+ Path partPath = new Path(table.getDataLocation().toString(), partName);
+ tPartition.getSd().setLocation(partPath.toString());
}
} catch (MetaException e) {
- throw new HiveException("Invalid partition for table " + tbl.getName(),
+ throw new HiveException("Invalid partition for table " + table.getTableName(),
e);
}
- } else {
- // We are in the HACK territory.
- // SemanticAnalyzer expects a single partition whose schema
- // is same as the table partition.
- partPath = table.getPath();
}
- spec = tbl.createSpec(tp);
- partURI = partPath.toUri();
- }
+ // This will set up field: inputFormatClass
+ getInputFormatClass();
+ // This will set up field: outputFormatClass
+ getOutputFormatClass();
- public String getName() {
- return partName;
}
- public Table getTable() {
- return table;
+ public String getName() {
+ try {
+ return Warehouse.makePartName(table.getPartCols(), tPartition.getValues());
+ } catch (MetaException e) {
+ throw new RuntimeException(e);
+ }
}
public Path[] getPath() {
- Path[] ret = new Path[1];
- ret[0] = partPath;
- return (ret);
+ Path[] ret = new Path[]{getPartitionPath()};
+ return ret;
}
public Path getPartitionPath() {
- return partPath;
+ if (table.isPartitioned()) {
+ return new Path(tPartition.getSd().getLocation());
+ } else {
+ return new Path(table.getTTable().getSd().getLocation());
+ }
}
final public URI getDataLocation() {
- return partURI;
+ if (uri == null) {
+ uri = getPartitionPath().toUri();
+ }
+ return uri;
}
final public Deserializer getDeserializer() {
if (deserializer == null) {
try {
- initSerDe();
+ deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(),
+ tPartition, table.getTTable());
} catch (HiveException e) {
- LOG.error("Error in initializing serde.", e);
+ throw new RuntimeException(e);
+ } catch (MetaException e) {
+ throw new RuntimeException(e);
}
}
return deserializer;
}
- /**
- * @param schema
- * the schema to set
- */
- public void setSchema(Properties schema) {
- this.schema = schema;
- }
-
public Properties getSchema() {
- if (schema == null) {
- schema = MetaStoreUtils
- .getSchema(getTPartition(), getTable().getTTable());
- }
- return schema;
- }
-
- protected void initSerDe() throws HiveException {
- if (deserializer == null) {
- try {
- deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(),
- getTPartition(), getTable().getTTable());
- } catch (MetaException e) {
- throw new HiveException(e);
- }
- }
+ return MetaStoreUtils.getSchema(tPartition, table.getTTable());
}
/**
@@ -256,9 +245,10 @@
/**
* @param class1
*/
- public void setOutputFormatClass(Class<?> class1) {
- outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(class1);
- tPartition.getSd().setOutputFormat(class1.getName());
+ public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
+ this.outputFormatClass = outputFormatClass;
+ tPartition.getSd().setOutputFormat(HiveFileFormatUtils
+ .getOutputFormatSubstitute(outputFormatClass).getName());
}
final public Class<? extends InputFormat> getInputFormatClass()
@@ -268,13 +258,12 @@
org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName());
try {
- setInputFormatClass((Class<? extends InputFormat>) Class.forName(
- clsName, true, JavaUtils.getClassLoader()));
+ inputFormatClass = ((Class<? extends InputFormat>) Class.forName(clsName, true,
+ JavaUtils.getClassLoader()));
} catch (ClassNotFoundException e) {
throw new HiveException("Class not found: " + clsName, e);
}
}
-
return inputFormatClass;
}
@@ -285,8 +274,14 @@
org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
HiveSequenceFileOutputFormat.class.getName());
try {
- setOutputFormatClass(Class.forName(clsName, true, JavaUtils
- .getClassLoader()));
+ Class<?> c = (Class<? extends HiveOutputFormat>)(Class.forName(clsName, true,
+ JavaUtils.getClassLoader()));
+ // Replace FileOutputFormat for backward compatibility
+ if (!HiveOutputFormat.class.isAssignableFrom(c)) {
+ outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(c);
+ } else {
+ outputFormatClass = (Class<? extends HiveOutputFormat>)c;
+ }
} catch (ClassNotFoundException e) {
throw new HiveException("Class not found: " + clsName, e);
}
@@ -320,6 +315,14 @@
return tPartition.getSd().getBucketCols();
}
+ public List<Order> getSortCols() {
+ return tPartition.getSd().getSortCols();
+ }
+
+ public List<String> getSortColNames() {
+ return Utilities.getColumnNamesFromSortCols(getSortCols());
+ }
+
/**
* mapping from bucket number to bucket path
*/
@@ -329,7 +332,7 @@
try {
FileSystem fs = FileSystem.get(table.getDataLocation(), Hive.get()
.getConf());
- String pathPattern = partPath.toString();
+ String pathPattern = getPartitionPath().toString();
if (getBucketCount() > 0) {
pathPattern = pathPattern + "/*";
}
@@ -349,29 +352,6 @@
}
}
- /**
- * mapping from a Path to the bucket number if any
- */
- private static Pattern bpattern = Pattern
- .compile("part-([0-9][0-9][0-9][0-9][0-9])");
-
- private String partName;
-
- @SuppressWarnings("nls")
- public static int getBucketNum(Path p) {
- Matcher m = bpattern.matcher(p.getName());
- if (m.find()) {
- String bnum_str = m.group(1);
- try {
- return (Integer.parseInt(bnum_str));
- } catch (NumberFormatException e) {
- throw new RuntimeException("Unexpected error parsing: " + p.getName()
- + "," + bnum_str);
- }
- }
- return 0;
- }
-
@SuppressWarnings("nls")
public Path[] getPath(Sample s) throws HiveException {
if (s == null) {
@@ -398,7 +378,7 @@
if ((scount / bcount) * bcount != scount) {
throw new HiveException("Sample Count" + scount
+ " is not a multiple of bucket count " + bcount + " for table "
- + table.getName());
+ + table.getTableName());
}
// undersampling a bucket
ret.add(getBucketPath((s.getSampleNum() - 1) % bcount));
@@ -406,7 +386,7 @@
if ((bcount / scount) * scount != bcount) {
throw new HiveException("Sample Count" + scount
+ " is not a divisor of bucket count " + bcount + " for table "
- + table.getName());
+ + table.getTableName());
}
// sampling multiple buckets
for (int i = 0; i < bcount / scount; i++) {
@@ -418,7 +398,7 @@
}
public LinkedHashMap<String, String> getSpec() {
- return spec;
+ return table.createSpec(tPartition);
}
@SuppressWarnings("nls")
@@ -426,7 +406,7 @@
public String toString() {
String pn = "Invalid Partition";
try {
- pn = Warehouse.makePartName(spec);
+ pn = Warehouse.makePartName(getSpec());
} catch (MetaException e) {
// ignore as we most probably in an exception path already otherwise this
// error wouldn't occur
@@ -434,20 +414,39 @@
return table.toString() + "(" + pn + ")";
}
- public void setProperty(String name, String value) {
- getTPartition().putToParameters(name, value);
+ public Table getTable() {
+ return table;
}
/**
- * getProperty
- *
+ * Should only be used by serialization.
*/
- public String getProperty(String name) {
- Map<String, String> params = getTPartition().getParameters();
- if (params == null) {
- return null;
- }
- return params.get(name);
+ public void setTable(Table table) {
+ this.table = table;
+ }
+
+ /**
+ * Should only be used by serialization.
+ */
+ public org.apache.hadoop.hive.metastore.api.Partition getTPartition() {
+ return tPartition;
+ }
+
+ /**
+ * Should only be used by serialization.
+ */
+ public void setTPartition(
+ org.apache.hadoop.hive.metastore.api.Partition partition) {
+ tPartition = partition;
+ }
+
+ public Map<String, String> getParameters() {
+ return tPartition.getParameters();
+ }
+
+ public List<FieldSchema> getCols() {
+ return tPartition.getSd().getCols();
}
+
}
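One consequence of this refactoring shows up in the new Partition(Table) constructor: the old inline "HACK" in Hive.getPartitions (removed above) becomes a documented pseudo-partition that wraps an unpartitioned table and shares its storage descriptor, letting planning code iterate over partitions uniformly. A hedged sketch of the calling pattern; the helper itself is illustrative, not part of the patch:

    // Treat every table as a list of partitions, real or pseudo.
    static List<Partition> partitionsOf(Hive db, Table tbl) throws HiveException {
      if (tbl.isPartitioned()) {
        return db.getPartitions(tbl);     // real partitions from the metastore
      }
      List<Partition> single = new ArrayList<Partition>(1);
      single.add(new Partition(tbl));     // pseudo-partition, shares the table's sd
      return single;
    }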
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Thu Feb 11 02:33:54 2010
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.ql.metadata;
import java.io.IOException;
+import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
@@ -42,6 +43,7 @@
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -52,125 +54,100 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.SequenceFileInputFormat;
/**
- * A Hive Table: is a fundamental unit of data in Hive that shares a common
- * schema/DDL
+ * A Hive Table: a fundamental unit of data in Hive that shares a common schema/DDL.
+ *
+ * Please note that the ql code should always go through methods of this class to access the
+ * metadata, instead of directly accessing org.apache.hadoop.hive.metastore.api.Table. This
+ * helps isolate the metastore code from the ql code.
*/
-public class Table {
+public class Table implements Serializable {
+
+ private static final long serialVersionUID = 1L;
static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Table");
- private Properties schema;
- private Deserializer deserializer;
- private URI uri;
- private Class<? extends InputFormat> inputFormatClass;
- private Class<? extends HiveOutputFormat> outputFormatClass;
private org.apache.hadoop.hive.metastore.api.Table tTable;
/**
- * Table (only used internally)
- *
- * @throws HiveException
- *
+ * These are all cached fields. The information comes from tTable.
*/
- protected Table() throws HiveException {
+ private Deserializer deserializer;
+ private Class<? extends HiveOutputFormat> outputFormatClass;
+ private Class<? extends InputFormat> inputFormatClass;
+ private URI uri;
+
+ public Table() {
}
- /**
- * Table
- *
- * Create a TableMetaInfo object presumably with the intent of saving it to
- * the metastore
- *
- * @param name
- * the name of this table in the metadb
- * @param schema
- * an object that represents the schema that this SerDe must know
- * @param deserializer
- * a Class to be used for deserializing the data
- * @param dataLocation
- * where is the table ? (e.g.,
- * dfs://hadoop001.sf2p.facebook.com:9000/
- * user/facebook/warehouse/example) NOTE: should not be hardcoding
- * this, but ok for now
- *
- * @exception HiveException
- * on internal error. Note not possible now, but in the future
- * reserve the right to throw an exception
- */
- public Table(String name, Properties schema, Deserializer deserializer,
- Class<? extends InputFormat<?, ?>> inputFormatClass,
- Class<?> outputFormatClass, URI dataLocation, Hive hive)
- throws HiveException {
- initEmpty();
- this.schema = schema;
- this.deserializer = deserializer; // TODO: convert to SerDeInfo format
- getTTable().getSd().getSerdeInfo().setSerializationLib(
- deserializer.getClass().getName());
- getTTable().setTableName(name);
- getSerdeInfo().setSerializationLib(deserializer.getClass().getName());
- setInputFormatClass(inputFormatClass);
- setOutputFormatClass(HiveFileFormatUtils
- .getOutputFormatSubstitute(outputFormatClass));
- setDataLocation(dataLocation);
+ public Table(org.apache.hadoop.hive.metastore.api.Table table) {
+ tTable = table;
+ if (!isView()) {
+ // This will set up field: inputFormatClass
+ getInputFormatClass();
+ // This will set up field: outputFormatClass
+ getOutputFormatClass();
+ }
}
public Table(String name) {
- // fill in defaults
- initEmpty();
- getTTable().setTableName(name);
- getTTable().setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME);
- // We have to use MetadataTypedColumnsetSerDe because LazySimpleSerDe does
- // not
- // support a table with no columns.
- getSerdeInfo().setSerializationLib(
- MetadataTypedColumnsetSerDe.class.getName());
- getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
- }
-
- void initEmpty() {
- setTTable(new org.apache.hadoop.hive.metastore.api.Table());
- getTTable().setSd(new StorageDescriptor());
- getTTable().setPartitionKeys(new ArrayList<FieldSchema>());
- getTTable().setParameters(new HashMap<String, String>());
-
- StorageDescriptor sd = getTTable().getSd();
- sd.setSerdeInfo(new SerDeInfo());
- sd.setNumBuckets(-1);
- sd.setBucketCols(new ArrayList<String>());
- sd.setCols(new ArrayList<FieldSchema>());
- sd.setParameters(new HashMap<String, String>());
- sd.setSortCols(new ArrayList<Order>());
-
- sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-
- setTableType(TableType.MANAGED_TABLE);
+ this(getEmptyTable(name));
}
- public void reinitSerDe() throws HiveException {
- try {
- deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(),
- getTTable());
- } catch (MetaException e) {
- throw new HiveException(e);
- }
+ /**
+ * This function should only be used in serialization.
+ * We should never call this function to modify the fields, because
+ * the cached fields will become outdated.
+ */
+ public org.apache.hadoop.hive.metastore.api.Table getTTable() {
+ return tTable;
}
-
- protected void initSerDe() throws HiveException {
- if (deserializer == null) {
- try {
- deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(),
- getTTable());
- } catch (MetaException e) {
- throw new HiveException(e);
- }
+
+ /**
+ * This function should only be called by Java serialization.
+ */
+ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) {
+ this.tTable = tTable;
+ }
+
+ /**
+ * Initialize an empty table.
+ */
+ static org.apache.hadoop.hive.metastore.api.Table getEmptyTable(String name) {
+ StorageDescriptor sd = new StorageDescriptor();
+ {
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.setNumBuckets(-1);
+ sd.setBucketCols(new ArrayList<String>());
+ sd.setCols(new ArrayList<FieldSchema>());
+ sd.setParameters(new HashMap<String, String>());
+ sd.setSortCols(new ArrayList<Order>());
+ sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+ // We have to use MetadataTypedColumnsetSerDe because LazySimpleSerDe does
+ // not support a table with no columns.
+ sd.getSerdeInfo().setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
+ sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
+ sd.setInputFormat(SequenceFileInputFormat.class.getName());
+ sd.setOutputFormat(HiveSequenceFileOutputFormat.class.getName());
}
+
+ org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table();
+ {
+ t.setSd(sd);
+ t.setPartitionKeys(new ArrayList<FieldSchema>());
+ t.setParameters(new HashMap<String, String>());
+ t.setTableType(TableType.MANAGED_TABLE.toString());
+ t.setTableName(name);
+ t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+ }
+ return t;
}
public void checkValidity() throws HiveException {
// check for validity
- String name = getTTable().getTableName();
+ String name = tTable.getTableName();
if (null == name || name.length() == 0
|| !MetaStoreUtils.validateName(name)) {
throw new HiveException("[" + name + "]: is not a valid table name");
@@ -228,54 +205,77 @@
return;
}
- /**
- * @param inputFormatClass
- */
public void setInputFormatClass(Class<? extends InputFormat> inputFormatClass) {
this.inputFormatClass = inputFormatClass;
tTable.getSd().setInputFormat(inputFormatClass.getName());
}
- /**
- * @param class1
- */
- public void setOutputFormatClass(Class<?> class1) {
- outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(class1);
- tTable.getSd().setOutputFormat(class1.getName());
+ public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
+ this.outputFormatClass = outputFormatClass;
+ tTable.getSd().setOutputFormat(outputFormatClass.getName());
}
final public Properties getSchema() {
- return schema;
+ return MetaStoreUtils.getSchema(tTable);
}
final public Path getPath() {
- return new Path(getTTable().getSd().getLocation());
+ return new Path(tTable.getSd().getLocation());
}
- final public String getName() {
- return getTTable().getTableName();
+ final public String getTableName() {
+ return tTable.getTableName();
}
final public URI getDataLocation() {
+ if (uri == null) {
+ uri = getPath().toUri();
+ }
return uri;
}
final public Deserializer getDeserializer() {
- if (deserializer == null) {
+ if (deserializer == null) {
try {
- initSerDe();
+ deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(), tTable);
+ } catch (MetaException e) {
+ throw new RuntimeException(e);
} catch (HiveException e) {
- LOG.error("Error in initializing serde.", e);
+ throw new RuntimeException(e);
}
}
return deserializer;
}
final public Class<? extends InputFormat> getInputFormatClass() {
- return inputFormatClass;
+ if (inputFormatClass == null) {
+ try {
+ inputFormatClass = (Class<? extends InputFormat>)
+ Class.forName(tTable.getSd().getInputFormat(), true, JavaUtils.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return inputFormatClass;
}
final public Class<? extends HiveOutputFormat> getOutputFormatClass() {
+ // Replace FileOutputFormat for backward compatibility
+
+ if (outputFormatClass == null) {
+ try {
+ Class<?> c = Class.forName(tTable.getSd().getOutputFormat(), true,
+ JavaUtils.getClassLoader());
+ if (!HiveOutputFormat.class.isAssignableFrom(c)) {
+ outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(c);
+ } else {
+ outputFormatClass = (Class<? extends HiveOutputFormat>)c;
+ }
+
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
return outputFormatClass;
}
@@ -283,7 +283,7 @@
throws HiveException {
// TODO - types need to be checked.
- List<FieldSchema> partCols = getTTable().getPartitionKeys();
+ List<FieldSchema> partCols = tTable.getPartitionKeys();
if (partCols == null || (partCols.size() == 0)) {
if (spec != null) {
throw new HiveException(
@@ -310,23 +310,19 @@
}
public void setProperty(String name, String value) {
- getTTable().getParameters().put(name, value);
+ tTable.getParameters().put(name, value);
}
- /**
- * getProperty
- *
- */
public String getProperty(String name) {
- return getTTable().getParameters().get(name);
+ return tTable.getParameters().get(name);
}
public void setTableType(TableType tableType) {
- getTTable().setTableType(tableType.toString());
+ tTable.setTableType(tableType.toString());
}
public TableType getTableType() {
- return Enum.valueOf(TableType.class, getTTable().getTableType());
+ return Enum.valueOf(TableType.class, tTable.getTableType());
}
public ArrayList<StructField> getFields() {
@@ -359,32 +355,16 @@
}
}
- /**
- * @param schema
- * the schema to set
- */
- public void setSchema(Properties schema) {
- this.schema = schema;
- }
-
- /**
- * @param deserializer
- * the deserializer to set
- */
- public void setDeserializer(Deserializer deserializer) {
- this.deserializer = deserializer;
- }
-
- @Override
+ @Override
public String toString() {
- return getTTable().getTableName();
+ return tTable.getTableName();
}
public List<FieldSchema> getPartCols() {
- List<FieldSchema> partKeys = getTTable().getPartitionKeys();
+ List<FieldSchema> partKeys = tTable.getPartitionKeys();
if (partKeys == null) {
partKeys = new ArrayList<FieldSchema>();
- getTTable().setPartitionKeys(partKeys);
+ tTable.setPartitionKeys(partKeys);
}
return partKeys;
}
@@ -400,7 +380,7 @@
// TODO merge this with getBucketCols function
public String getBucketingDimensionId() {
- List<String> bcols = getTTable().getSd().getBucketCols();
+ List<String> bcols = tTable.getSd().getBucketCols();
if (bcols == null || bcols.size() == 0) {
return null;
}
@@ -413,24 +393,14 @@
return bcols.get(0);
}
- /**
- * @return the tTable
- */
- public org.apache.hadoop.hive.metastore.api.Table getTTable() {
- return tTable;
+ public void setDataLocation(URI uri) {
+ this.uri = uri;
+ tTable.getSd().setLocation(uri.toString());
}
- /**
- * @param table
- * the tTable to set
- */
- protected void setTTable(org.apache.hadoop.hive.metastore.api.Table table) {
- tTable = table;
- }
-
- public void setDataLocation(URI uri2) {
- uri = uri2;
- getTTable().getSd().setLocation(uri2.toString());
+ public void unsetDataLocation() {
+ this.uri = null;
+ tTable.getSd().unsetLocation();
}
public void setBucketCols(List<String> bucketCols) throws HiveException {
@@ -441,14 +411,14 @@
for (String col : bucketCols) {
if (!isField(col)) {
throw new HiveException("Bucket columns " + col
- + " is not part of the table columns");
+ + " is not part of the table columns (" + getCols() );
}
}
- getTTable().getSd().setBucketCols(bucketCols);
+ tTable.getSd().setBucketCols(bucketCols);
}
public void setSortCols(List<Order> sortOrder) throws HiveException {
- getTTable().getSd().setSortCols(sortOrder);
+ tTable.getSd().setSortCols(sortOrder);
}
private boolean isField(String col) {
@@ -464,14 +434,12 @@
boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
getSerializationLib());
if (!getColsFromSerDe) {
- return getTTable().getSd().getCols();
+ return tTable.getSd().getCols();
} else {
try {
- return Hive.getFieldsFromDeserializer(getName(), getDeserializer());
+ return Hive.getFieldsFromDeserializer(getTableName(), getDeserializer());
} catch (HiveException e) {
- LOG
- .error("Unable to get field from serde: " + getSerializationLib(),
- e);
+ LOG.error("Unable to get field from serde: " + getSerializationLib(), e);
}
return new ArrayList<FieldSchema>();
}
@@ -491,15 +459,15 @@
}
public void setPartCols(List<FieldSchema> partCols) {
- getTTable().setPartitionKeys(partCols);
+ tTable.setPartitionKeys(partCols);
}
public String getDbName() {
- return getTTable().getDbName();
+ return tTable.getDbName();
}
public int getNumBuckets() {
- return getTTable().getSd().getNumBuckets();
+ return tTable.getSd().getNumBuckets();
}
/**
@@ -564,11 +532,11 @@
}
public void setFields(List<FieldSchema> fields) {
- getTTable().getSd().setCols(fields);
+ tTable.getSd().setCols(fields);
}
public void setNumBuckets(int nb) {
- getTTable().getSd().setNumBuckets(nb);
+ tTable.getSd().setNumBuckets(nb);
}
/**
@@ -612,7 +580,7 @@
}
private SerDeInfo getSerdeInfo() {
- return getTTable().getSd().getSerdeInfo();
+ return tTable.getSd().getSerdeInfo();
}
public void setSerializationLib(String lib) {
@@ -632,18 +600,30 @@
}
public List<String> getBucketCols() {
- return getTTable().getSd().getBucketCols();
+ return tTable.getSd().getBucketCols();
}
public List<Order> getSortCols() {
- return getTTable().getSd().getSortCols();
+ return tTable.getSd().getSortCols();
}
+ public void setTableName(String tableName) {
+ tTable.setTableName(tableName);
+ }
+
+ public void setDbName(String databaseName) {
+ tTable.setDbName(databaseName);
+ }
+
+ public List<FieldSchema> getPartitionKeys() {
+ return tTable.getPartitionKeys();
+ }
+
/**
* @return the original view text, or null if this table is not a view
*/
public String getViewOriginalText() {
- return getTTable().getViewOriginalText();
+ return tTable.getViewOriginalText();
}
/**
@@ -651,22 +631,25 @@
* the original view text to set
*/
public void setViewOriginalText(String viewOriginalText) {
- getTTable().setViewOriginalText(viewOriginalText);
+ tTable.setViewOriginalText(viewOriginalText);
}
/**
* @return the expanded view text, or null if this table is not a view
*/
public String getViewExpandedText() {
- return getTTable().getViewExpandedText();
+ return tTable.getViewExpandedText();
}
+ public void clearSerDeInfo() {
+ tTable.getSd().getSerdeInfo().getParameters().clear();
+ }
/**
* @param viewExpandedText
* the expanded view text to set
*/
public void setViewExpandedText(String viewExpandedText) {
- getTTable().setViewExpandedText(viewExpandedText);
+ tTable.setViewExpandedText(viewExpandedText);
}
/**
@@ -698,15 +681,10 @@
}
public Table copy() throws HiveException {
- Table newTbl = new Table();
-
- newTbl.schema = schema;
- newTbl.deserializer = deserializer; // TODO: convert to SerDeInfo format
-
- newTbl.setTTable(getTTable().clone());
- newTbl.uri = uri;
- newTbl.inputFormatClass = inputFormatClass;
- newTbl.outputFormatClass = outputFormatClass;
- return newTbl;
+ return new Table(tTable.clone());
+ }
+
+ public void setCreateTime(int createTime) {
+ tTable.setCreateTime(createTime);
}
};
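Both Table and Partition now resolve their output format lazily with the same backward-compatibility twist: if the class name stored in the metastore predates the HiveOutputFormat interface, HiveFileFormatUtils.getOutputFormatSubstitute swaps in an equivalent that implements it. A hedged sketch of that resolve-and-substitute step, factored into a standalone helper (the helper is illustrative, but every call it makes appears in this diff):

    @SuppressWarnings("unchecked")
    static Class<? extends HiveOutputFormat> resolveOutputFormat(String clsName)
        throws ClassNotFoundException {
      Class<?> c = Class.forName(clsName, true, JavaUtils.getClassLoader());
      if (HiveOutputFormat.class.isAssignableFrom(c)) {
        return (Class<? extends HiveOutputFormat>) c; // already the new interface
      }
      // Legacy OutputFormat (e.g. a plain FileOutputFormat subclass): replace
      // it with the registered HiveOutputFormat substitute for compatibility.
      return HiveFileFormatUtils.getOutputFormatSubstitute(c);
    }

Caching the result, as getOutputFormatClass() does, keeps the Class.forName cost to the first call per object.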
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java Thu Feb 11 02:33:54 2010
@@ -217,8 +217,7 @@
parts.addAll(partsList.getUnknownPartns());
for (Partition part : parts) {
List<String> bucketCols = part.getBucketCols();
- List<String> sortCols = Utilities.getColumnNamesFromSortCols(part
- .getTPartition().getSd().getSortCols());
+ List<String> sortCols = part.getSortColNames();
bucketGroupBy = matchBucketOrSortedColumns(groupByCols, bucketCols,
sortCols);
if (!bucketGroupBy) {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java Thu Feb 11 02:33:54 2010
@@ -200,7 +200,7 @@
// need to do full scan
fullScanMsg = "Tablesample denominator " + den
+ " is not multiple/divisor of bucket count " + bucketCount
- + " of table " + part.getTable().getName();
+ + " of table " + part.getTable().getTableName();
}
} else {
// need to do full scan
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Thu Feb 11 02:33:54 2010
@@ -159,10 +159,10 @@
HiveConf conf, String alias,
Map<String, PrunedPartitionList> prunedPartitionsMap) throws HiveException {
LOG.trace("Started pruning partiton");
- LOG.trace("tabname = " + tab.getName());
+ LOG.trace("tabname = " + tab.getTableName());
LOG.trace("prune Expression = " + prunerExpr);
- String key = tab.getName() + ";";
+ String key = tab.getTableName() + ";";
if (prunerExpr != null) {
key = key + prunerExpr.getExprString();
}
@@ -182,7 +182,7 @@
if (tab.isPartitioned()) {
for (String partName : Hive.get().getPartitionNames(tab.getDbName(),
- tab.getName(), (short) -1)) {
+ tab.getTableName(), (short) -1)) {
// Set all the variables here
LinkedHashMap<String, String> partSpec = Warehouse
.makeSpecFromName(partName);
@@ -214,7 +214,7 @@
if (!hasColumnExpr(prunerExpr)) {
throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
.getMsg("for Alias \"" + alias + "\" Table \""
- + tab.getName() + "\""));
+ + tab.getTableName() + "\""));
}
}
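The surrounding method memoizes pruning results per table/predicate pair, keyed on the strings built in the hunks above. A condensed, self-contained sketch of that caching shape (doPrune and the String result type are placeholders for the real pruning call and PrunedPartitionList):

  import java.util.Map;

  class PrunerCacheSketch {
    // Placeholder for the actual partition-pruning work.
    static String doPrune(String tableName, String expr) {
      return "partitions of " + tableName;
    }

    static String prune(String tableName, String exprString,
        Map<String, String> cache) {
      String key = tableName + ";";
      if (exprString != null) {
        key = key + exprString;       // one entry per table + predicate
      }
      String cached = cache.get(key);
      if (cached != null) {
        return cached;                // reuse an earlier pruning result
      }
      String result = doPrune(tableName, exprString);
      cache.put(key, result);
      return result;
    }
  }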
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java Thu Feb 11 02:33:54 2010
@@ -197,7 +197,7 @@
URI toURI = (ts.partHandle != null) ? ts.partHandle.getDataLocation()
: ts.tableHandle.getDataLocation();
- List<FieldSchema> parts = ts.tableHandle.getTTable().getPartitionKeys();
+ List<FieldSchema> parts = ts.tableHandle.getPartitionKeys();
if (isOverWrite && (parts != null && parts.size() > 0)
&& (ts.partSpec == null || ts.partSpec.size() == 0)) {
throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Thu Feb 11 02:33:54 2010
@@ -769,10 +769,10 @@
throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
}
- if (!HiveOutputFormat.class.isAssignableFrom(ts.tableHandle
- .getOutputFormatClass())) {
+ Class<?> outputFormatClass = ts.tableHandle.getOutputFormatClass();
+ if (!HiveOutputFormat.class.isAssignableFrom(outputFormatClass)) {
throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE
- .getMsg(ast));
+ .getMsg(ast, "The class is " + outputFormatClass.toString()));
}
if (ts.partSpec == null) {
@@ -821,7 +821,7 @@
ParseDriver pd = new ParseDriver();
ASTNode viewTree;
- final ASTNodeOrigin viewOrigin = new ASTNodeOrigin("VIEW", tab.getName(),
+ final ASTNodeOrigin viewOrigin = new ASTNodeOrigin("VIEW", tab.getTableName(),
tab.getViewExpandedText(), alias, qb.getParseInfo().getSrcForAlias(
alias));
try {
@@ -2981,7 +2981,7 @@
dest_tab = qbm.getDestTableForAlias(dest);
// check for partition
- List<FieldSchema> parts = dest_tab.getTTable().getPartitionKeys();
+ List<FieldSchema> parts = dest_tab.getPartitionKeys();
if (parts != null && parts.size() > 0) {
throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
}
@@ -2989,7 +2989,7 @@
queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
table_desc = Utilities.getTableDesc(dest_tab);
- idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getName());
+ idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
currentTableId = destTableId;
destTableId++;
@@ -2999,7 +2999,7 @@
new HashMap<String, String>()));
if (!outputs.add(new WriteEntity(dest_tab))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
- .getMsg(dest_tab.getName()));
+ .getMsg(dest_tab.getTableName()));
}
break;
}
@@ -3011,7 +3011,7 @@
queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
table_desc = Utilities.getTableDesc(dest_tab);
- idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getName());
+ idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
currentTableId = destTableId;
destTableId++;
@@ -3020,7 +3020,7 @@
.getSpec()));
if (!outputs.add(new WriteEntity(dest_part))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
- .getMsg(dest_tab.getName() + "@" + dest_part.getName()));
+ .getMsg(dest_tab.getTableName() + "@" + dest_part.getName()));
}
break;
}
@@ -4881,13 +4881,13 @@
// If there are no sample cols and no bucket cols then throw an error
if (tabBucketCols.size() == 0 && sampleExprs.size() == 0) {
throw new SemanticException(ErrorMsg.NON_BUCKETED_TABLE.getMsg() + " "
- + tab.getName());
+ + tab.getTableName());
}
if (num > den) {
throw new SemanticException(
ErrorMsg.BUCKETED_NUMBERATOR_BIGGER_DENOMINATOR.getMsg() + " "
- + tab.getName());
+ + tab.getTableName());
}
// check if a predicate is needed
@@ -4944,7 +4944,7 @@
} else {
boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE);
if (testMode) {
- String tabName = tab.getName();
+ String tabName = tab.getTableName();
// has the user explicitly asked not to sample this table
String unSampleTblList = conf
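The first SemanticAnalyzer hunk hoists the output-format class into a local so the error message can name the offending class instead of reporting a generic failure. A reduced sketch of the check, using IllegalArgumentException as a stand-in for the SemanticException/ErrorMsg plumbing:

  import org.apache.hadoop.hive.ql.io.HiveOutputFormat;

  class OutputFormatCheckSketch {
    // Any destination table's output format must implement
    // HiveOutputFormat; the local variable lets the diagnostic say
    // exactly which class failed the check.
    static void checkOutputFormat(Class<?> outputFormatClass) {
      if (!HiveOutputFormat.class.isAssignableFrom(outputFormatClass)) {
        throw new IllegalArgumentException(
            "Invalid output format type. The class is " + outputFormatClass);
      }
    }
  }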
Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Thu Feb 11 02:33:54 2010
@@ -153,7 +153,7 @@
ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
ft.checkValidity();
assertEquals("Table names didn't match for table: " + tableName, tbl
- .getName(), ft.getName());
+ .getTableName(), ft.getTableName());
assertEquals("Table owners didn't match for table: " + tableName, tbl
.getOwner(), ft.getOwner());
assertEquals("Table retention didn't match for table: " + tableName,
@@ -226,7 +226,7 @@
assertNotNull("Unable to fetch table", ft);
ft.checkValidity();
assertEquals("Table names didn't match for table: " + tableName, tbl
- .getName(), ft.getName());
+ .getTableName(), ft.getTableName());
assertEquals("Table owners didn't match for table: " + tableName, tbl
.getOwner(), ft.getOwner());
assertEquals("Table retention didn't match for table: " + tableName,
@@ -257,7 +257,7 @@
private static Table createTestTable(String dbName, String tableName) throws HiveException {
Table tbl = new Table(tableName);
- tbl.getTTable().setDbName(dbName);
+ tbl.setDbName(dbName);
tbl.setInputFormatClass(SequenceFileInputFormat.class.getName());
tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
tbl.setSerializationLib(ThriftDeserializer.class.getName());
@@ -294,7 +294,7 @@
// also test getting a table from a specific db
Table table1 = hm.getTable(dbName, table1Name);
assertNotNull(table1);
- assertEquals(table1Name, table1.getName());
+ assertEquals(table1Name, table1.getTableName());
assertTrue(fs.exists(table1.getPath()));
// and test dropping this specific table
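With the new delegating setters, the tests no longer reach through getTTable(). A sketch of the updated construction idiom from the hunks above (throws Exception stands in for the setters' exact checked exceptions):

  import org.apache.hadoop.hive.ql.metadata.Table;
  import org.apache.hadoop.mapred.SequenceFileInputFormat;
  import org.apache.hadoop.mapred.SequenceFileOutputFormat;

  class TestTableSketch {
    // Configure the wrapper directly; setDbName() now delegates to the
    // underlying Thrift table, so no getTTable() call is needed.
    static Table makeTestTable(String dbName, String tableName)
        throws Exception {
      Table tbl = new Table(tableName);
      tbl.setDbName(dbName);
      tbl.setInputFormatClass(SequenceFileInputFormat.class.getName());
      tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
      return tbl;
    }
  }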
Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java Thu Feb 11 02:33:54 2010
@@ -13,6 +13,7 @@
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.mapred.TextInputFormat;
@@ -91,9 +92,9 @@
hive.createDatabase(dbName, "");
Table table = new Table(tableName);
- table.getTTable().setDbName(dbName);
+ table.setDbName(dbName);
table.setInputFormatClass(TextInputFormat.class);
- table.setOutputFormatClass(IgnoreKeyTextOutputFormat.class);
+ table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
hive.createTable(table);
// now we've got a table, check that it works
@@ -161,9 +162,9 @@
hive.createDatabase(dbName, "");
Table table = new Table(tableName);
- table.getTTable().setDbName(dbName);
+ table.setDbName(dbName);
table.setInputFormatClass(TextInputFormat.class);
- table.setOutputFormatClass(IgnoreKeyTextOutputFormat.class);
+ table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
table.setPartCols(partCols);
hive.createTable(table);
@@ -196,7 +197,7 @@
assertEquals(1, result.getPartitionsNotOnFs().size());
assertEquals(partToRemove.getName(), result.getPartitionsNotOnFs().get(0)
.getPartitionName());
- assertEquals(partToRemove.getTable().getName(), result
+ assertEquals(partToRemove.getTable().getTableName(), result
.getPartitionsNotOnFs().get(0).getTableName());
assertTrue(result.getPartitionsNotInMs().isEmpty());
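The swap from IgnoreKeyTextOutputFormat to HiveIgnoreKeyTextOutputFormat in these hunks lines up with the stricter HiveOutputFormat check shown in the SemanticAnalyzer changes above: presumably the plain mapred class no longer qualifies, so the tests must hand createTable an output format that implements HiveOutputFormat.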
Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java?rev=908809&r1=908808&r2=908809&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java Thu Feb 11 02:33:54 2010
@@ -43,8 +43,7 @@
tbl.setDataLocation(new URI("tmplocation"));
tbl.setPartCols(partCols);
- Map<String, String> spec = new org.apache.hadoop.hive.ql.metadata.Partition(
- tbl, tp).getSpec();
+ Map<String, String> spec = new org.apache.hadoop.hive.ql.metadata.Partition(tbl, tp).getSpec();
assertFalse(spec.isEmpty());
assertEquals(spec.get(PARTITION_COL), PARTITION_VALUE);
}
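TestPartition exercises Partition.getSpec(). A hypothetical reduction of what a partition spec amounts to: the table's partition-key names paired positionally with the partition's value list:

  import java.util.LinkedHashMap;
  import java.util.List;
  import java.util.Map;

  class PartitionSpecSketch {
    // Sketch only: pair each partition-key name with the value at the
    // same index, preserving key order for predictable iteration.
    static Map<String, String> makeSpec(List<String> partKeyNames,
        List<String> partValues) {
      Map<String, String> spec = new LinkedHashMap<String, String>();
      for (int i = 0; i < partKeyNames.size(); i++) {
        spec.put(partKeyNames.get(i), partValues.get(i));
      }
      return spec;
    }
  }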