Posted to commits@hive.apache.org by he...@apache.org on 2010/11/30 01:05:20 UTC
svn commit: r1040351 - in /hive/trunk: ./
ql/src/java/org/apache/hadoop/hive/ql/exec/
ql/src/java/org/apache/hadoop/hive/ql/optimizer/
ql/src/java/org/apache/hadoop/hive/ql/plan/
ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/
Author: heyongqiang
Date: Tue Nov 30 00:05:19 2010
New Revision: 1040351
URL: http://svn.apache.org/viewvc?rev=1040351&view=rev
Log:
HIVE-1804 Mapjoin will fail if there are no files associating with the join tables (Liyin Tang via He Yongqiang)
Added:
hive/trunk/ql/src/test/queries/clientpositive/join_empty.q
hive/trunk/ql/src/test/results/clientpositive/join_empty.q.out
Modified:
hive/trunk/CHANGES.txt
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
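Summary of the change (as read from the diff below): FetchOperator gains an isEmptyTable flag, and getOutputObjectInspector() now returns null when an alias has no table or partition descriptors to read. MapredLocalTask uses that null to mark the alias's table as empty and, instead of scanning it, writes an empty hashtable file via a new generateDummyHashTable(), so the downstream map join still finds a (zero-entry) hashtable to load. The two eager guards that used to abort such queries (the SemanticException in MapJoinProcessor for an alias with no input paths, and the HiveException in ConditionalResolverCommonJoin for zero-size input) are removed. The new join_empty.q test covers empty partitioned and unpartitioned tables, with explicit mapjoin hints and with hive.auto.convert.join.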
Modified: hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hive/trunk/CHANGES.txt?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/CHANGES.txt (original)
+++ hive/trunk/CHANGES.txt Tue Nov 30 00:05:19 2010
@@ -552,6 +552,10 @@ Trunk - Unreleased
HIVE-1792 Track the joins which are being converted to map-join
automatically (Liyin Tang via namit)
+ HIVE-1804 Mapjoin will fail if there are no files associating with the join tables
+ (Liyin Tang via He Yongqiang)
+
+
TESTS
HIVE-1464. improve test query performance
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Tue Nov 30 00:05:19 2010
@@ -64,6 +64,26 @@ public class FetchOperator implements Se
static Log LOG = LogFactory.getLog(FetchOperator.class.getName());
static LogHelper console = new LogHelper(LOG);
+ private boolean isEmptyTable;
+ private boolean isNativeTable;
+ private FetchWork work;
+ private int splitNum;
+ private PartitionDesc currPart;
+ private TableDesc currTbl;
+ private boolean tblDataDone;
+
+ private transient RecordReader<WritableComparable, Writable> currRecReader;
+ private transient InputSplit[] inputSplits;
+ private transient InputFormat inputFormat;
+ private transient JobConf job;
+ private transient WritableComparable key;
+ private transient Writable value;
+ private transient Deserializer serde;
+ private transient Iterator<Path> iterPath;
+ private transient Iterator<PartitionDesc> iterPartDesc;
+ private transient Path currPath;
+ private transient StructObjectInspector rowObjectInspector;
+ private transient Object[] rowWithPart;
public FetchOperator() {
}
@@ -123,38 +143,24 @@ public class FetchOperator implements Se
this.tblDataDone = tblDataDone;
}
- private boolean isNativeTable;
- private FetchWork work;
- private int splitNum;
- private PartitionDesc currPart;
- private TableDesc currTbl;
- private boolean tblDataDone;
+ public boolean isEmptyTable() {
+ return isEmptyTable;
+ }
- private transient RecordReader<WritableComparable, Writable> currRecReader;
- private transient InputSplit[] inputSplits;
- private transient InputFormat inputFormat;
- private transient JobConf job;
- private transient WritableComparable key;
- private transient Writable value;
- private transient Deserializer serde;
- private transient Iterator<Path> iterPath;
- private transient Iterator<PartitionDesc> iterPartDesc;
- private transient Path currPath;
- private transient StructObjectInspector rowObjectInspector;
- private transient Object[] rowWithPart;
+ public void setEmptyTable(boolean isEmptyTable) {
+ this.isEmptyTable = isEmptyTable;
+ }
/**
* A cache of InputFormat instances.
*/
- private static Map<Class, InputFormat<WritableComparable, Writable>> inputFormats =
- new HashMap<Class, InputFormat<WritableComparable, Writable>>();
+ private static Map<Class, InputFormat<WritableComparable, Writable>> inputFormats = new HashMap<Class, InputFormat<WritableComparable, Writable>>();
- static InputFormat<WritableComparable, Writable> getInputFormatFromCache(
- Class inputFormatClass, Configuration conf) throws IOException {
+ static InputFormat<WritableComparable, Writable> getInputFormatFromCache(Class inputFormatClass,
+ Configuration conf) throws IOException {
if (!inputFormats.containsKey(inputFormatClass)) {
try {
- InputFormat<WritableComparable, Writable> newInstance =
- (InputFormat<WritableComparable, Writable>) ReflectionUtils
+ InputFormat<WritableComparable, Writable> newInstance = (InputFormat<WritableComparable, Writable>) ReflectionUtils
.newInstance(inputFormatClass, conf);
inputFormats.put(inputFormatClass, newInstance);
} catch (Exception e) {
@@ -169,10 +175,7 @@ public class FetchOperator implements Se
List<String> partNames = new ArrayList<String>();
List<String> partValues = new ArrayList<String>();
- String pcols = currPart
- .getTableDesc()
- .getProperties()
- .getProperty(
+ String pcols = currPart.getTableDesc().getProperties().getProperty(
org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
LinkedHashMap<String, String> partSpec = currPart.getPartSpec();
@@ -181,16 +184,14 @@ public class FetchOperator implements Se
for (String key : partKeys) {
partNames.add(key);
partValues.add(partSpec.get(key));
- partObjectInspectors
- .add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
+ partObjectInspectors.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
}
StructObjectInspector partObjectInspector = ObjectInspectorFactory
.getStandardStructObjectInspector(partNames, partObjectInspectors);
rowObjectInspector = (StructObjectInspector) serde.getObjectInspector();
rowWithPart[1] = partValues;
- rowObjectInspector = ObjectInspectorFactory
- .getUnionStructObjectInspector(Arrays
+ rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays
.asList(new StructObjectInspector[] {rowObjectInspector, partObjectInspector}));
}
@@ -226,8 +227,7 @@ public class FetchOperator implements Se
}
return;
} else {
- iterPath = FetchWork.convertStringToPathArray(work.getPartDir())
- .iterator();
+ iterPath = FetchWork.convertStringToPathArray(work.getPartDir()).iterator();
iterPartDesc = work.getPartDesc().iterator();
}
}
@@ -235,15 +235,16 @@ public class FetchOperator implements Se
while (iterPath.hasNext()) {
Path nxt = iterPath.next();
PartitionDesc prt = null;
- if(iterPartDesc != null)
+ if (iterPartDesc != null) {
prt = iterPartDesc.next();
+ }
FileSystem fs = nxt.getFileSystem(job);
if (fs.exists(nxt)) {
FileStatus[] fStats = listStatusUnderPath(fs, nxt);
for (FileStatus fStat : fStats) {
if (fStat.getLen() > 0) {
currPath = nxt;
- if(iterPartDesc != null) {
+ if (iterPartDesc != null) {
currPart = prt;
}
return;
@@ -265,14 +266,13 @@ public class FetchOperator implements Se
// to the default file system - which may or may not be online during pure
// metadata
// operations
- job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils
- .escapeString(currPath.toString()));
+ job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils.escapeString(currPath
+ .toString()));
PartitionDesc tmp;
if (currTbl == null) {
tmp = currPart;
- }
- else {
+ } else {
tmp = new PartitionDesc(currTbl, null);
}
@@ -283,9 +283,9 @@ public class FetchOperator implements Se
serde = tmp.getDeserializerClass().newInstance();
serde.initialize(job, tmp.getProperties());
- if(LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Creating fetchTask with deserializer typeinfo: "
- + serde.getObjectInspector().getTypeName());
+ + serde.getObjectInspector().getTypeName());
LOG.debug("deserializer properties: " + tmp.getProperties());
}
@@ -303,8 +303,7 @@ public class FetchOperator implements Se
return getRecordReader();
}
- currRecReader = inputFormat.getRecordReader(inputSplits[splitNum++], job,
- Reporter.NULL);
+ currRecReader = inputFormat.getRecordReader(inputSplits[splitNum++], job, Reporter.NULL);
key = currRecReader.createKey();
value = currRecReader.createValue();
return currRecReader;
@@ -363,17 +362,17 @@ public class FetchOperator implements Se
}
/**
- * used for bucket map join. there is a hack for getting partitionDesc.
- * bucket map join right now only allow one partition present in bucket map join.
+ * used for bucket map join. there is a hack for getting partitionDesc. bucket map join right now
+ * only allow one partition present in bucket map join.
*/
- public void setupContext (Iterator<Path> iterPath, Iterator<PartitionDesc> iterPartDesc) {
+ public void setupContext(Iterator<Path> iterPath, Iterator<PartitionDesc> iterPartDesc) {
this.iterPath = iterPath;
this.iterPartDesc = iterPartDesc;
- if(iterPartDesc == null) {
+ if (iterPartDesc == null) {
if (work.getTblDir() != null) {
this.currTbl = work.getTblDesc();
} else {
- //hack, get the first.
+ // hack, get the first.
List<PartitionDesc> listParts = work.getPartDesc();
currPart = listParts.get(0);
}
@@ -387,14 +386,19 @@ public class FetchOperator implements Se
Deserializer serde = tbl.getDeserializerClass().newInstance();
serde.initialize(job, tbl.getProperties());
return serde.getObjectInspector();
- } else {
+ } else if (work.getPartDesc() != null) {
List<PartitionDesc> listParts = work.getPartDesc();
+ if(listParts.size() == 0) {
+ return null;
+ }
currPart = listParts.get(0);
serde = currPart.getTableDesc().getDeserializerClass().newInstance();
serde.initialize(job, currPart.getTableDesc().getProperties());
setPrtnDesc();
currPart = null;
return rowObjectInspector;
+ } else {
+ return null;
}
} catch (Exception e) {
throw new HiveException("Failed with exception " + e.getMessage()
@@ -403,21 +407,20 @@ public class FetchOperator implements Se
}
/**
- * Lists status for all files under a given path. Whether or not
- * this is recursive depends on the setting of
- * job configuration parameter mapred.input.dir.recursive.
+ * Lists status for all files under a given path. Whether or not this is recursive depends on the
+ * setting of job configuration parameter mapred.input.dir.recursive.
*
- * @param fs file system
+ * @param fs
+ * file system
*
- * @param p path in file system
+ * @param p
+ * path in file system
*
* @return list of file status entries
*/
- private FileStatus[] listStatusUnderPath(FileSystem fs, Path p)
- throws IOException {
+ private FileStatus[] listStatusUnderPath(FileSystem fs, Path p) throws IOException {
HiveConf hiveConf = new HiveConf(job, FetchOperator.class);
- boolean recursive =
- hiveConf.getBoolVar(HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE);
+ boolean recursive = hiveConf.getBoolVar(HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE);
if (!recursive) {
return fs.listStatus(p);
}
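Net effect of the FetchOperator changes, condensed (a restatement with comments added, not verbatim from the patch): getOutputObjectInspector() now distinguishes three cases instead of two, and callers react to the null case through the new empty-table flag:

    // table-backed alias: unchanged, deserializer built from the table desc.
    // partition-backed alias: an empty partition list now returns null
    //   instead of failing on listParts.get(0).
    // neither table nor partitions: also returns null.
    ObjectInspector oi = fetchOp.getOutputObjectInspector();
    if (oi != null) {
      forwardOp.initialize(jobConf, new ObjectInspector[] {oi});
    } else {
      // nothing to deserialize for this alias; let the local task
      // substitute a dummy hashtable instead of scanning it
      fetchOp.setEmptyTable(true);
    }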
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java Tue Nov 30 00:05:19 2010
@@ -44,6 +44,9 @@ import org.apache.hadoop.hive.ql.Context
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.exec.Utilities.StreamPrinter;
+import org.apache.hadoop.hive.ql.exec.persistence.AbstractMapJoinKey;
+import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectValue;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
@@ -234,7 +237,7 @@ public class MapredLocalTask extends Tas
memoryMXBean = ManagementFactory.getMemoryMXBean();
long startTime = System.currentTimeMillis();
console.printInfo(Utilities.now()
- + "\tStarting to luaunch local task to process map join;\tmaximum memory = "
+ + "\tStarting to launch local task to process map join;\tmaximum memory = "
+ memoryMXBean.getHeapMemoryUsage().getMax());
fetchOperators = new HashMap<String, FetchOperator>();
Map<FetchOperator, JobConf> fetchOpJobConfMap = new HashMap<FetchOperator, JobConf>();
@@ -286,6 +289,12 @@ public class MapredLocalTask extends Tas
setUpFetchOpContext(fetchOp, alias, bigTableBucket);
}
+ if (fetchOp.isEmptyTable()) {
+ //generate empty hashtable for empty table
+ this.generateDummyHashTable(alias, bigTableBucket);
+ continue;
+ }
+
// get the root operator
Operator<? extends Serializable> forwardOp = work.getAliasToWork().get(alias);
// walk through the operator tree
@@ -341,7 +350,8 @@ public class MapredLocalTask extends Tas
// initilize all forward operator
for (Map.Entry<String, FetchOperator> entry : fetchOperators.entrySet()) {
// get the forward op
- Operator<? extends Serializable> forwardOp = work.getAliasToWork().get(entry.getKey());
+ String alias = entry.getKey();
+ Operator<? extends Serializable> forwardOp = work.getAliasToWork().get(alias);
// put the exe context into all the operators
forwardOp.setExecContext(execContext);
@@ -353,11 +363,50 @@ public class MapredLocalTask extends Tas
jobConf = job;
}
// initialize the forward operator
- forwardOp.initialize(jobConf, new ObjectInspector[] {fetchOp.getOutputObjectInspector()});
- l4j.info("fetchoperator for " + entry.getKey() + " initialized");
+ ObjectInspector objectInspector = fetchOp.getOutputObjectInspector();
+ if (objectInspector != null) {
+ forwardOp.initialize(jobConf, new ObjectInspector[] {objectInspector});
+ l4j.info("fetchoperator for " + entry.getKey() + " initialized");
+ } else {
+ fetchOp.setEmptyTable(true);
+ }
}
}
+ private void generateDummyHashTable(String alias, String bigBucketFileName) throws HiveException,IOException {
+ // find the (byte)tag for the map join(HashTableSinkOperator)
+ Operator<? extends Serializable> parentOp = work.getAliasToWork().get(alias);
+ Operator<? extends Serializable> childOp = parentOp.getChildOperators().get(0);
+ while ((childOp != null) && (!(childOp instanceof HashTableSinkOperator))) {
+ parentOp = childOp;
+ assert parentOp.getChildOperators().size() == 1;
+ childOp = parentOp.getChildOperators().get(0);
+ }
+ if (childOp == null) {
+ throw new HiveException(
+ "Cannot find HashTableSink op by tracing down the table scan operator tree");
+ }
+ byte tag = (byte) childOp.getParentOperators().indexOf(parentOp);
+
+ // generate empty hashtable for this (byte)tag
+ String tmpURI = this.getWork().getTmpFileURI();
+ HashMapWrapper<AbstractMapJoinKey, MapJoinObjectValue> hashTable =
+ new HashMapWrapper<AbstractMapJoinKey, MapJoinObjectValue>();
+
+ if (bigBucketFileName == null || bigBucketFileName.length() == 0) {
+ bigBucketFileName = "-";
+ }
+ String tmpURIPath = Utilities.generatePath(tmpURI, tag, bigBucketFileName);
+ console.printInfo(Utilities.now() + "\tDump the hashtable into file: " + tmpURIPath);
+ Path path = new Path(tmpURIPath);
+ FileSystem fs = path.getFileSystem(job);
+ File file = new File(path.toUri().getPath());
+ fs.create(path);
+ long fileLength = hashTable.flushMemoryCacheToPersistent(file);
+ console.printInfo(Utilities.now() + "\tUpload 1 File to: " + tmpURIPath + " File size: "
+ + fileLength);
+ hashTable.close();
+ }
private void setUpFetchOpContext(FetchOperator fetchOp, String alias, String currentInputFile)
throws Exception {
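generateDummyHashTable() recovers the join tag by walking the alias's single-child operator chain from the table scan down to the HashTableSinkOperator; the tag is that branch's index among the sink's parents. It then flushes an empty HashMapWrapper to the same tmp path a populated hashtable would use (Utilities.generatePath(tmpURI, tag, bigBucketFileName), with "-" standing in when there is no bucket file name), so the map-side join later loads a zero-entry table rather than failing on a missing file. The tag walk, condensed with comments:

    // start at the alias's root (table scan) operator
    Operator<? extends Serializable> parent = work.getAliasToWork().get(alias);
    Operator<? extends Serializable> child = parent.getChildOperators().get(0);
    while (child != null && !(child instanceof HashTableSinkOperator)) {
      parent = child;                             // each node on this branch
      child = parent.getChildOperators().get(0);  // is asserted to have one child
    }
    // the branch's position among the sink's parents identifies the join side
    byte tag = (byte) child.getParentOperators().indexOf(parent);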
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java Tue Nov 30 00:05:19 2010
@@ -582,6 +582,7 @@ public final class GenMapRedUtils {
if (aliasPartnDesc == null) {
aliasPartnDesc = new PartitionDesc(Utilities.getTableDesc(parseCtx
.getTopToTable().get(topOp)), null);
+
}
plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java Tue Nov 30 00:05:19 2010
@@ -184,10 +184,6 @@ public class MapJoinProcessor implements
pathToAliases.remove(path);
}
- if (pathSet.size() == 0) {
- throw new SemanticException("No input path for alias " + alias);
- }
-
// create fetch work
FetchWork fetchWork = null;
List<String> partDir = new ArrayList<String>();
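With the guard removed, an alias whose input paths were all pruned away no longer kills plan generation; the FetchWork is built with an empty directory list and the empty input is handled at runtime by the MapredLocalTask changes above. Sketched, with comments:

    // previously: pathSet.size() == 0 threw
    //   new SemanticException("No input path for alias " + alias)
    // now: fall through and build a FetchWork whose partDir list
    // may simply stay empty
    FetchWork fetchWork = null;
    List<String> partDir = new ArrayList<String>();  // possibly empty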
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java?rev=1040351&r1=1040350&r2=1040351&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java Tue Nov 30 00:05:19 2010
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* ConditionalResolverSkewJoin.
@@ -132,9 +131,6 @@ public class ConditionalResolverCommonJo
for (int i = 0; i < fstatus.length; i++) {
fileSize += fstatus[i].getLen();
}
- if (fileSize == 0) {
- throw new HiveException("Input file size is 0");
- }
// put into list and sorted set
aliasList.add(alias);
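Same idea here: a candidate small table whose files sum to zero bytes no longer aborts the conditional resolver; it just sorts as the smallest alias, which if anything makes it the preferred map-join side. Condensed, with comments:

    long fileSize = 0;
    for (int i = 0; i < fstatus.length; i++) {
      fileSize += fstatus[i].getLen();   // a total of 0 is now legal
    }
    // removed: if (fileSize == 0) throw new HiveException("Input file size is 0")
    aliasList.add(alias);                // still considered for the map join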
Added: hive/trunk/ql/src/test/queries/clientpositive/join_empty.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/join_empty.q?rev=1040351&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/join_empty.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/join_empty.q Tue Nov 30 00:05:19 2010
@@ -0,0 +1,10 @@
+create table srcpart_empty(key int, value string) partitioned by (ds string);
+create table src2_empty (key int, value string);
+
+select /*+mapjoin(a)*/ a.key, b.value from srcpart_empty a join src b on a.key=b.key;
+select /*+mapjoin(a)*/ a.key, b.value from src2_empty a join src b on a.key=b.key;
+
+set hive.mapred.mode=nonstrict;
+set hive.auto.convert.join = true;
+select a.key, b.value from srcpart_empty a join src b on a.key=b.key;
+select a.key, b.value from src2_empty a join src b on a.key=b.key;
\ No newline at end of file
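The test exercises both shapes of the bug: srcpart_empty is a partitioned table with no partitions, and src2_empty is an unpartitioned table with no data files; each is joined against src, first with an explicit /*+mapjoin(a)*/ hint and then with hive.auto.convert.join=true. As the .q.out below shows, all four queries complete and return no rows instead of failing. (For reference, qfile tests of this vintage are typically run with ant test -Dtestcase=TestCliDriver -Dqfile=join_empty.q; that invocation is taken from the Hive developer documentation of the period, not from this commit.)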
Added: hive/trunk/ql/src/test/results/clientpositive/join_empty.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/join_empty.q.out?rev=1040351&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/join_empty.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/join_empty.q.out Tue Nov 30 00:05:19 2010
@@ -0,0 +1,46 @@
+PREHOOK: query: create table srcpart_empty(key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table srcpart_empty(key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@srcpart_empty
+PREHOOK: query: create table src2_empty (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src2_empty (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src2_empty
+PREHOOK: query: select /*+mapjoin(a)*/ a.key, b.value from srcpart_empty a join src b on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-39_138_107997606390957326/-mr-10000
+POSTHOOK: query: select /*+mapjoin(a)*/ a.key, b.value from srcpart_empty a join src b on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-39_138_107997606390957326/-mr-10000
+PREHOOK: query: select /*+mapjoin(a)*/ a.key, b.value from src2_empty a join src b on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src2_empty
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-43_473_3587274534534962486/-mr-10000
+POSTHOOK: query: select /*+mapjoin(a)*/ a.key, b.value from src2_empty a join src b on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src2_empty
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-43_473_3587274534534962486/-mr-10000
+PREHOOK: query: select a.key, b.value from srcpart_empty a join src b on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-47_872_1531725911506590422/-mr-10000
+POSTHOOK: query: select a.key, b.value from srcpart_empty a join src b on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-47_872_1531725911506590422/-mr-10000
+PREHOOK: query: select a.key, b.value from src2_empty a join src b on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src2_empty
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-52_182_4671419724484282586/-mr-10000
+POSTHOOK: query: select a.key, b.value from src2_empty a join src b on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src2_empty
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-22_17-44-52_182_4671419724484282586/-mr-10000