Posted to commits@hive.apache.org by zs...@apache.org on 2010/01/25 19:49:05 UTC
svn commit: r902921 [3/26] - in /hadoop/hive/trunk: ./
contrib/src/java/org/apache/hadoop/hive/contrib/genericudf/example/
contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/
ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoo...
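The hunks below uniformly rename the query-plan descriptor classes in org.apache.hadoop.hive.ql.plan from lowerCamel to conventional Java UpperCamel names (mapredWork -> MapredWork, tableDesc -> TableDesc, fileSinkDesc -> FileSinkDesc, and so on) and update every import, cast, and constructor call in the optimizer package accordingly. A minimal sketch of the resulting call pattern, assembled only from signatures visible in this diff; the wrapper class, method name, and arguments are illustrative placeholders and are not part of the commit:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
    import org.apache.hadoop.hive.ql.parse.ParseContext;
    import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;
    import org.apache.hadoop.hive.ql.plan.TableDesc;

    // Hypothetical fragment: before r902921 this code referred to mapredWork,
    // tableDesc, fileSinkDesc and partitionDesc; after the rename only the
    // type names change, the calls themselves are unchanged.
    class RenameExample {
      static MapredWork buildMergePlan(FileSinkDesc fsConf, String finalName,
          ParseContext parseCtx) {
        MapredWork cplan = GenMapRedUtils.getMapRedWork();            // was mapredWork
        TableDesc ts = (TableDesc) fsConf.getTableInfo().clone();     // was tableDesc
        FileSinkDesc merged = new FileSinkDesc(finalName, ts,         // was fileSinkDesc
            parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSRESULT));
        cplan.getPathToPartitionInfo().put(fsConf.getDirName(),
            new PartitionDesc(fsConf.getTableInfo(), null));          // was partitionDesc
        return cplan;
      }
    }
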
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java Mon Jan 25 18:48:58 2010
@@ -50,17 +50,17 @@
import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
import org.apache.hadoop.hive.ql.plan.ConditionalWork;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.extractDesc;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.loadFileDesc;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.moveWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
-import org.apache.hadoop.hive.ql.plan.tableScanDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExtractDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -105,10 +105,10 @@
// merge for a map-only job
// or for a map-reduce job
if ((parseCtx.getConf().getBoolVar(
- HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((mapredWork) currTask
+ HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((MapredWork) currTask
.getWork()).getReducer() == null))
|| (parseCtx.getConf().getBoolVar(
- HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((mapredWork) currTask
+ HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask
.getWork()).getReducer() != null))) {
chDir = true;
}
@@ -132,35 +132,35 @@
RowSchema fsRS = fsOp.getSchema();
// create a reduce Sink operator - key is the first column
- ArrayList<exprNodeDesc> keyCols = new ArrayList<exprNodeDesc>();
+ ArrayList<ExprNodeDesc> keyCols = new ArrayList<ExprNodeDesc>();
keyCols.add(TypeCheckProcFactory.DefaultExprProcessor
.getFuncExprNodeDesc("rand"));
- ArrayList<exprNodeDesc> valueCols = new ArrayList<exprNodeDesc>();
+ ArrayList<ExprNodeDesc> valueCols = new ArrayList<ExprNodeDesc>();
for (ColumnInfo ci : fsRS.getSignature()) {
- valueCols.add(new exprNodeColumnDesc(ci.getType(), ci.getInternalName(),
+ valueCols.add(new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(),
ci.getTabAlias(), ci.getIsPartitionCol()));
}
// create a dummy tableScan operator
Operator<? extends Serializable> ts_op = OperatorFactory.get(
- tableScanDesc.class, fsRS);
+ TableScanDesc.class, fsRS);
ArrayList<String> outputColumns = new ArrayList<String>();
for (int i = 0; i < valueCols.size(); i++) {
outputColumns.add(SemanticAnalyzer.getColumnInternalName(i));
}
- reduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(
- new ArrayList<exprNodeDesc>(), valueCols, outputColumns, false, -1, -1,
+ ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(
+ new ArrayList<ExprNodeDesc>(), valueCols, outputColumns, false, -1, -1,
-1);
OperatorFactory.getAndMakeChild(rsDesc, fsRS, ts_op);
- mapredWork cplan = GenMapRedUtils.getMapRedWork();
+ MapredWork cplan = GenMapRedUtils.getMapRedWork();
ParseContext parseCtx = ctx.getParseCtx();
Task<? extends Serializable> mergeTask = TaskFactory.get(cplan, parseCtx
.getConf());
- fileSinkDesc fsConf = fsOp.getConf();
+ FileSinkDesc fsConf = fsOp.getConf();
// Add the extract operator to get the value fields
RowResolver out_rwsch = new RowResolver();
@@ -174,19 +174,19 @@
pos = Integer.valueOf(pos.intValue() + 1);
}
- Operator extract = OperatorFactory.getAndMakeChild(new extractDesc(
- new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
+ Operator extract = OperatorFactory.getAndMakeChild(new ExtractDesc(
+ new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
Utilities.ReduceField.VALUE.toString(), "", false)), new RowSchema(
out_rwsch.getColumnInfos()));
- tableDesc ts = (tableDesc) fsConf.getTableInfo().clone();
+ TableDesc ts = (TableDesc) fsConf.getTableInfo().clone();
fsConf
.getTableInfo()
.getProperties()
.remove(
org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
FileSinkOperator newOutput = (FileSinkOperator) OperatorFactory
- .getAndMakeChild(new fileSinkDesc(finalName, ts, parseCtx.getConf()
+ .getAndMakeChild(new FileSinkDesc(finalName, ts, parseCtx.getConf()
.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)), fsRS, extract);
cplan.setReducer(extract);
@@ -195,10 +195,10 @@
cplan.getPathToAliases().put(fsConf.getDirName(), aliases);
cplan.getAliasToWork().put(fsConf.getDirName(), ts_op);
cplan.getPathToPartitionInfo().put(fsConf.getDirName(),
- new partitionDesc(fsConf.getTableInfo(), null));
+ new PartitionDesc(fsConf.getTableInfo(), null));
cplan.setNumReduceTasks(-1);
- moveWork dummyMv = new moveWork(null, null, null, new loadFileDesc(fsOp
+ MoveWork dummyMv = new MoveWork(null, null, null, new LoadFileDesc(fsOp
.getConf().getDirName(), finalName, true, null, null), false);
Task<? extends Serializable> dummyMergeTask = TaskFactory.get(dummyMv, ctx
.getConf());
@@ -234,7 +234,7 @@
List<Task<? extends Serializable>> mvTasks, FileSinkOperator fsOp) {
// find the move task
for (Task<? extends Serializable> mvTsk : mvTasks) {
- moveWork mvWork = (moveWork) mvTsk.getWork();
+ MoveWork mvWork = (MoveWork) mvTsk.getWork();
String srcDir = null;
if (mvWork.getLoadFileWork() != null) {
srcDir = mvWork.getLoadFileWork().getSourceDir();
@@ -315,14 +315,14 @@
assert (!seenOps.contains(currTopOp));
seenOps.add(currTopOp);
GenMapRedUtils.setTaskPlan(currAliasId, currTopOp,
- (mapredWork) currTask.getWork(), false, ctx);
+ (MapredWork) currTask.getWork(), false, ctx);
opTaskMap.put(null, currTask);
rootTasks.add(currTask);
} else {
if (!seenOps.contains(currTopOp)) {
seenOps.add(currTopOp);
GenMapRedUtils.setTaskPlan(currAliasId, currTopOp,
- (mapredWork) mapTask.getWork(), false, ctx);
+ (MapredWork) mapTask.getWork(), false, ctx);
}
// mapTask and currTask should be merged by and join/union operator
// (e.g., GenMRUnion1j) which has multiple topOps.
@@ -347,15 +347,15 @@
if (currMapJoinOp != null) {
opTaskMap.put(null, currTask);
GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(currMapJoinOp);
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
String taskTmpDir = mjCtx.getTaskTmpDir();
- tableDesc tt_desc = mjCtx.getTTDesc();
+ TableDesc tt_desc = mjCtx.getTTDesc();
assert plan.getPathToAliases().get(taskTmpDir) == null;
plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
plan.getPathToPartitionInfo().put(taskTmpDir,
- new partitionDesc(tt_desc, null));
+ new PartitionDesc(tt_desc, null));
plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
return dest;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java Mon Jan 25 18:48:58 2010
@@ -35,7 +35,7 @@
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
/**
* Processor Context for creating map reduce task. Walk the tree in a DFS manner
@@ -95,12 +95,12 @@
public static class GenMRUnionCtx {
Task<? extends Serializable> uTask;
List<String> taskTmpDir;
- List<tableDesc> tt_desc;
+ List<TableDesc> tt_desc;
public GenMRUnionCtx() {
uTask = null;
taskTmpDir = new ArrayList<String>();
- tt_desc = new ArrayList<tableDesc>();
+ tt_desc = new ArrayList<TableDesc>();
}
public Task<? extends Serializable> getUTask() {
@@ -119,18 +119,18 @@
return taskTmpDir;
}
- public void addTTDesc(tableDesc tt_desc) {
+ public void addTTDesc(TableDesc tt_desc) {
this.tt_desc.add(tt_desc);
}
- public List<tableDesc> getTTDesc() {
+ public List<TableDesc> getTTDesc() {
return tt_desc;
}
}
public static class GenMRMapJoinCtx {
String taskTmpDir;
- tableDesc tt_desc;
+ TableDesc tt_desc;
Operator<? extends Serializable> rootMapJoinOp;
MapJoinOperator oldMapJoin;
@@ -147,7 +147,7 @@
* @param rootMapJoinOp
* @param oldMapJoin
*/
- public GenMRMapJoinCtx(String taskTmpDir, tableDesc tt_desc,
+ public GenMRMapJoinCtx(String taskTmpDir, TableDesc tt_desc,
Operator<? extends Serializable> rootMapJoinOp,
MapJoinOperator oldMapJoin) {
this.taskTmpDir = taskTmpDir;
@@ -164,11 +164,11 @@
return taskTmpDir;
}
- public void setTTDesc(tableDesc tt_desc) {
+ public void setTTDesc(TableDesc tt_desc) {
this.tt_desc = tt_desc;
}
- public tableDesc getTTDesc() {
+ public TableDesc getTTDesc() {
return tt_desc;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java Mon Jan 25 18:48:58 2010
@@ -31,7 +31,7 @@
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
/**
* Processor for the rule - table scan followed by reduce sink
@@ -58,7 +58,7 @@
.getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork currPlan = (mapredWork) currTask.getWork();
+ MapredWork currPlan = (MapredWork) currTask.getWork();
Operator<? extends Serializable> currTopOp = mapredCtx.getCurrTopOp();
String currAliasId = mapredCtx.getCurrAliasId();
Operator<? extends Serializable> reducer = op.getChildOperators().get(0);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java Mon Jan 25 18:48:58 2010
@@ -33,7 +33,7 @@
import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
/**
* Processor for the rule - union followed by reduce sink
@@ -71,7 +71,7 @@
.getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx
.getOpTaskMap();
Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java Mon Jan 25 18:48:58 2010
@@ -31,7 +31,7 @@
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
/**
* Processor for the rule - map join followed by reduce sink
@@ -63,7 +63,7 @@
.getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx
.getOpTaskMap();
Task<? extends Serializable> opMapTask = opTaskMap.get(reducer);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java Mon Jan 25 18:48:58 2010
@@ -45,10 +45,10 @@
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
/**
* Processor for the rule - TableScan followed by Union
@@ -125,7 +125,7 @@
Operator<? extends Serializable> parent = union.getParentOperators().get(
pos);
- mapredWork uPlan = null;
+ MapredWork uPlan = null;
// union is encountered for the first time
if (uCtxTask == null) {
@@ -136,7 +136,7 @@
ctx.setUnionTask(union, uCtxTask);
} else {
uTask = uCtxTask.getUTask();
- uPlan = (mapredWork) uTask.getWork();
+ uPlan = (MapredWork) uTask.getWork();
}
// If there is a mapjoin at position 'pos'
@@ -145,19 +145,19 @@
assert mjOp != null;
GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(mjOp);
assert mjCtx != null;
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
String taskTmpDir = mjCtx.getTaskTmpDir();
- tableDesc tt_desc = mjCtx.getTTDesc();
+ TableDesc tt_desc = mjCtx.getTTDesc();
assert plan.getPathToAliases().get(taskTmpDir) == null;
plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
plan.getPathToPartitionInfo().put(taskTmpDir,
- new partitionDesc(tt_desc, null));
+ new PartitionDesc(tt_desc, null));
plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
}
- tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
+ TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
// generate the temporary file
@@ -174,7 +174,7 @@
// Create a file sink operator for this file name
Operator<? extends Serializable> fs_op = OperatorFactory.get(
- new fileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
+ new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
HiveConf.ConfVars.COMPRESSINTERMEDIATE)), parent.getSchema());
assert parent.getChildOperators().size() == 1;
@@ -189,7 +189,7 @@
// If it is map-only task, add the files to be processed
if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos)) {
GenMapRedUtils.setTaskPlan(ctx.getCurrAliasId(), ctx.getCurrTopOp(),
- (mapredWork) currTask.getWork(), false, ctx);
+ (MapredWork) currTask.getWork(), false, ctx);
}
ctx.setCurrTask(uTask);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java Mon Jan 25 18:48:58 2010
@@ -56,16 +56,16 @@
import org.apache.hadoop.hive.ql.parse.RowResolver;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.mapredLocalWork;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
-import org.apache.hadoop.hive.ql.plan.tableScanDesc;
-import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
/**
* General utility common functions for the Processor to convert operator into
@@ -94,14 +94,14 @@
.getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
.getOpTaskMap();
Operator<? extends Serializable> currTopOp = opProcCtx.getCurrTopOp();
opTaskMap.put(reducer, currTask);
plan.setReducer(reducer);
- reduceSinkDesc desc = op.getConf();
+ ReduceSinkDesc desc = op.getConf();
plan.setNumReduceTasks(desc.getNumReducers());
@@ -148,7 +148,7 @@
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(
parentPos));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
.getOpTaskMap();
Operator<? extends Serializable> currTopOp = opProcCtx.getCurrTopOp();
@@ -169,7 +169,7 @@
if (reducer.getClass() == JoinOperator.class) {
plan.setNeedsTagging(true);
}
- reduceSinkDesc desc = (reduceSinkDesc) op.getConf();
+ ReduceSinkDesc desc = (ReduceSinkDesc) op.getConf();
plan.setNumReduceTasks(desc.getNumReducers());
} else {
opTaskMap.put(op, currTask);
@@ -178,7 +178,7 @@
if (!readInputUnion) {
GenMRMapJoinCtx mjCtx = opProcCtx.getMapJoinCtx(currMapJoinOp);
String taskTmpDir;
- tableDesc tt_desc;
+ TableDesc tt_desc;
Operator<? extends Serializable> rootOp;
if (mjCtx.getOldMapJoin() == null) {
@@ -200,7 +200,7 @@
opProcCtx.setCurrMapJoinOp(null);
} else {
- mapJoinDesc desc = (mapJoinDesc) op.getConf();
+ MapJoinDesc desc = (MapJoinDesc) op.getConf();
// The map is overloaded to keep track of mapjoins also
opTaskMap.put(op, currTask);
@@ -237,13 +237,13 @@
.getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
.getOpTaskMap();
opTaskMap.put(reducer, currTask);
plan.setReducer(reducer);
- reduceSinkDesc desc = op.getConf();
+ ReduceSinkDesc desc = op.getConf();
plan.setNumReduceTasks(desc.getNumReducers());
@@ -260,14 +260,14 @@
*/
public static void initUnionPlan(GenMRProcContext opProcCtx,
Task<? extends Serializable> currTask, boolean local) {
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
UnionOperator currUnionOp = opProcCtx.getCurrUnionOp();
assert currUnionOp != null;
GenMRUnionCtx uCtx = opProcCtx.getUnionTask(currUnionOp);
assert uCtx != null;
List<String> taskTmpDirLst = uCtx.getTaskTmpDir();
- List<tableDesc> tt_descLst = uCtx.getTTDesc();
+ List<TableDesc> tt_descLst = uCtx.getTTDesc();
assert !taskTmpDirLst.isEmpty() && !tt_descLst.isEmpty();
assert taskTmpDirLst.size() == tt_descLst.size();
int size = taskTmpDirLst.size();
@@ -275,12 +275,12 @@
for (int pos = 0; pos < size; pos++) {
String taskTmpDir = taskTmpDirLst.get(pos);
- tableDesc tt_desc = tt_descLst.get(pos);
+ TableDesc tt_desc = tt_descLst.get(pos);
if (plan.getPathToAliases().get(taskTmpDir) == null) {
plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
plan.getPathToPartitionInfo().put(taskTmpDir,
- new partitionDesc(tt_desc, null));
+ new PartitionDesc(tt_desc, null));
plan.getAliasToWork().put(taskTmpDir, currUnionOp);
}
}
@@ -305,7 +305,7 @@
GenMRProcContext opProcCtx, int pos, boolean split,
boolean readMapJoinData, boolean readUnionData) throws SemanticException {
Task<? extends Serializable> currTask = task;
- mapredWork plan = (mapredWork) currTask.getWork();
+ MapredWork plan = (MapredWork) currTask.getWork();
Operator<? extends Serializable> currTopOp = opProcCtx.getCurrTopOp();
List<Task<? extends Serializable>> parTasks = null;
@@ -334,7 +334,7 @@
seenOps.add(currTopOp);
boolean local = false;
if (pos != -1) {
- local = (pos == ((mapJoinDesc) op.getConf()).getPosBigTable()) ? false
+ local = (pos == ((MapJoinDesc) op.getConf()).getPosBigTable()) ? false
: true;
}
setTaskPlan(currAliasId, currTopOp, plan, local, opProcCtx);
@@ -352,7 +352,7 @@
// obtained from the old map join
MapJoinOperator oldMapJoin = mjCtx.getOldMapJoin();
String taskTmpDir = null;
- tableDesc tt_desc = null;
+ TableDesc tt_desc = null;
Operator<? extends Serializable> rootOp = null;
if (oldMapJoin == null) {
@@ -398,7 +398,7 @@
public static void splitPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx)
throws SemanticException {
// Generate a new task
- mapredWork cplan = getMapRedWork();
+ MapredWork cplan = getMapRedWork();
ParseContext parseCtx = opProcCtx.getParseCtx();
Task<? extends Serializable> redTask = TaskFactory.get(cplan, parseCtx
.getConf());
@@ -406,7 +406,7 @@
// Add the reducer
cplan.setReducer(reducer);
- reduceSinkDesc desc = op.getConf();
+ ReduceSinkDesc desc = op.getConf();
cplan.setNumReduceTasks(new Integer(desc.getNumReducers()));
@@ -434,16 +434,16 @@
* processing context
*/
public static void setTaskPlan(String alias_id,
- Operator<? extends Serializable> topOp, mapredWork plan, boolean local,
+ Operator<? extends Serializable> topOp, MapredWork plan, boolean local,
GenMRProcContext opProcCtx) throws SemanticException {
ParseContext parseCtx = opProcCtx.getParseCtx();
Set<ReadEntity> inputs = opProcCtx.getInputs();
ArrayList<Path> partDir = new ArrayList<Path>();
- ArrayList<partitionDesc> partDesc = new ArrayList<partitionDesc>();
+ ArrayList<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
Path tblDir = null;
- tableDesc tblDesc = null;
+ TableDesc tblDesc = null;
PrunedPartitionList partsList = null;
@@ -465,7 +465,7 @@
parts = partsList.getConfirmedPartns();
parts.addAll(partsList.getUnknownPartns());
- partitionDesc aliasPartnDesc = null;
+ PartitionDesc aliasPartnDesc = null;
try {
if (parts.isEmpty()) {
if (!partsList.getDeniedPartns().isEmpty()) {
@@ -482,7 +482,7 @@
// The table does not have any partitions
if (aliasPartnDesc == null) {
- aliasPartnDesc = new partitionDesc(Utilities.getTableDesc(parseCtx
+ aliasPartnDesc = new PartitionDesc(Utilities.getTableDesc(parseCtx
.getTopToTable().get(topOp)), null);
}
@@ -532,14 +532,14 @@
}
Iterator<Path> iterPath = partDir.iterator();
- Iterator<partitionDesc> iterPartnDesc = partDesc.iterator();
+ Iterator<PartitionDesc> iterPartnDesc = partDesc.iterator();
if (!local) {
while (iterPath.hasNext()) {
assert iterPartnDesc.hasNext();
String path = iterPath.next().toString();
- partitionDesc prtDesc = iterPartnDesc.next();
+ PartitionDesc prtDesc = iterPartnDesc.next();
// Add the path to alias mapping
if (plan.getPathToAliases().get(path) == null) {
@@ -554,11 +554,11 @@
plan.getAliasToWork().put(alias_id, topOp);
} else {
// populate local work if needed
- mapredLocalWork localPlan = plan.getMapLocalWork();
+ MapredLocalWork localPlan = plan.getMapLocalWork();
if (localPlan == null) {
- localPlan = new mapredLocalWork(
+ localPlan = new MapredLocalWork(
new LinkedHashMap<String, Operator<? extends Serializable>>(),
- new LinkedHashMap<String, fetchWork>());
+ new LinkedHashMap<String, FetchWork>());
}
assert localPlan.getAliasToWork().get(alias_id) == null;
@@ -568,11 +568,11 @@
localPlan.getAliasToFetchWork()
.put(
alias_id,
- new fetchWork(fetchWork.convertPathToStringArray(partDir),
+ new FetchWork(FetchWork.convertPathToStringArray(partDir),
partDesc));
} else {
localPlan.getAliasToFetchWork().put(alias_id,
- new fetchWork(tblDir.toString(), tblDesc));
+ new FetchWork(tblDir.toString(), tblDesc));
}
plan.setMapLocalWork(localPlan);
}
@@ -593,29 +593,29 @@
* table descriptor
*/
public static void setTaskPlan(String path, String alias,
- Operator<? extends Serializable> topOp, mapredWork plan, boolean local,
- tableDesc tt_desc) throws SemanticException {
+ Operator<? extends Serializable> topOp, MapredWork plan, boolean local,
+ TableDesc tt_desc) throws SemanticException {
if (!local) {
if (plan.getPathToAliases().get(path) == null) {
plan.getPathToAliases().put(path, new ArrayList<String>());
}
plan.getPathToAliases().get(path).add(alias);
- plan.getPathToPartitionInfo().put(path, new partitionDesc(tt_desc, null));
+ plan.getPathToPartitionInfo().put(path, new PartitionDesc(tt_desc, null));
plan.getAliasToWork().put(alias, topOp);
} else {
// populate local work if needed
- mapredLocalWork localPlan = plan.getMapLocalWork();
+ MapredLocalWork localPlan = plan.getMapLocalWork();
if (localPlan == null) {
- localPlan = new mapredLocalWork(
+ localPlan = new MapredLocalWork(
new LinkedHashMap<String, Operator<? extends Serializable>>(),
- new LinkedHashMap<String, fetchWork>());
+ new LinkedHashMap<String, FetchWork>());
}
assert localPlan.getAliasToWork().get(alias) == null;
assert localPlan.getAliasToFetchWork().get(alias) == null;
localPlan.getAliasToWork().put(alias, topOp);
- localPlan.getAliasToFetchWork().put(alias, new fetchWork(alias, tt_desc));
+ localPlan.getAliasToFetchWork().put(alias, new FetchWork(alias, tt_desc));
plan.setMapLocalWork(localPlan);
}
}
@@ -628,7 +628,7 @@
* @param topOp
* current top operator in the path
*/
- public static void setKeyAndValueDesc(mapredWork plan,
+ public static void setKeyAndValueDesc(MapredWork plan,
Operator<? extends Serializable> topOp) {
if (topOp == null) {
return;
@@ -638,7 +638,7 @@
ReduceSinkOperator rs = (ReduceSinkOperator) topOp;
plan.setKeyDesc(rs.getConf().getKeySerializeInfo());
int tag = Math.max(0, rs.getConf().getTag());
- List<tableDesc> tagToSchema = plan.getTagToValueDesc();
+ List<TableDesc> tagToSchema = plan.getTagToValueDesc();
while (tag + 1 > tagToSchema.size()) {
tagToSchema.add(null);
}
@@ -659,13 +659,13 @@
*
* @return the new plan
*/
- public static mapredWork getMapRedWork() {
- mapredWork work = new mapredWork();
+ public static MapredWork getMapRedWork() {
+ MapredWork work = new MapredWork();
work.setPathToAliases(new LinkedHashMap<String, ArrayList<String>>());
- work.setPathToPartitionInfo(new LinkedHashMap<String, partitionDesc>());
+ work.setPathToPartitionInfo(new LinkedHashMap<String, PartitionDesc>());
work
.setAliasToWork(new LinkedHashMap<String, Operator<? extends Serializable>>());
- work.setTagToValueDesc(new ArrayList<tableDesc>());
+ work.setTagToValueDesc(new ArrayList<TableDesc>());
work.setReducer(null);
return work;
}
@@ -720,13 +720,13 @@
String taskTmpDir = baseCtx.getMRTmpFileURI();
Operator<? extends Serializable> parent = op.getParentOperators().get(posn);
- tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
+ TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
// Create a file sink operator for this file name
boolean compressIntermediate = parseCtx.getConf().getBoolVar(
HiveConf.ConfVars.COMPRESSINTERMEDIATE);
- fileSinkDesc desc = new fileSinkDesc(taskTmpDir, tt_desc,
+ FileSinkDesc desc = new FileSinkDesc(taskTmpDir, tt_desc,
compressIntermediate);
if (compressIntermediate) {
desc.setCompressCodec(parseCtx.getConf().getVar(
@@ -753,7 +753,7 @@
// create a dummy tableScan operator on top of op
Operator<? extends Serializable> ts_op = putOpInsertMap(OperatorFactory
- .get(tableScanDesc.class, parent.getSchema()), null, parseCtx);
+ .get(TableScanDesc.class, parent.getSchema()), null, parseCtx);
childOpList = new ArrayList<Operator<? extends Serializable>>();
childOpList.add(op);
@@ -765,7 +765,7 @@
mapCurrCtx.put(ts_op, new GenMapRedCtx(childTask, null, null));
String streamDesc = taskTmpDir;
- mapredWork cplan = (mapredWork) childTask.getWork();
+ MapredWork cplan = (MapredWork) childTask.getWork();
if (setReducer) {
Operator<? extends Serializable> reducer = op.getChildOperators().get(0);
@@ -829,7 +829,7 @@
Task<? extends Serializable> uTask = null;
union.getParentOperators().get(pos);
- mapredWork uPlan = null;
+ MapredWork uPlan = null;
// union is encountered for the first time
if (uCtxTask == null) {
@@ -840,7 +840,7 @@
ctx.setUnionTask(union, uCtxTask);
} else {
uTask = uCtxTask.getUTask();
- uPlan = (mapredWork) uTask.getWork();
+ uPlan = (MapredWork) uTask.getWork();
}
// If there is a mapjoin at position 'pos'
@@ -851,7 +851,7 @@
uPlan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
uPlan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
uPlan.getPathToPartitionInfo().put(taskTmpDir,
- new partitionDesc(mjCtx.getTTDesc(), null));
+ new PartitionDesc(mjCtx.getTTDesc(), null));
uPlan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java Mon Jan 25 18:48:58 2010
@@ -50,13 +50,13 @@
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc;
-import org.apache.hadoop.hive.ql.plan.groupByDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
/**
@@ -130,7 +130,7 @@
throws SemanticException {
// if this is not a HASH groupby, return
- if (curr.getConf().getMode() != groupByDesc.Mode.HASH) {
+ if (curr.getConf().getMode() != GroupByDesc.Mode.HASH) {
return;
}
@@ -140,23 +140,23 @@
}
boolean bucketGroupBy = true;
- groupByDesc desc = curr.getConf();
- List<exprNodeDesc> groupByKeys = new LinkedList<exprNodeDesc>();
+ GroupByDesc desc = curr.getConf();
+ List<ExprNodeDesc> groupByKeys = new LinkedList<ExprNodeDesc>();
groupByKeys.addAll(desc.getKeys());
// compute groupby columns from groupby keys
List<String> groupByCols = new ArrayList<String>();
while (groupByKeys.size() > 0) {
- exprNodeDesc node = groupByKeys.remove(0);
- if (node instanceof exprNodeColumnDesc) {
+ ExprNodeDesc node = groupByKeys.remove(0);
+ if (node instanceof ExprNodeColumnDesc) {
groupByCols.addAll(node.getCols());
- } else if ((node instanceof exprNodeConstantDesc)
- || (node instanceof exprNodeNullDesc)) {
+ } else if ((node instanceof ExprNodeConstantDesc)
+ || (node instanceof ExprNodeNullDesc)) {
// nothing
- } else if (node instanceof exprNodeFieldDesc) {
- groupByKeys.add(0, ((exprNodeFieldDesc) node).getDesc());
+ } else if (node instanceof ExprNodeFieldDesc) {
+ groupByKeys.add(0, ((ExprNodeFieldDesc) node).getDesc());
continue;
- } else if (node instanceof exprNodeGenericFuncDesc) {
- exprNodeGenericFuncDesc udfNode = ((exprNodeGenericFuncDesc) node);
+ } else if (node instanceof ExprNodeGenericFuncDesc) {
+ ExprNodeGenericFuncDesc udfNode = ((ExprNodeGenericFuncDesc) node);
GenericUDF udf = udfNode.getGenericUDF();
if (!FunctionRegistry.isDeterministic(udf)) {
return;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java Mon Jan 25 18:48:58 2010
@@ -43,9 +43,9 @@
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
/**
* Operator factory for MapJoin processing
@@ -83,7 +83,7 @@
GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(
pos));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork currPlan = (mapredWork) currTask.getWork();
+ MapredWork currPlan = (MapredWork) currTask.getWork();
Operator<? extends Serializable> currTopOp = mapredCtx.getCurrTopOp();
String currAliasId = mapredCtx.getCurrAliasId();
Operator<? extends Serializable> reducer = mapJoin;
@@ -126,7 +126,7 @@
MapJoinOperator mapJoin = (MapJoinOperator) nd;
GenMRProcContext opProcCtx = (GenMRProcContext) procCtx;
- mapredWork cplan = GenMapRedUtils.getMapRedWork();
+ MapredWork cplan = GenMapRedUtils.getMapRedWork();
ParseContext parseCtx = opProcCtx.getParseCtx();
Task<? extends Serializable> redTask = TaskFactory.get(cplan, parseCtx
.getConf());
@@ -212,11 +212,11 @@
ctx.setMapJoinCtx(mapJoin, mjCtx);
}
- mapredWork mjPlan = GenMapRedUtils.getMapRedWork();
+ MapredWork mjPlan = GenMapRedUtils.getMapRedWork();
Task<? extends Serializable> mjTask = TaskFactory.get(mjPlan, parseCtx
.getConf());
- tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
+ TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils
.getFieldSchemasFromRowSchema(mapJoin.getSchema(), "temporarycol"));
// generate the temporary file
@@ -232,7 +232,7 @@
// Create a file sink operator for this file name
Operator<? extends Serializable> fs_op = OperatorFactory.get(
- new fileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
+ new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar(
HiveConf.ConfVars.COMPRESSINTERMEDIATE)), mapJoin.getSchema());
assert mapJoin.getChildOperators().size() == 1;
@@ -288,7 +288,7 @@
GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(
pos));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork currPlan = (mapredWork) currTask.getWork();
+ MapredWork currPlan = (MapredWork) currTask.getWork();
mapredCtx.getCurrAliasId();
Operator<? extends Serializable> reducer = mapJoin;
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx
@@ -348,7 +348,7 @@
GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(
pos));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
- mapredWork currPlan = (mapredWork) currTask.getWork();
+ MapredWork currPlan = (MapredWork) currTask.getWork();
Operator<? extends Serializable> reducer = mapJoin;
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx
.getOpTaskMap();
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java Mon Jan 25 18:48:58 2010
@@ -52,13 +52,13 @@
import org.apache.hadoop.hive.ql.parse.RowResolver;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
/**
* Implementation of one of the rule-based map join optimization. User passes
@@ -99,17 +99,17 @@
private MapJoinOperator convertMapJoin(ParseContext pctx, JoinOperator op,
QBJoinTree joinTree, int mapJoinPos) throws SemanticException {
// outer join cannot be performed on a table which is being cached
- joinDesc desc = op.getConf();
- org.apache.hadoop.hive.ql.plan.joinCond[] condns = desc.getConds();
- for (org.apache.hadoop.hive.ql.plan.joinCond condn : condns) {
- if (condn.getType() == joinDesc.FULL_OUTER_JOIN) {
+ JoinDesc desc = op.getConf();
+ org.apache.hadoop.hive.ql.plan.JoinCondDesc[] condns = desc.getConds();
+ for (org.apache.hadoop.hive.ql.plan.JoinCondDesc condn : condns) {
+ if (condn.getType() == JoinDesc.FULL_OUTER_JOIN) {
throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
}
- if ((condn.getType() == joinDesc.LEFT_OUTER_JOIN)
+ if ((condn.getType() == JoinDesc.LEFT_OUTER_JOIN)
&& (condn.getLeft() != mapJoinPos)) {
throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
}
- if ((condn.getType() == joinDesc.RIGHT_OUTER_JOIN)
+ if ((condn.getType() == JoinDesc.RIGHT_OUTER_JOIN)
&& (condn.getRight() != mapJoinPos)) {
throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
}
@@ -118,8 +118,8 @@
RowResolver oldOutputRS = pctx.getOpParseCtx().get(op).getRR();
RowResolver outputRS = new RowResolver();
ArrayList<String> outputColumnNames = new ArrayList<String>();
- Map<Byte, List<exprNodeDesc>> keyExprMap = new HashMap<Byte, List<exprNodeDesc>>();
- Map<Byte, List<exprNodeDesc>> valueExprMap = new HashMap<Byte, List<exprNodeDesc>>();
+ Map<Byte, List<ExprNodeDesc>> keyExprMap = new HashMap<Byte, List<ExprNodeDesc>>();
+ Map<Byte, List<ExprNodeDesc>> valueExprMap = new HashMap<Byte, List<ExprNodeDesc>>();
// Walk over all the sources (which are guaranteed to be reduce sink
// operators).
@@ -129,7 +129,7 @@
List<Operator<? extends Serializable>> parentOps = op.getParentOperators();
List<Operator<? extends Serializable>> newParentOps = new ArrayList<Operator<? extends Serializable>>();
List<Operator<? extends Serializable>> oldReduceSinkParentOps = new ArrayList<Operator<? extends Serializable>>();
- Map<String, exprNodeDesc> colExprMap = new HashMap<String, exprNodeDesc>();
+ Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
// found a source which is not to be stored in memory
if (leftSrc != null) {
// assert mapJoinPos == 0;
@@ -162,9 +162,9 @@
for (pos = 0; pos < newParentOps.size(); pos++) {
ReduceSinkOperator oldPar = (ReduceSinkOperator) oldReduceSinkParentOps
.get(pos);
- reduceSinkDesc rsconf = oldPar.getConf();
+ ReduceSinkDesc rsconf = oldPar.getConf();
Byte tag = (byte) rsconf.getTag();
- List<exprNodeDesc> keys = rsconf.getKeyCols();
+ List<ExprNodeDesc> keys = rsconf.getKeyCols();
keyExprMap.put(tag, keys);
}
@@ -173,7 +173,7 @@
RowResolver inputRS = pGraphContext.getOpParseCtx().get(
newParentOps.get(pos)).getRR();
- List<exprNodeDesc> values = new ArrayList<exprNodeDesc>();
+ List<ExprNodeDesc> values = new ArrayList<ExprNodeDesc>();
Iterator<String> keysIter = inputRS.getTableNames().iterator();
while (keysIter.hasNext()) {
@@ -190,7 +190,7 @@
String outputCol = oldValueInfo.getInternalName();
if (outputRS.get(key, field) == null) {
outputColumnNames.add(outputCol);
- exprNodeDesc colDesc = new exprNodeColumnDesc(valueInfo.getType(),
+ ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(),
valueInfo.getInternalName(), valueInfo.getTabAlias(), valueInfo
.getIsPartitionCol());
values.add(colDesc);
@@ -205,7 +205,7 @@
valueExprMap.put(new Byte((byte) pos), values);
}
- org.apache.hadoop.hive.ql.plan.joinCond[] joinCondns = op.getConf()
+ org.apache.hadoop.hive.ql.plan.JoinCondDesc[] joinCondns = op.getConf()
.getConds();
Operator[] newPar = new Operator[newParentOps.size()];
@@ -214,32 +214,32 @@
newPar[pos++] = o;
}
- List<exprNodeDesc> keyCols = keyExprMap.get(new Byte((byte) 0));
+ List<ExprNodeDesc> keyCols = keyExprMap.get(new Byte((byte) 0));
StringBuilder keyOrder = new StringBuilder();
for (int i = 0; i < keyCols.size(); i++) {
keyOrder.append("+");
}
- tableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils
+ TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils
.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"));
- List<tableDesc> valueTableDescs = new ArrayList<tableDesc>();
+ List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
for (pos = 0; pos < newParentOps.size(); pos++) {
- List<exprNodeDesc> valueCols = valueExprMap.get(new Byte((byte) pos));
+ List<ExprNodeDesc> valueCols = valueExprMap.get(new Byte((byte) pos));
keyOrder = new StringBuilder();
for (int i = 0; i < valueCols.size(); i++) {
keyOrder.append("+");
}
- tableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils
+ TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils
.getFieldSchemasFromColumnList(valueCols, "mapjoinvalue"));
valueTableDescs.add(valueTableDesc);
}
MapJoinOperator mapJoinOp = (MapJoinOperator) putOpInsertMap(
- OperatorFactory.getAndMakeChild(new mapJoinDesc(keyExprMap,
+ OperatorFactory.getAndMakeChild(new MapJoinDesc(keyExprMap,
keyTableDesc, valueExprMap, valueTableDescs, outputColumnNames,
mapJoinPos, joinCondns), new RowSchema(outputRS.getColumnInfos()),
newPar), outputRS);
@@ -273,18 +273,18 @@
// mapJoin later on
RowResolver inputRR = pctx.getOpParseCtx().get(input).getRR();
- ArrayList<exprNodeDesc> exprs = new ArrayList<exprNodeDesc>();
+ ArrayList<ExprNodeDesc> exprs = new ArrayList<ExprNodeDesc>();
ArrayList<String> outputs = new ArrayList<String>();
List<String> outputCols = input.getConf().getOutputColumnNames();
RowResolver outputRS = new RowResolver();
- Map<String, exprNodeDesc> colExprMap = new HashMap<String, exprNodeDesc>();
+ Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
for (int i = 0; i < outputCols.size(); i++) {
String internalName = outputCols.get(i);
String[] nm = inputRR.reverseLookup(internalName);
ColumnInfo valueInfo = inputRR.get(nm[0], nm[1]);
- exprNodeDesc colDesc = new exprNodeColumnDesc(valueInfo.getType(),
+ ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(),
valueInfo.getInternalName(), nm[0], valueInfo.getIsPartitionCol());
exprs.add(colDesc);
outputs.add(internalName);
@@ -293,7 +293,7 @@
colExprMap.put(internalName, colDesc);
}
- selectDesc select = new selectDesc(exprs, outputs, false);
+ SelectDesc select = new SelectDesc(exprs, outputs, false);
SelectOperator sel = (SelectOperator) putOpInsertMap(
OperatorFactory.getAndMakeChild(select, new RowSchema(inputRR
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java Mon Jan 25 18:48:58 2010
@@ -41,8 +41,8 @@
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.filterDesc;
-import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
/**
* The transformation step that does sample pruning.
@@ -116,7 +116,7 @@
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
FilterOperator filOp = (FilterOperator) nd;
- filterDesc filOpDesc = filOp.getConf();
+ FilterDesc filOpDesc = filOp.getConf();
sampleDesc sampleDescr = filOpDesc.getSampleDescr();
if ((sampleDescr == null) || !sampleDescr.getInputPruning()) {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java Mon Jan 25 18:48:58 2010
@@ -45,16 +45,16 @@
import org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin;
import org.apache.hadoop.hive.ql.plan.ConditionalWork;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.mapredLocalWork;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
-import org.apache.hadoop.hive.ql.plan.tableScanDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -109,8 +109,8 @@
String baseTmpDir = parseCtx.getContext().getMRTmpFileURI();
- joinDesc joinDescriptor = joinOp.getConf();
- Map<Byte, List<exprNodeDesc>> joinValues = joinDescriptor.getExprs();
+ JoinDesc joinDescriptor = joinOp.getConf();
+ Map<Byte, List<ExprNodeDesc>> joinValues = joinDescriptor.getExprs();
int numAliases = joinValues.size();
Map<Byte, String> bigKeysDirMap = new HashMap<Byte, String>();
@@ -141,18 +141,18 @@
Map<String, Task<? extends Serializable>> bigKeysDirToTaskMap = new HashMap<String, Task<? extends Serializable>>();
List<Serializable> listWorks = new ArrayList<Serializable>();
List<Task<? extends Serializable>> listTasks = new ArrayList<Task<? extends Serializable>>();
- mapredWork currPlan = (mapredWork) currTask.getWork();
+ MapredWork currPlan = (MapredWork) currTask.getWork();
- tableDesc keyTblDesc = (tableDesc) currPlan.getKeyDesc().clone();
+ TableDesc keyTblDesc = (TableDesc) currPlan.getKeyDesc().clone();
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
- Map<Byte, tableDesc> tableDescList = new HashMap<Byte, tableDesc>();
- Map<Byte, List<exprNodeDesc>> newJoinValues = new HashMap<Byte, List<exprNodeDesc>>();
- Map<Byte, List<exprNodeDesc>> newJoinKeys = new HashMap<Byte, List<exprNodeDesc>>();
- List<tableDesc> newJoinValueTblDesc = new ArrayList<tableDesc>();// used for
+ Map<Byte, TableDesc> tableDescList = new HashMap<Byte, TableDesc>();
+ Map<Byte, List<ExprNodeDesc>> newJoinValues = new HashMap<Byte, List<ExprNodeDesc>>();
+ Map<Byte, List<ExprNodeDesc>> newJoinKeys = new HashMap<Byte, List<ExprNodeDesc>>();
+ List<TableDesc> newJoinValueTblDesc = new ArrayList<TableDesc>();// used for
// create
// mapJoinDesc,
// should
@@ -165,19 +165,19 @@
for (int i = 0; i < numAliases; i++) {
Byte alias = tags[i];
- List<exprNodeDesc> valueCols = joinValues.get(alias);
+ List<ExprNodeDesc> valueCols = joinValues.get(alias);
String colNames = "";
String colTypes = "";
int columnSize = valueCols.size();
- List<exprNodeDesc> newValueExpr = new ArrayList<exprNodeDesc>();
- List<exprNodeDesc> newKeyExpr = new ArrayList<exprNodeDesc>();
+ List<ExprNodeDesc> newValueExpr = new ArrayList<ExprNodeDesc>();
+ List<ExprNodeDesc> newKeyExpr = new ArrayList<ExprNodeDesc>();
boolean first = true;
for (int k = 0; k < columnSize; k++) {
TypeInfo type = valueCols.get(k).getTypeInfo();
String newColName = i + "_VALUE_" + k; // any name, it does not matter.
newValueExpr
- .add(new exprNodeColumnDesc(type, newColName, "" + i, false));
+ .add(new ExprNodeColumnDesc(type, newColName, "" + i, false));
if (!first) {
colNames = colNames + ",";
colTypes = colTypes + ",";
@@ -196,7 +196,7 @@
first = false;
colNames = colNames + joinKeys.get(k);
colTypes = colTypes + joinKeyTypes.get(k);
- newKeyExpr.add(new exprNodeColumnDesc(TypeInfoFactory
+ newKeyExpr.add(new ExprNodeColumnDesc(TypeInfoFactory
.getPrimitiveTypeInfo(joinKeyTypes.get(k)), joinKeys.get(k),
"" + i, false));
}
@@ -228,8 +228,8 @@
for (int i = 0; i < numAliases - 1; i++) {
Byte src = tags[i];
- mapredWork newPlan = PlanUtils.getMapRedWork();
- mapredWork clonePlan = null;
+ MapredWork newPlan = PlanUtils.getMapRedWork();
+ MapredWork clonePlan = null;
try {
String xmlPlan = currPlan.toXML();
StringBuffer sb = new StringBuffer(xmlPlan);
@@ -243,7 +243,7 @@
Operator<? extends Serializable>[] parentOps = new TableScanOperator[tags.length];
for (int k = 0; k < tags.length; k++) {
Operator<? extends Serializable> ts = OperatorFactory.get(
- tableScanDesc.class, (RowSchema) null);
+ TableScanDesc.class, (RowSchema) null);
parentOps[k] = ts;
}
Operator<? extends Serializable> tblScan_op = parentOps[i];
@@ -254,7 +254,7 @@
String bigKeyDirPath = bigKeysDirMap.get(src);
newPlan.getPathToAliases().put(bigKeyDirPath, aliases);
newPlan.getAliasToWork().put(alias, tblScan_op);
- partitionDesc part = new partitionDesc(tableDescList.get(src), null);
+ PartitionDesc part = new PartitionDesc(tableDescList.get(src), null);
newPlan.getPathToPartitionInfo().put(bigKeyDirPath, part);
newPlan.getAliasToPartnInfo().put(alias, part);
@@ -262,16 +262,16 @@
assert reducer instanceof JoinOperator;
JoinOperator cloneJoinOp = (JoinOperator) reducer;
- mapJoinDesc mapJoinDescriptor = new mapJoinDesc(newJoinKeys, keyTblDesc,
+ MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc,
newJoinValues, newJoinValueTblDesc, joinDescriptor
.getOutputColumnNames(), i, joinDescriptor.getConds());
mapJoinDescriptor.setNoOuterJoin(joinDescriptor.isNoOuterJoin());
mapJoinDescriptor.setTagOrder(tags);
mapJoinDescriptor.setHandleSkewJoin(false);
- mapredLocalWork localPlan = new mapredLocalWork(
+ MapredLocalWork localPlan = new MapredLocalWork(
new LinkedHashMap<String, Operator<? extends Serializable>>(),
- new LinkedHashMap<String, fetchWork>());
+ new LinkedHashMap<String, FetchWork>());
Map<Byte, String> smallTblDirs = smallKeysDirMap.get(src);
for (int j = 0; j < numAliases; j++) {
@@ -283,7 +283,7 @@
localPlan.getAliasToWork().put(small_alias.toString(), tblScan_op2);
Path tblDir = new Path(smallTblDirs.get(small_alias));
localPlan.getAliasToFetchWork().put(small_alias.toString(),
- new fetchWork(tblDir.toString(), tableDescList.get(small_alias)));
+ new FetchWork(tblDir.toString(), tableDescList.get(small_alias)));
}
newPlan.setMapLocalWork(localPlan);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java Mon Jan 25 18:48:58 2010
@@ -37,7 +37,7 @@
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
/**
* An implementation of PhysicalPlanResolver. It iterates over each task with a rule
@@ -73,7 +73,7 @@
Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
if (!task.isMapRedTask() || task instanceof ConditionalTask
- || ((mapredWork) task.getWork()).getReducer() == null) {
+ || ((MapredWork) task.getWork()).getReducer() == null) {
return null;
}
@@ -92,7 +92,7 @@
// iterate over the reducer operator tree
ArrayList<Node> topNodes = new ArrayList<Node>();
- topNodes.add(((mapredWork) task.getWork()).getReducer());
+ topNodes.add(((MapredWork) task.getWork()).getReducer());
ogw.startWalking(topNodes, null);
return null;
}
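
The guard above is the only behavioral content in this file's hunks; as a standalone sketch (the helper name is illustrative only), the check that decides whether a task is even a candidate for skew-join rewriting is:

    import java.io.Serializable;

    import org.apache.hadoop.hive.ql.exec.ConditionalTask;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.plan.MapredWork;

    public class SkewJoinCandidateSketch {
      // Only plain map-reduce tasks that actually have a reducer get their
      // reducer operator tree walked by the skew-join processor.
      static boolean isCandidate(Task<? extends Serializable> task) {
        if (!task.isMapRedTask() || task instanceof ConditionalTask) {
          return false;
        }
        return ((MapredWork) task.getWork()).getReducer() != null;
      }
    }
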
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java Mon Jan 25 18:48:58 2010
@@ -36,12 +36,12 @@
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc;
/**
* Expression processor factory for partition pruning. Each processor tries to
@@ -60,14 +60,14 @@
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
- exprNodeDesc newcd = null;
- exprNodeColumnDesc cd = (exprNodeColumnDesc) nd;
+ ExprNodeDesc newcd = null;
+ ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd;
ExprProcCtx epc = (ExprProcCtx) procCtx;
if (cd.getTabAlias().equalsIgnoreCase(epc.getTabAlias())
&& cd.getIsParititonCol()) {
newcd = cd.clone();
} else {
- newcd = new exprNodeConstantDesc(cd.getTypeInfo(), null);
+ newcd = new ExprNodeConstantDesc(cd.getTypeInfo(), null);
epc.setHasNonPartCols(true);
}
@@ -87,8 +87,8 @@
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
- exprNodeDesc newfd = null;
- exprNodeGenericFuncDesc fd = (exprNodeGenericFuncDesc) nd;
+ ExprNodeDesc newfd = null;
+ ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) nd;
boolean unknown = false;
@@ -106,24 +106,24 @@
} else {
// If any child is null, set unknown to true
for (Object child : nodeOutputs) {
- exprNodeDesc child_nd = (exprNodeDesc) child;
- if (child_nd instanceof exprNodeConstantDesc
- && ((exprNodeConstantDesc) child_nd).getValue() == null) {
+ ExprNodeDesc child_nd = (ExprNodeDesc) child;
+ if (child_nd instanceof ExprNodeConstantDesc
+ && ((ExprNodeConstantDesc) child_nd).getValue() == null) {
unknown = true;
}
}
}
if (unknown) {
- newfd = new exprNodeConstantDesc(fd.getTypeInfo(), null);
+ newfd = new ExprNodeConstantDesc(fd.getTypeInfo(), null);
} else {
// Create the list of children
- ArrayList<exprNodeDesc> children = new ArrayList<exprNodeDesc>();
+ ArrayList<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
for (Object child : nodeOutputs) {
- children.add((exprNodeDesc) child);
+ children.add((ExprNodeDesc) child);
}
// Create a copy of the function descriptor
- newfd = new exprNodeGenericFuncDesc(fd.getTypeInfo(), fd
+ newfd = new ExprNodeGenericFuncDesc(fd.getTypeInfo(), fd
.getGenericUDF(), children);
}
@@ -138,14 +138,14 @@
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
- exprNodeFieldDesc fnd = (exprNodeFieldDesc) nd;
+ ExprNodeFieldDesc fnd = (ExprNodeFieldDesc) nd;
boolean unknown = false;
int idx = 0;
- exprNodeDesc left_nd = null;
+ ExprNodeDesc left_nd = null;
for (Object child : nodeOutputs) {
- exprNodeDesc child_nd = (exprNodeDesc) child;
- if (child_nd instanceof exprNodeConstantDesc
- && ((exprNodeConstantDesc) child_nd).getValue() == null) {
+ ExprNodeDesc child_nd = (ExprNodeDesc) child;
+ if (child_nd instanceof ExprNodeConstantDesc
+ && ((ExprNodeConstantDesc) child_nd).getValue() == null) {
unknown = true;
}
left_nd = child_nd;
@@ -153,11 +153,11 @@
assert (idx == 0);
- exprNodeDesc newnd = null;
+ ExprNodeDesc newnd = null;
if (unknown) {
- newnd = new exprNodeConstantDesc(fnd.getTypeInfo(), null);
+ newnd = new ExprNodeConstantDesc(fnd.getTypeInfo(), null);
} else {
- newnd = new exprNodeFieldDesc(fnd.getTypeInfo(), left_nd, fnd
+ newnd = new ExprNodeFieldDesc(fnd.getTypeInfo(), left_nd, fnd
.getFieldName(), fnd.getIsList());
}
return newnd;
@@ -174,10 +174,10 @@
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
- if (nd instanceof exprNodeConstantDesc) {
- return ((exprNodeConstantDesc) nd).clone();
- } else if (nd instanceof exprNodeNullDesc) {
- return ((exprNodeNullDesc) nd).clone();
+ if (nd instanceof ExprNodeConstantDesc) {
+ return ((ExprNodeConstantDesc) nd).clone();
+ } else if (nd instanceof ExprNodeNullDesc) {
+ return ((ExprNodeNullDesc) nd).clone();
}
assert (false);
@@ -214,7 +214,7 @@
* has a non partition column
* @throws SemanticException
*/
- public static exprNodeDesc genPruner(String tabAlias, exprNodeDesc pred,
+ public static ExprNodeDesc genPruner(String tabAlias, ExprNodeDesc pred,
boolean hasNonPartCols) throws SemanticException {
// Create the walker, the rules dispatcher and the context.
ExprProcCtx pprCtx = new ExprProcCtx(tabAlias);
@@ -224,12 +224,12 @@
// generates the plan from the operator tree
Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
exprRules.put(
- new RuleRegExp("R1", exprNodeColumnDesc.class.getName() + "%"),
+ new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"),
getColumnProcessor());
exprRules.put(
- new RuleRegExp("R2", exprNodeFieldDesc.class.getName() + "%"),
+ new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"),
getFieldProcessor());
- exprRules.put(new RuleRegExp("R5", exprNodeGenericFuncDesc.class.getName()
+ exprRules.put(new RuleRegExp("R5", ExprNodeGenericFuncDesc.class.getName()
+ "%"), getGenericFuncProcessor());
// The dispatcher fires the processor corresponding to the closest matching
@@ -246,7 +246,7 @@
hasNonPartCols = pprCtx.getHasNonPartCols();
// Get the exprNodeDesc corresponding to the first start node;
- return (exprNodeDesc) outputMap.get(pred);
+ return (ExprNodeDesc) outputMap.get(pred);
}
}
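
Other components call into the renamed factory through genPruner; a minimal illustrative call site (alias and variable names are placeholders, and the boolean is passed as false exactly as the OpProcFactory hunk further down does):

    import org.apache.hadoop.hive.ql.optimizer.ppr.ExprProcFactory;
    import org.apache.hadoop.hive.ql.parse.SemanticException;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class GenPrunerSketch {
      // Rewrites a filter predicate into its partition-pruning form for the
      // given table alias: partition columns are kept, every other column is
      // replaced by a typed null constant by the column processor above.
      static ExprNodeDesc pruneFor(String tabAlias, ExprNodeDesc filterPred)
          throws SemanticException {
        return ExprProcFactory.genPruner(tabAlias, filterPred, false);
      }
    }
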
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java Mon Jan 25 18:48:58 2010
@@ -28,7 +28,7 @@
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
/**
* Operator factory for partition pruning processing of the operator graph. We find
@@ -81,12 +81,12 @@
}
// Otherwise this is not a sampling predicate and we need to
- exprNodeDesc predicate = fop.getConf().getPredicate();
+ ExprNodeDesc predicate = fop.getConf().getPredicate();
String alias = top.getConf().getAlias();
// Generate the partition pruning predicate
boolean hasNonPartCols = false;
- exprNodeDesc ppr_pred = ExprProcFactory.genPruner(alias, predicate,
+ ExprNodeDesc ppr_pred = ExprProcFactory.genPruner(alias, predicate,
hasNonPartCols);
owc.addHasNonPartCols(hasNonPartCols);
@@ -96,10 +96,10 @@
return null;
}
- private void addPruningPred(Map<TableScanOperator, exprNodeDesc> opToPPR,
- TableScanOperator top, exprNodeDesc new_ppr_pred) {
- exprNodeDesc old_ppr_pred = opToPPR.get(top);
- exprNodeDesc ppr_pred = null;
+ private void addPruningPred(Map<TableScanOperator, ExprNodeDesc> opToPPR,
+ TableScanOperator top, ExprNodeDesc new_ppr_pred) {
+ ExprNodeDesc old_ppr_pred = opToPPR.get(top);
+ ExprNodeDesc ppr_pred = null;
if (old_ppr_pred != null) {
// or the old_ppr_pred and the new_ppr_pred
ppr_pred = TypeCheckProcFactory.DefaultExprProcessor
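
addPruningPred above OR-combines an existing pruning predicate with a newly derived one when several filters sit over the same table scan. A hedged sketch of that combination, assuming the varargs getFuncExprNodeDesc overload (the broad throws clause only keeps the fragment self-contained):

    import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class CombinePrunerPredSketch {
      // OR the previously recorded pruning predicate with the new one, so a
      // partition survives if either filter could accept it.
      static ExprNodeDesc or(ExprNodeDesc oldPred, ExprNodeDesc newPred)
          throws Exception {
        return TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("or",
            oldPred, newPred);
      }
    }
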
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java Mon Jan 25 18:48:58 2010
@@ -22,7 +22,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
/**
* Context class for operator tree walker for partition pruner.
@@ -35,17 +35,17 @@
* Map from tablescan operator to partition pruning predicate that is
* initialized from the ParseContext
*/
- private final HashMap<TableScanOperator, exprNodeDesc> opToPartPruner;
+ private final HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner;
/**
* Constructor
*/
- public OpWalkerCtx(HashMap<TableScanOperator, exprNodeDesc> opToPartPruner) {
+ public OpWalkerCtx(HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner) {
this.opToPartPruner = opToPartPruner;
hasNonPartCols = false;
}
- public HashMap<TableScanOperator, exprNodeDesc> getOpToPartPruner() {
+ public HashMap<TableScanOperator, ExprNodeDesc> getOpToPartPruner() {
return opToPartPruner;
}
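
The context renamed here is just a carrier for the ParseContext's pruner map; an illustrative construction (in real use the map comes from the ParseContext rather than being empty):

    import java.util.HashMap;

    import org.apache.hadoop.hive.ql.exec.TableScanOperator;
    import org.apache.hadoop.hive.ql.optimizer.ppr.OpWalkerCtx;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class OpWalkerCtxSketch {
      static OpWalkerCtx emptyCtx() {
        // One entry per table scan; the walker fills in (or ORs) each pruning predicate.
        return new OpWalkerCtx(new HashMap<TableScanOperator, ExprNodeDesc>());
      }
    }
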
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Mon Jan 25 18:48:58 2010
@@ -48,9 +48,9 @@
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -109,25 +109,25 @@
* @param expr
* the pruner expression for the table
*/
- public static boolean onlyContainsPartnCols(Table tab, exprNodeDesc expr) {
+ public static boolean onlyContainsPartnCols(Table tab, ExprNodeDesc expr) {
if (!tab.isPartitioned() || (expr == null)) {
return true;
}
- if (expr instanceof exprNodeColumnDesc) {
- String colName = ((exprNodeColumnDesc) expr).getColumn();
+ if (expr instanceof ExprNodeColumnDesc) {
+ String colName = ((ExprNodeColumnDesc) expr).getColumn();
return tab.isPartitionKey(colName);
}
// It cannot contain a non-deterministic function
- if ((expr instanceof exprNodeGenericFuncDesc)
- && !FunctionRegistry.isDeterministic(((exprNodeGenericFuncDesc) expr)
+ if ((expr instanceof ExprNodeGenericFuncDesc)
+ && !FunctionRegistry.isDeterministic(((ExprNodeGenericFuncDesc) expr)
.getGenericUDF())) {
return false;
}
// All columns of the expression must be partitioned columns
- List<exprNodeDesc> children = expr.getChildren();
+ List<ExprNodeDesc> children = expr.getChildren();
if (children != null) {
for (int i = 0; i < children.size(); i++) {
if (!onlyContainsPartnCols(tab, children.get(i))) {
@@ -155,7 +155,7 @@
* pruner condition.
* @throws HiveException
*/
- public static PrunedPartitionList prune(Table tab, exprNodeDesc prunerExpr,
+ public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
HiveConf conf, String alias,
Map<String, PrunedPartitionList> prunedPartitionsMap)
throws HiveException {
@@ -271,17 +271,17 @@
/**
* Whether the expression contains a column node or not.
*/
- public static boolean hasColumnExpr(exprNodeDesc desc) {
+ public static boolean hasColumnExpr(ExprNodeDesc desc) {
// Return false for null
if (desc == null) {
return false;
}
// Return true for exprNodeColumnDesc
- if (desc instanceof exprNodeColumnDesc) {
+ if (desc instanceof ExprNodeColumnDesc) {
return true;
}
// Return true in case one of the children is column expr.
- List<exprNodeDesc> children = desc.getChildren();
+ List<ExprNodeDesc> children = desc.getChildren();
if (children != null) {
for (int i = 0; i < children.size(); i++) {
if (hasColumnExpr(children.get(i))) {
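
The two public entry points touched above are the ones other components call; a minimal sketch of their use under the new names (the helper method and variable names are placeholders, not part of this patch):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
    import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class PrunerCallSketch {
      static PrunedPartitionList partitionsFor(Table tab, ExprNodeDesc prunerExpr,
          HiveConf conf, String alias) throws HiveException {
        // Pruning is exact only when the expression touches partition columns
        // alone (and uses no non-deterministic functions); prune() still returns
        // the set of partitions that may satisfy the expression either way.
        boolean exact = PartitionPruner.onlyContainsPartnCols(tab, prunerExpr);
        System.out.println("exact partition pruning: " + exact);
        Map<String, PrunedPartitionList> cache =
            new HashMap<String, PrunedPartitionList>();
        return PartitionPruner.prune(tab, prunerExpr, conf, alias, cache);
      }
    }
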
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Mon Jan 25 18:48:58 2010
@@ -42,17 +42,17 @@
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.MsckDesc;
-import org.apache.hadoop.hive.ql.plan.alterTableDesc;
-import org.apache.hadoop.hive.ql.plan.descFunctionDesc;
-import org.apache.hadoop.hive.ql.plan.descTableDesc;
-import org.apache.hadoop.hive.ql.plan.dropTableDesc;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.showFunctionsDesc;
-import org.apache.hadoop.hive.ql.plan.showPartitionsDesc;
-import org.apache.hadoop.hive.ql.plan.showTableStatusDesc;
-import org.apache.hadoop.hive.ql.plan.showTablesDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
-import org.apache.hadoop.hive.ql.plan.alterTableDesc.alterTableTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
+import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
+import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
+import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
+import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc.alterTableTypes;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.TextInputFormat;
@@ -146,7 +146,7 @@
private void analyzeDropTable(ASTNode ast, boolean expectView)
throws SemanticException {
String tableName = unescapeIdentifier(ast.getChild(0).getText());
- dropTableDesc dropTblDesc = new dropTableDesc(tableName, expectView);
+ DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
dropTblDesc), conf));
}
@@ -155,7 +155,7 @@
String tableName = unescapeIdentifier(ast.getChild(0).getText());
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
.getChild(0));
- alterTableDesc alterTblDesc = new alterTableDesc(alterTableTypes.ADDPROPS);
+ AlterTableDesc alterTblDesc = new AlterTableDesc(alterTableTypes.ADDPROPS);
alterTblDesc.setProps(mapProp);
alterTblDesc.setOldName(tableName);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
@@ -167,7 +167,7 @@
String tableName = unescapeIdentifier(ast.getChild(0).getText());
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
.getChild(0));
- alterTableDesc alterTblDesc = new alterTableDesc(
+ AlterTableDesc alterTblDesc = new AlterTableDesc(
alterTableTypes.ADDSERDEPROPS);
alterTblDesc.setProps(mapProp);
alterTblDesc.setOldName(tableName);
@@ -178,7 +178,7 @@
private void analyzeAlterTableSerde(ASTNode ast) throws SemanticException {
String tableName = unescapeIdentifier(ast.getChild(0).getText());
String serdeName = unescapeSQLString(ast.getChild(1).getText());
- alterTableDesc alterTblDesc = new alterTableDesc(alterTableTypes.ADDSERDE);
+ AlterTableDesc alterTblDesc = new AlterTableDesc(alterTableTypes.ADDSERDE);
if (ast.getChildCount() > 2) {
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(2))
.getChild(0));
@@ -225,7 +225,7 @@
serde = COLUMNAR_SERDE;
break;
}
- alterTableDesc alterTblDesc = new alterTableDesc(tableName, inputFormat,
+ AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, inputFormat,
outputFormat, serde);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
@@ -247,7 +247,7 @@
if (numBuckets <= 0) {
throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg());
}
- alterTableDesc alterTblDesc = new alterTableDesc(tableName, numBuckets,
+ AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, numBuckets,
bucketCols, sortCols);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
@@ -299,7 +299,7 @@
prop.setProperty("columns", colTypes[0]);
prop.setProperty("columns.types", colTypes[1]);
- fetchWork fetch = new fetchWork(ctx.getResFile().toString(), new tableDesc(
+ FetchWork fetch = new FetchWork(ctx.getResFile().toString(), new TableDesc(
LazySimpleSerDe.class, TextInputFormat.class,
IgnoreKeyTextOutputFormat.class, prop), -1);
fetch.setSerializationNullFormat(" ");
@@ -319,7 +319,7 @@
}
boolean isExt = ast.getChildCount() > 1;
- descTableDesc descTblDesc = new descTableDesc(ctx.getResFile(), tableName,
+ DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName,
partSpec, isExt);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
descTblDesc), conf));
@@ -339,21 +339,21 @@
}
private void analyzeShowPartitions(ASTNode ast) throws SemanticException {
- showPartitionsDesc showPartsDesc;
+ ShowPartitionsDesc showPartsDesc;
String tableName = unescapeIdentifier(ast.getChild(0).getText());
- showPartsDesc = new showPartitionsDesc(tableName, ctx.getResFile());
+ showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile());
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
showPartsDesc), conf));
setFetchTask(createFetchTask(showPartsDesc.getSchema()));
}
private void analyzeShowTables(ASTNode ast) throws SemanticException {
- showTablesDesc showTblsDesc;
+ ShowTablesDesc showTblsDesc;
if (ast.getChildCount() == 1) {
String tableNames = unescapeSQLString(ast.getChild(0).getText());
- showTblsDesc = new showTablesDesc(ctx.getResFile(), tableNames);
+ showTblsDesc = new ShowTablesDesc(ctx.getResFile(), tableNames);
} else {
- showTblsDesc = new showTablesDesc(ctx.getResFile());
+ showTblsDesc = new ShowTablesDesc(ctx.getResFile());
}
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
showTblsDesc), conf));
@@ -361,7 +361,7 @@
}
private void analyzeShowTableStatus(ASTNode ast) throws SemanticException {
- showTableStatusDesc showTblStatusDesc;
+ ShowTableStatusDesc showTblStatusDesc;
String tableNames = unescapeIdentifier(ast.getChild(0).getText());
String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
int children = ast.getChildCount();
@@ -381,7 +381,7 @@
}
}
}
- showTblStatusDesc = new showTableStatusDesc(ctx.getResFile(), dbName,
+ showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile(), dbName,
tableNames, partSpec);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
showTblStatusDesc), conf));
@@ -398,12 +398,12 @@
* Parsing failed
*/
private void analyzeShowFunctions(ASTNode ast) throws SemanticException {
- showFunctionsDesc showFuncsDesc;
+ ShowFunctionsDesc showFuncsDesc;
if (ast.getChildCount() == 1) {
String funcNames = stripQuotes(ast.getChild(0).getText());
- showFuncsDesc = new showFunctionsDesc(ctx.getResFile(), funcNames);
+ showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile(), funcNames);
} else {
- showFuncsDesc = new showFunctionsDesc(ctx.getResFile());
+ showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile());
}
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
showFuncsDesc), conf));
@@ -433,7 +433,7 @@
throw new SemanticException("Unexpected Tokens at DESCRIBE FUNCTION");
}
- descFunctionDesc descFuncDesc = new descFunctionDesc(ctx.getResFile(),
+ DescFunctionDesc descFuncDesc = new DescFunctionDesc(ctx.getResFile(),
funcName, isExtended);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
descFuncDesc), conf));
@@ -441,7 +441,7 @@
}
private void analyzeAlterTableRename(ASTNode ast) throws SemanticException {
- alterTableDesc alterTblDesc = new alterTableDesc(unescapeIdentifier(ast
+ AlterTableDesc alterTblDesc = new AlterTableDesc(unescapeIdentifier(ast
.getChild(0).getText()), unescapeIdentifier(ast.getChild(1).getText()));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
@@ -474,7 +474,7 @@
}
}
- alterTableDesc alterTblDesc = new alterTableDesc(tblName,
+ AlterTableDesc alterTblDesc = new AlterTableDesc(tblName,
unescapeIdentifier(ast.getChild(1).getText()), unescapeIdentifier(ast
.getChild(2).getText()), newType, newComment, first, flagCol);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
@@ -485,7 +485,7 @@
alterTableTypes alterType) throws SemanticException {
String tblName = unescapeIdentifier(ast.getChild(0).getText());
List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(1));
- alterTableDesc alterTblDesc = new alterTableDesc(tblName, newCols,
+ AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols,
alterType);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
@@ -495,7 +495,7 @@
String tblName = unescapeIdentifier(ast.getChild(0).getText());
// get table metadata
List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
- dropTableDesc dropTblDesc = new dropTableDesc(tblName, partSpecs);
+ DropTableDesc dropTblDesc = new DropTableDesc(tblName, partSpecs);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
dropTblDesc), conf));
}
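
The DESCRIBE/SHOW paths above all expose their output through the same result-fetch setup; a sketch of that setup with the renamed FetchWork/TableDesc follows. The helper and its arguments are placeholders, and the IgnoreKeyTextOutputFormat package is assumed from the imports used elsewhere in this file.

    import java.util.Properties;

    import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
    import org.apache.hadoop.hive.ql.plan.FetchWork;
    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
    import org.apache.hadoop.mapred.TextInputFormat;

    public class DdlResultFetchSketch {
      // DDL results are written to a plain-text result file; this FetchWork
      // tells the fetch task how to read that file back for the client.
      static FetchWork resultFetch(String resFile, String colNames, String colTypes) {
        Properties prop = new Properties();
        prop.setProperty("columns", colNames);
        prop.setProperty("columns.types", colTypes);
        FetchWork fetch = new FetchWork(resFile, new TableDesc(
            LazySimpleSerDe.class, TextInputFormat.class,
            IgnoreKeyTextOutputFormat.class, prop), -1);
        fetch.setSerializationNullFormat(" ");
        return fetch;
      }
    }
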