Posted to commits@hive.apache.org by zs...@apache.org on 2010/01/25 19:49:05 UTC
svn commit: r902921 [2/26] - in /hadoop/hive/trunk: ./
contrib/src/java/org/apache/hadoop/hive/contrib/genericudf/example/
contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/
ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoo...
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java Mon Jan 25 18:48:58 2010
@@ -22,13 +22,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.extractDesc;
+import org.apache.hadoop.hive.ql.plan.ExtractDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
/**
* Extract operator implementation Extracts a subobject and passes that on.
**/
-public class ExtractOperator extends Operator<extractDesc> implements
+public class ExtractOperator extends Operator<ExtractDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
transient protected ExprNodeEvaluator eval;
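
The hunks in this revision all apply the same mechanical change: plan descriptor classes move from lowercase-first names (extractDesc, fetchWork, tableDesc) to conventional Java PascalCase (ExtractDesc, FetchWork, TableDesc), with no behavior change. A minimal sketch of the pattern, using a hypothetical descriptor in place of the real org.apache.hadoop.hive.ql.plan classes:

    import java.io.Serializable;

    // Hypothetical stand-in for a plan descriptor; before this commit the
    // class would have been named "exampleDesc", now PascalCase.
    class ExampleDesc implements Serializable {
      private static final long serialVersionUID = 1L;
    }

    // Operators stay parameterized on their descriptor type, so each
    // operator declaration changes only in the type argument:
    //   old: public class ExampleOperator extends Operator<exampleDesc>
    //   new: public class ExampleOperator extends Operator<ExampleDesc>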
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Jan 25 18:48:58 2010
@@ -34,9 +34,9 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
@@ -61,7 +61,7 @@
transient protected Log LOG;
transient protected LogHelper console;
- public FetchOperator(fetchWork work, JobConf job) {
+ public FetchOperator(FetchWork work, JobConf job) {
LOG = LogFactory.getLog(this.getClass().getName());
console = new LogHelper(LOG);
@@ -78,7 +78,7 @@
rowWithPart = new Object[2];
}
- private final fetchWork work;
+ private final FetchWork work;
private int splitNum;
private RecordReader<WritableComparable, Writable> currRecReader;
private InputSplit[] inputSplits;
@@ -88,10 +88,10 @@
private Writable value;
private Deserializer serde;
private Iterator<Path> iterPath;
- private Iterator<partitionDesc> iterPartDesc;
+ private Iterator<PartitionDesc> iterPartDesc;
private Path currPath;
- private partitionDesc currPart;
- private tableDesc currTbl;
+ private PartitionDesc currPart;
+ private TableDesc currTbl;
private boolean tblDataDone;
private StructObjectInspector rowObjectInspector;
private final Object[] rowWithPart;
@@ -174,7 +174,7 @@
}
return;
} else {
- iterPath = fetchWork.convertStringToPathArray(work.getPartDir())
+ iterPath = FetchWork.convertStringToPathArray(work.getPartDir())
.iterator();
iterPartDesc = work.getPartDesc().iterator();
}
@@ -182,7 +182,7 @@
while (iterPath.hasNext()) {
Path nxt = iterPath.next();
- partitionDesc prt = iterPartDesc.next();
+ PartitionDesc prt = iterPartDesc.next();
FileSystem fs = nxt.getFileSystem(job);
if (fs.exists(nxt)) {
FileStatus[] fStats = fs.listStatus(nxt);
@@ -213,7 +213,7 @@
job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils
.escapeString(currPath.toString()));
- tableDesc tmp = currTbl;
+ TableDesc tmp = currTbl;
if (tmp == null) {
tmp = currPart.getTableDesc();
}
@@ -302,12 +302,12 @@
public ObjectInspector getOutputObjectInspector() throws HiveException {
try {
if (work.getTblDir() != null) {
- tableDesc tbl = work.getTblDesc();
+ TableDesc tbl = work.getTblDesc();
Deserializer serde = tbl.getDeserializerClass().newInstance();
serde.initialize(job, tbl.getProperties());
return serde.getObjectInspector();
} else {
- List<partitionDesc> listParts = work.getPartDesc();
+ List<PartitionDesc> listParts = work.getPartDesc();
currPart = listParts.get(0);
serde = currPart.getTableDesc().getDeserializerClass().newInstance();
serde.initialize(job, currPart.getTableDesc().getProperties());
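
The table-directory branch of getOutputObjectInspector above reduces to three steps: instantiate the deserializer named by the TableDesc, initialize it with the table properties, and return its object inspector. A compile-time sketch against the renamed types, assuming the 0.5-era APIs visible in this diff (exception handling elided):

    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.hive.serde2.Deserializer;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.mapred.JobConf;

    class InspectorSketch {
      // Mirrors the table branch of FetchOperator.getOutputObjectInspector.
      static ObjectInspector inspectorFor(TableDesc tbl, JobConf job)
          throws Exception {
        Deserializer serde = tbl.getDeserializerClass().newInstance();
        serde.initialize(job, tbl.getProperties());
        return serde.getObjectInspector();
      }
    }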
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java Mon Jan 25 18:48:58 2010
@@ -26,8 +26,8 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -39,7 +39,7 @@
/**
* FetchTask implementation
**/
-public class FetchTask extends Task<fetchWork> implements Serializable {
+public class FetchTask extends Task<FetchWork> implements Serializable {
private static final long serialVersionUID = 1L;
private int maxRows = 100;
@@ -85,7 +85,7 @@
/**
* Return the tableDesc of the fetchWork
*/
- public tableDesc getTblDesc() {
+ public TableDesc getTblDesc() {
return work.getTblDesc();
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Mon Jan 25 18:48:58 2010
@@ -28,7 +28,7 @@
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.Serializer;
@@ -41,7 +41,7 @@
/**
* File Sink operator implementation
**/
-public class FileSinkOperator extends TerminalOperator<fileSinkDesc> implements
+public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
Serializable {
public static interface RecordWriter {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java Mon Jan 25 18:48:58 2010
@@ -23,7 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.filterDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -32,7 +32,7 @@
/**
* Filter operator implementation
**/
-public class FilterOperator extends Operator<filterDesc> implements
+public class FilterOperator extends Operator<FilterDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java Mon Jan 25 18:48:58 2010
@@ -21,13 +21,13 @@
import java.io.Serializable;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.forwardDesc;
+import org.apache.hadoop.hive.ql.plan.ForwardDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
/**
* Forward Operator Just forwards. Doesn't do anything itself.
**/
-public class ForwardOperator extends Operator<forwardDesc> implements
+public class ForwardOperator extends Operator<ForwardDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Mon Jan 25 18:48:58 2010
@@ -35,8 +35,8 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.UDAFMax;
import org.apache.hadoop.hive.ql.udf.UDAFMin;
import org.apache.hadoop.hive.ql.udf.UDFAbs;
@@ -869,11 +869,11 @@
* Get the UDF class from an exprNodeDesc. Returns null if the exprNodeDesc
* does not contain a UDF class.
*/
- private static Class<? extends UDF> getUDFClassFromExprDesc(exprNodeDesc desc) {
- if (!(desc instanceof exprNodeGenericFuncDesc)) {
+ private static Class<? extends UDF> getUDFClassFromExprDesc(ExprNodeDesc desc) {
+ if (!(desc instanceof ExprNodeGenericFuncDesc)) {
return null;
}
- exprNodeGenericFuncDesc genericFuncDesc = (exprNodeGenericFuncDesc) desc;
+ ExprNodeGenericFuncDesc genericFuncDesc = (ExprNodeGenericFuncDesc) desc;
if (!(genericFuncDesc.getGenericUDF() instanceof GenericUDFBridge)) {
return null;
}
@@ -905,7 +905,7 @@
/**
* Returns whether the exprNodeDesc is a node of "and", "or", "not".
*/
- public static boolean isOpAndOrNot(exprNodeDesc desc) {
+ public static boolean isOpAndOrNot(ExprNodeDesc desc) {
Class<? extends UDF> udfClass = getUDFClassFromExprDesc(desc);
return UDFOPAnd.class == udfClass || UDFOPOr.class == udfClass
|| UDFOPNot.class == udfClass;
@@ -914,7 +914,7 @@
/**
* Returns whether the exprNodeDesc is a node of "and".
*/
- public static boolean isOpAnd(exprNodeDesc desc) {
+ public static boolean isOpAnd(ExprNodeDesc desc) {
Class<? extends UDF> udfClass = getUDFClassFromExprDesc(desc);
return UDFOPAnd.class == udfClass;
}
@@ -922,7 +922,7 @@
/**
* Returns whether the exprNodeDesc is a node of "positive".
*/
- public static boolean isOpPositive(exprNodeDesc desc) {
+ public static boolean isOpPositive(ExprNodeDesc desc) {
Class<? extends UDF> udfClass = getUDFClassFromExprDesc(desc);
return UDFOPPositive.class == udfClass;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java Mon Jan 25 18:48:58 2010
@@ -26,8 +26,8 @@
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.FunctionWork;
-import org.apache.hadoop.hive.ql.plan.createFunctionDesc;
-import org.apache.hadoop.hive.ql.plan.dropFunctionDesc;
+import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc;
+import org.apache.hadoop.hive.ql.plan.DropFunctionDesc;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
@@ -53,19 +53,19 @@
@Override
public int execute() {
- createFunctionDesc createFunctionDesc = work.getCreateFunctionDesc();
+ CreateFunctionDesc createFunctionDesc = work.getCreateFunctionDesc();
if (createFunctionDesc != null) {
return createFunction(createFunctionDesc);
}
- dropFunctionDesc dropFunctionDesc = work.getDropFunctionDesc();
+ DropFunctionDesc dropFunctionDesc = work.getDropFunctionDesc();
if (dropFunctionDesc != null) {
return dropFunction(dropFunctionDesc);
}
return 0;
}
- private int createFunction(createFunctionDesc createFunctionDesc) {
+ private int createFunction(CreateFunctionDesc createFunctionDesc) {
try {
Class<?> udfClass = getUdfClass(createFunctionDesc);
if (UDF.class.isAssignableFrom(udfClass)) {
@@ -98,7 +98,7 @@
}
}
- private int dropFunction(dropFunctionDesc dropFunctionDesc) {
+ private int dropFunction(DropFunctionDesc dropFunctionDesc) {
try {
FunctionRegistry.unregisterTemporaryUDF(dropFunctionDesc
.getFunctionName());
@@ -110,7 +110,7 @@
}
@SuppressWarnings("unchecked")
- private Class<?> getUdfClass(createFunctionDesc desc)
+ private Class<?> getUdfClass(CreateFunctionDesc desc)
throws ClassNotFoundException {
return Class.forName(desc.getClassName(), true, JavaUtils.getClassLoader());
}
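
CreateFunctionDesc carries just a function name and a class name; the task resolves the class reflectively on the session class loader and registers it according to which base type it implements. A sketch of the UDF branch of that dispatch, using the class-loading call shown above (hypothetical helper, other branches omitted):

    import org.apache.hadoop.hive.ql.exec.UDF;

    class UdfLoadSketch {
      // Resolve a class by name on a given loader, as getUdfClass does, and
      // test the branch createFunction takes for plain UDFs.
      static boolean isPlainUdf(String className, ClassLoader loader)
          throws ClassNotFoundException {
        Class<?> udfClass = Class.forName(className, true, loader);
        return UDF.class.isAssignableFrom(udfClass);
      }
    }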
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java Mon Jan 25 18:48:58 2010
@@ -34,9 +34,9 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.OpParseContext;
-import org.apache.hadoop.hive.ql.plan.aggregationDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.groupByDesc;
+import org.apache.hadoop.hive.ql.plan.AggregationDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer;
@@ -53,7 +53,7 @@
/**
* GroupBy operator implementation.
*/
-public class GroupByOperator extends Operator<groupByDesc> implements
+public class GroupByOperator extends Operator<GroupByDesc> implements
Serializable {
static final private Log LOG = LogFactory.getLog(GroupByOperator.class
@@ -185,7 +185,7 @@
.getAggregators().size()][];
aggregationParameterObjects = new Object[conf.getAggregators().size()][];
for (int i = 0; i < aggregationParameterFields.length; i++) {
- ArrayList<exprNodeDesc> parameters = conf.getAggregators().get(i)
+ ArrayList<ExprNodeDesc> parameters = conf.getAggregators().get(i)
.getParameters();
aggregationParameterFields[i] = new ExprNodeEvaluator[parameters.size()];
aggregationParameterObjectInspectors[i] = new ObjectInspector[parameters
@@ -215,7 +215,7 @@
aggregationEvaluators = new GenericUDAFEvaluator[conf.getAggregators()
.size()];
for (int i = 0; i < aggregationEvaluators.length; i++) {
- aggregationDesc agg = conf.getAggregators().get(i);
+ AggregationDesc agg = conf.getAggregators().get(i);
aggregationEvaluators[i] = agg.getGenericUDAFEvaluator();
}
@@ -233,7 +233,7 @@
bucketGroup = conf.getBucketGroup();
aggregationsParametersLastInvoke = new Object[conf.getAggregators().size()][];
- if (conf.getMode() != groupByDesc.Mode.HASH || bucketGroup) {
+ if (conf.getMode() != GroupByDesc.Mode.HASH || bucketGroup) {
aggregations = newAggregations();
hashAggr = false;
} else {
@@ -411,7 +411,7 @@
// 64 bytes is the overhead for a reference
fixedRowSize = javaHashEntryOverHead;
- ArrayList<exprNodeDesc> keys = conf.getKeys();
+ ArrayList<ExprNodeDesc> keys = conf.getKeys();
// Go over all the keys and get the size of the fields of fixed length. Keep
// track of the variable length keys
@@ -905,15 +905,15 @@
public List<String> genColLists(
HashMap<Operator<? extends Serializable>, OpParseContext> opParseCtx) {
List<String> colLists = new ArrayList<String>();
- ArrayList<exprNodeDesc> keys = conf.getKeys();
- for (exprNodeDesc key : keys) {
+ ArrayList<ExprNodeDesc> keys = conf.getKeys();
+ for (ExprNodeDesc key : keys) {
colLists = Utilities.mergeUniqElems(colLists, key.getCols());
}
- ArrayList<aggregationDesc> aggrs = conf.getAggregators();
- for (aggregationDesc aggr : aggrs) {
- ArrayList<exprNodeDesc> params = aggr.getParameters();
- for (exprNodeDesc param : params) {
+ ArrayList<AggregationDesc> aggrs = conf.getAggregators();
+ for (AggregationDesc aggr : aggrs) {
+ ArrayList<ExprNodeDesc> params = aggr.getParameters();
+ for (ExprNodeDesc param : params) {
colLists = Utilities.mergeUniqElems(colLists, param.getCols());
}
}
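
Each AggregationDesc contributes one GenericUDAFEvaluator plus one ExprNodeEvaluator per parameter expression; the initialization loops above materialize those two parallel structures. A condensed sketch of the parameter-evaluator step, assuming the factory call shown in this revision:

    import java.util.ArrayList;
    import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
    import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    class GroupBySketch {
      // Build one evaluator per aggregation parameter, as GroupByOperator
      // does for each entry of conf.getAggregators().
      static ExprNodeEvaluator[] evaluatorsFor(ArrayList<ExprNodeDesc> params)
          throws Exception {
        ExprNodeEvaluator[] evals = new ExprNodeEvaluator[params.size()];
        for (int i = 0; i < params.size(); i++) {
          evals[i] = ExprNodeEvaluatorFactory.get(params.get(i));
        }
        return evals;
      }
    }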
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java Mon Jan 25 18:48:58 2010
@@ -28,7 +28,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
@@ -36,7 +36,7 @@
/**
* Join operator implementation.
*/
-public class JoinOperator extends CommonJoinOperator<joinDesc> implements
+public class JoinOperator extends CommonJoinOperator<JoinDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java Mon Jan 25 18:48:58 2010
@@ -23,7 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.lateralViewJoinDesc;
+import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
@@ -57,7 +57,7 @@
* previous LVJ operator.
*/
-public class LateralViewJoinOperator extends Operator<lateralViewJoinDesc> {
+public class LateralViewJoinOperator extends Operator<LateralViewJoinDesc> {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java Mon Jan 25 18:48:58 2010
@@ -22,13 +22,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.limitDesc;
+import org.apache.hadoop.hive.ql.plan.LimitDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
/**
* Limit operator implementation Limits the number of rows to be passed on.
**/
-public class LimitOperator extends Operator<limitDesc> implements Serializable {
+public class LimitOperator extends Operator<LimitDesc> implements Serializable {
private static final long serialVersionUID = 1L;
transient protected int limit;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Mon Jan 25 18:48:58 2010
@@ -34,8 +34,8 @@
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectValue;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
@@ -50,7 +50,7 @@
/**
* Map side Join operator implementation.
*/
-public class MapJoinOperator extends CommonJoinOperator<mapJoinDesc> implements
+public class MapJoinOperator extends CommonJoinOperator<MapJoinDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
static final private Log LOG = LogFactory.getLog(MapJoinOperator.class
@@ -86,7 +86,7 @@
public static class MapJoinObjectCtx {
ObjectInspector standardOI;
SerDe serde;
- tableDesc tblDesc;
+ TableDesc tblDesc;
Configuration conf;
/**
@@ -94,7 +94,7 @@
* @param serde
*/
public MapJoinObjectCtx(ObjectInspector standardOI, SerDe serde,
- tableDesc tblDesc, Configuration conf) {
+ TableDesc tblDesc, Configuration conf) {
this.standardOI = standardOI;
this.serde = serde;
this.tblDesc = tblDesc;
@@ -115,7 +115,7 @@
return serde;
}
- public tableDesc getTblDesc() {
+ public TableDesc getTblDesc() {
return tblDesc;
}
@@ -239,7 +239,7 @@
if (firstRow) {
metadataKeyTag = nextVal++;
- tableDesc keyTableDesc = conf.getKeyTblDesc();
+ TableDesc keyTableDesc = conf.getKeyTblDesc();
SerDe keySerializer = (SerDe) ReflectionUtils.newInstance(
keyTableDesc.getDeserializerClass(), null);
keySerializer.initialize(null, keyTableDesc.getProperties());
@@ -299,7 +299,7 @@
if (metadataValueTag[tag] == -1) {
metadataValueTag[tag] = nextVal++;
- tableDesc valueTableDesc = conf.getValueTblDescs().get(tag);
+ TableDesc valueTableDesc = conf.getValueTblDescs().get(tag);
SerDe valueSerDe = (SerDe) ReflectionUtils.newInstance(valueTableDesc
.getDeserializerClass(), null);
valueSerDe.initialize(null, valueTableDesc.getProperties());
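
Each distinct key or value layout is registered once, keyed by a metadata tag: the SerDe named by the TableDesc is instantiated reflectively, initialized from the descriptor's properties, and bundled into a MapJoinObjectCtx. A sketch of that setup step as it appears above (exception handling elided):

    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.hive.serde2.SerDe;
    import org.apache.hadoop.util.ReflectionUtils;

    class MapJoinSerDeSketch {
      // Mirrors the first-row key SerDe setup in MapJoinOperator.
      static SerDe serdeFor(TableDesc keyTableDesc) throws Exception {
        SerDe keySerializer = (SerDe) ReflectionUtils.newInstance(
            keyTableDesc.getDeserializerClass(), null);
        keySerializer.initialize(null, keyTableDesc.getProperties());
        return keySerializer;
      }
    }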
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Mon Jan 25 18:48:58 2010
@@ -32,8 +32,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -49,7 +49,7 @@
* different from regular operators in that it starts off by processing a
* Writable data structure from a Table (instead of a Hive Object).
**/
-public class MapOperator extends Operator<mapredWork> implements Serializable {
+public class MapOperator extends Operator<MapredWork> implements Serializable {
private static final long serialVersionUID = 1L;
@@ -168,18 +168,18 @@
* @param mrwork
* @throws HiveException
*/
- public void initializeAsRoot(Configuration hconf, mapredWork mrwork)
+ public void initializeAsRoot(Configuration hconf, MapredWork mrwork)
throws HiveException {
setConf(mrwork);
setChildren(hconf);
initialize(hconf, null);
}
- private static MapOpCtx initObjectInspector(mapredWork conf,
+ private static MapOpCtx initObjectInspector(MapredWork conf,
Configuration hconf, String onefile) throws HiveException,
ClassNotFoundException, InstantiationException, IllegalAccessException,
SerDeException {
- partitionDesc td = conf.getPathToPartitionInfo().get(onefile);
+ PartitionDesc td = conf.getPathToPartitionInfo().get(onefile);
LinkedHashMap<String, String> partSpec = td.getPartSpec();
Properties tblProps = td.getProperties();
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java Mon Jan 25 18:48:58 2010
@@ -29,7 +29,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.exec.Utilities.StreamPrinter;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -39,7 +39,7 @@
* runs it from a separate jvm. The primary issue with this is the inability to
* control logging from a separate jvm in a consistent manner
**/
-public class MapRedTask extends Task<mapredWork> implements Serializable {
+public class MapRedTask extends Task<MapredWork> implements Serializable {
private static final long serialVersionUID = 1L;
@@ -86,7 +86,7 @@
String hiveConfArgs = ExecDriver.generateCmdLine(conf);
File scratchDir = new File(conf.getVar(HiveConf.ConfVars.SCRATCHDIR));
- mapredWork plan = getWork();
+ MapredWork plan = getWork();
File planFile = File.createTempFile("plan", ".xml", scratchDir);
LOG.info("Generating plan file " + planFile.toString());
@@ -192,7 +192,7 @@
@Override
public boolean hasReduce() {
- mapredWork w = getWork();
+ MapredWork w = getWork();
return w.getReducer() != null;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Mon Jan 25 18:48:58 2010
@@ -34,16 +34,16 @@
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.loadFileDesc;
-import org.apache.hadoop.hive.ql.plan.loadTableDesc;
-import org.apache.hadoop.hive.ql.plan.moveWork;
+import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.util.StringUtils;
/**
* MoveTask implementation
**/
-public class MoveTask extends Task<moveWork> implements Serializable {
+public class MoveTask extends Task<MoveWork> implements Serializable {
private static final long serialVersionUID = 1L;
@@ -57,7 +57,7 @@
try {
// Do any hive related operations like moving tables and files
// to appropriate locations
- loadFileDesc lfd = work.getLoadFileWork();
+ LoadFileDesc lfd = work.getLoadFileWork();
if (lfd != null) {
Path targetPath = new Path(lfd.getTargetDir());
Path sourcePath = new Path(lfd.getSourceDir());
@@ -108,7 +108,7 @@
}
// Next we do this for tables and partitions
- loadTableDesc tbd = work.getLoadTableWork();
+ LoadTableDesc tbd = work.getLoadTableWork();
if (tbd != null) {
String mesg = "Loading data to table "
+ tbd.getTable().getTableName()
@@ -181,12 +181,12 @@
* Does the move task involve moving to a local file system
*/
public boolean isLocal() {
- loadTableDesc tbd = work.getLoadTableWork();
+ LoadTableDesc tbd = work.getLoadTableWork();
if (tbd != null) {
return false;
}
- loadFileDesc lfd = work.getLoadFileWork();
+ LoadFileDesc lfd = work.getLoadFileWork();
if (lfd != null) {
if (lfd.getIsDfsDir()) {
return false;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java Mon Jan 25 18:48:58 2010
@@ -32,8 +32,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.explain;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
@@ -159,7 +159,7 @@
this.conf = conf;
}
- @explain
+ @Explain
public T getConf() {
return conf;
}
@@ -201,7 +201,7 @@
* optimizer and built during semantic analysis contains only key elements for
* reduce sink and group by op
*/
- protected transient Map<String, exprNodeDesc> colExprMap;
+ protected transient Map<String, ExprNodeDesc> colExprMap;
public void setId(String id) {
this.id = id;
@@ -707,11 +707,11 @@
*
* @return null if the operator doesn't change columns
*/
- public Map<String, exprNodeDesc> getColumnExprMap() {
+ public Map<String, ExprNodeDesc> getColumnExprMap() {
return colExprMap;
}
- public void setColumnExprMap(Map<String, exprNodeDesc> colExprMap) {
+ public void setColumnExprMap(Map<String, ExprNodeDesc> colExprMap) {
this.colExprMap = colExprMap;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java Mon Jan 25 18:48:58 2010
@@ -22,22 +22,22 @@
import java.util.ArrayList;
import java.util.List;
-import org.apache.hadoop.hive.ql.plan.collectDesc;
-import org.apache.hadoop.hive.ql.plan.extractDesc;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.filterDesc;
-import org.apache.hadoop.hive.ql.plan.forwardDesc;
-import org.apache.hadoop.hive.ql.plan.groupByDesc;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
-import org.apache.hadoop.hive.ql.plan.lateralViewJoinDesc;
-import org.apache.hadoop.hive.ql.plan.limitDesc;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.scriptDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
-import org.apache.hadoop.hive.ql.plan.tableScanDesc;
-import org.apache.hadoop.hive.ql.plan.udtfDesc;
-import org.apache.hadoop.hive.ql.plan.unionDesc;
+import org.apache.hadoop.hive.ql.plan.CollectDesc;
+import org.apache.hadoop.hive.ql.plan.ExtractDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc;
+import org.apache.hadoop.hive.ql.plan.ForwardDesc;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
+import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc;
+import org.apache.hadoop.hive.ql.plan.LimitDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.ScriptDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.plan.UDTFDesc;
+import org.apache.hadoop.hive.ql.plan.UnionDesc;
public class OperatorFactory {
@@ -54,30 +54,30 @@
public static ArrayList<opTuple> opvec;
static {
opvec = new ArrayList<opTuple>();
- opvec.add(new opTuple<filterDesc>(filterDesc.class, FilterOperator.class));
- opvec.add(new opTuple<selectDesc>(selectDesc.class, SelectOperator.class));
+ opvec.add(new opTuple<FilterDesc>(FilterDesc.class, FilterOperator.class));
+ opvec.add(new opTuple<SelectDesc>(SelectDesc.class, SelectOperator.class));
opvec
- .add(new opTuple<forwardDesc>(forwardDesc.class, ForwardOperator.class));
- opvec.add(new opTuple<fileSinkDesc>(fileSinkDesc.class,
+ .add(new opTuple<ForwardDesc>(ForwardDesc.class, ForwardOperator.class));
+ opvec.add(new opTuple<FileSinkDesc>(FileSinkDesc.class,
FileSinkOperator.class));
opvec
- .add(new opTuple<collectDesc>(collectDesc.class, CollectOperator.class));
- opvec.add(new opTuple<scriptDesc>(scriptDesc.class, ScriptOperator.class));
- opvec.add(new opTuple<reduceSinkDesc>(reduceSinkDesc.class,
+ .add(new opTuple<CollectDesc>(CollectDesc.class, CollectOperator.class));
+ opvec.add(new opTuple<ScriptDesc>(ScriptDesc.class, ScriptOperator.class));
+ opvec.add(new opTuple<ReduceSinkDesc>(ReduceSinkDesc.class,
ReduceSinkOperator.class));
opvec
- .add(new opTuple<extractDesc>(extractDesc.class, ExtractOperator.class));
+ .add(new opTuple<ExtractDesc>(ExtractDesc.class, ExtractOperator.class));
opvec
- .add(new opTuple<groupByDesc>(groupByDesc.class, GroupByOperator.class));
- opvec.add(new opTuple<joinDesc>(joinDesc.class, JoinOperator.class));
+ .add(new opTuple<GroupByDesc>(GroupByDesc.class, GroupByOperator.class));
+ opvec.add(new opTuple<JoinDesc>(JoinDesc.class, JoinOperator.class));
opvec
- .add(new opTuple<mapJoinDesc>(mapJoinDesc.class, MapJoinOperator.class));
- opvec.add(new opTuple<limitDesc>(limitDesc.class, LimitOperator.class));
- opvec.add(new opTuple<tableScanDesc>(tableScanDesc.class,
+ .add(new opTuple<MapJoinDesc>(MapJoinDesc.class, MapJoinOperator.class));
+ opvec.add(new opTuple<LimitDesc>(LimitDesc.class, LimitOperator.class));
+ opvec.add(new opTuple<TableScanDesc>(TableScanDesc.class,
TableScanOperator.class));
- opvec.add(new opTuple<unionDesc>(unionDesc.class, UnionOperator.class));
- opvec.add(new opTuple<udtfDesc>(udtfDesc.class, UDTFOperator.class));
- opvec.add(new opTuple<lateralViewJoinDesc>(lateralViewJoinDesc.class,
+ opvec.add(new opTuple<UnionDesc>(UnionDesc.class, UnionOperator.class));
+ opvec.add(new opTuple<UDTFDesc>(UDTFDesc.class, UDTFOperator.class));
+ opvec.add(new opTuple<LateralViewJoinDesc>(LateralViewJoinDesc.class,
LateralViewJoinOperator.class));
}
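
OperatorFactory keeps a flat list of (descriptor class, operator class) pairs and picks the operator whose descriptor class matches the plan node; TaskFactory (below) uses the identical taskTuple scheme for work classes. A generic sketch of that lookup style, independent of the Hive types and assuming a simple exact-class scan:

    import java.util.ArrayList;
    import java.util.List;

    class RegistrySketch {
      // Pair of descriptor class and implementation class, like opTuple.
      static final class Tuple<T> {
        final Class<T> descClass;
        final Class<?> implClass;
        Tuple(Class<T> descClass, Class<?> implClass) {
          this.descClass = descClass;
          this.implClass = implClass;
        }
      }

      static final List<Tuple<?>> registry = new ArrayList<Tuple<?>>();

      // Linear scan on the exact descriptor class; returns null if no
      // implementation is registered for it.
      static Class<?> implFor(Class<?> descClass) {
        for (Tuple<?> t : registry) {
          if (t.descClass == descClass) {
            return t.implClass;
          }
        }
        return null;
      }
    }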
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java Mon Jan 25 18:48:58 2010
@@ -25,9 +25,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.HiveKey;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.Serializer;
@@ -42,7 +42,7 @@
/**
* Reduce Sink Operator sends output to the reduce stage
**/
-public class ReduceSinkOperator extends TerminalOperator<reduceSinkDesc>
+public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
implements Serializable {
private static final long serialVersionUID = 1L;
@@ -78,19 +78,19 @@
try {
keyEval = new ExprNodeEvaluator[conf.getKeyCols().size()];
int i = 0;
- for (exprNodeDesc e : conf.getKeyCols()) {
+ for (ExprNodeDesc e : conf.getKeyCols()) {
keyEval[i++] = ExprNodeEvaluatorFactory.get(e);
}
valueEval = new ExprNodeEvaluator[conf.getValueCols().size()];
i = 0;
- for (exprNodeDesc e : conf.getValueCols()) {
+ for (ExprNodeDesc e : conf.getValueCols()) {
valueEval[i++] = ExprNodeEvaluatorFactory.get(e);
}
partitionEval = new ExprNodeEvaluator[conf.getPartitionCols().size()];
i = 0;
- for (exprNodeDesc e : conf.getPartitionCols()) {
+ for (ExprNodeDesc e : conf.getPartitionCols()) {
partitionEval[i++] = ExprNodeEvaluatorFactory.get(e);
}
@@ -98,13 +98,13 @@
tagByte[0] = (byte) tag;
LOG.info("Using tag = " + tag);
- tableDesc keyTableDesc = conf.getKeySerializeInfo();
+ TableDesc keyTableDesc = conf.getKeySerializeInfo();
keySerializer = (Serializer) keyTableDesc.getDeserializerClass()
.newInstance();
keySerializer.initialize(null, keyTableDesc.getProperties());
keyIsText = keySerializer.getSerializedClass().equals(Text.class);
- tableDesc valueTableDesc = conf.getValueSerializeInfo();
+ TableDesc valueTableDesc = conf.getValueSerializeInfo();
valueSerializer = (Serializer) valueTableDesc.getDeserializerClass()
.newInstance();
valueSerializer.initialize(null, valueTableDesc.getProperties());
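
The key and value serializers come from TableDescs embedded in the ReduceSinkDesc; note that a Serializer is obtained through getDeserializerClass(), since the underlying SerDe class serves both directions, as the cast above shows. A sketch of the key-side setup (exception handling elided):

    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.hive.serde2.Serializer;

    class KeySerializerSketch {
      // Mirrors ReduceSinkOperator's key serializer initialization.
      static Serializer keySerializerFor(TableDesc keyTableDesc)
          throws Exception {
        Serializer keySerializer =
            (Serializer) keyTableDesc.getDeserializerClass().newInstance();
        keySerializer.initialize(null, keyTableDesc.getProperties());
        return keySerializer;
      }
    }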
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java Mon Jan 25 18:48:58 2010
@@ -35,7 +35,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.scriptDesc;
+import org.apache.hadoop.hive.ql.plan.ScriptDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
@@ -47,7 +47,7 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
-public class ScriptOperator extends Operator<scriptDesc> implements
+public class ScriptOperator extends Operator<ScriptDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java Mon Jan 25 18:48:58 2010
@@ -23,15 +23,15 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
/**
* Select operator implementation
**/
-public class SelectOperator extends Operator<selectDesc> implements
+public class SelectOperator extends Operator<SelectDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
@@ -47,7 +47,7 @@
return;
}
- ArrayList<exprNodeDesc> colList = conf.getColList();
+ ArrayList<ExprNodeDesc> colList = conf.getColList();
eval = new ExprNodeEvaluator[colList.size()];
for (int i = 0; i < colList.size(); i++) {
assert (colList.get(i) != null);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java Mon Jan 25 18:48:58 2010
@@ -34,8 +34,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -82,7 +82,7 @@
private int skewKeyDefinition = -1;
private Map<Byte, StructObjectInspector> skewKeysTableObjectInspector = null;
private Map<Byte, SerDe> tblSerializers = null;
- private Map<Byte, tableDesc> tblDesc = null;
+ private Map<Byte, TableDesc> tblDesc = null;
private Map<Byte, Boolean> bigKeysExistingMap = null;
@@ -92,7 +92,7 @@
private final CommonJoinOperator<? extends Serializable> joinOp;
private final int numAliases;
- private final joinDesc conf;
+ private final JoinDesc conf;
public SkewJoinHandler(CommonJoinOperator<? extends Serializable> joinOp) {
this.joinOp = joinOp;
@@ -102,7 +102,7 @@
public void initiliaze(Configuration hconf) {
this.hconf = hconf;
- joinDesc desc = joinOp.getConf();
+ JoinDesc desc = joinOp.getConf();
skewKeyDefinition = desc.getSkewKeyDefinition();
skewKeysTableObjectInspector = new HashMap<Byte, StructObjectInspector>(
numAliases);
@@ -123,7 +123,7 @@
for (int k = 0; k < keyFieldSize; k++) {
skewTableKeyInspectors.add(keyFields.get(k).getFieldObjectInspector());
}
- tableDesc joinKeyDesc = desc.getKeyTableDesc();
+ TableDesc joinKeyDesc = desc.getKeyTableDesc();
List<String> keyColNames = Utilities.getColumnNames(joinKeyDesc
.getProperties());
StructObjectInspector structTblKeyInpector = ObjectInspectorFactory
@@ -140,7 +140,7 @@
break;
}
- tableDesc valTblDesc = joinOp.getSpillTableDesc(alias);
+ TableDesc valTblDesc = joinOp.getSpillTableDesc(alias);
List<String> valColNames = new ArrayList<String>();
if (valTblDesc != null) {
valColNames = Utilities.getColumnNames(valTblDesc.getProperties());
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java Mon Jan 25 18:48:58 2010
@@ -21,7 +21,7 @@
import java.io.Serializable;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.tableScanDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
/**
@@ -29,7 +29,7 @@
* forward it. This will be needed as part of local work when data is not being
* read as part of map-reduce framework
**/
-public class TableScanOperator extends Operator<tableScanDesc> implements
+public class TableScanOperator extends Operator<TableScanDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java Mon Jan 25 18:48:58 2010
@@ -26,11 +26,11 @@
import org.apache.hadoop.hive.ql.plan.ConditionalWork;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.FunctionWork;
-import org.apache.hadoop.hive.ql.plan.copyWork;
-import org.apache.hadoop.hive.ql.plan.explainWork;
-import org.apache.hadoop.hive.ql.plan.fetchWork;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.moveWork;
+import org.apache.hadoop.hive.ql.plan.CopyWork;
+import org.apache.hadoop.hive.ql.plan.ExplainWork;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
/**
* TaskFactory implementation
@@ -50,14 +50,14 @@
public static ArrayList<taskTuple<? extends Serializable>> taskvec;
static {
taskvec = new ArrayList<taskTuple<? extends Serializable>>();
- taskvec.add(new taskTuple<moveWork>(moveWork.class, MoveTask.class));
- taskvec.add(new taskTuple<fetchWork>(fetchWork.class, FetchTask.class));
- taskvec.add(new taskTuple<copyWork>(copyWork.class, CopyTask.class));
+ taskvec.add(new taskTuple<MoveWork>(MoveWork.class, MoveTask.class));
+ taskvec.add(new taskTuple<FetchWork>(FetchWork.class, FetchTask.class));
+ taskvec.add(new taskTuple<CopyWork>(CopyWork.class, CopyTask.class));
taskvec.add(new taskTuple<DDLWork>(DDLWork.class, DDLTask.class));
taskvec.add(new taskTuple<FunctionWork>(FunctionWork.class,
FunctionTask.class));
taskvec
- .add(new taskTuple<explainWork>(explainWork.class, ExplainTask.class));
+ .add(new taskTuple<ExplainWork>(ExplainWork.class, ExplainTask.class));
taskvec.add(new taskTuple<ConditionalWork>(ConditionalWork.class,
ConditionalTask.class));
// we are taking this out to allow us to instantiate either MapRedTask or
@@ -99,7 +99,7 @@
}
}
- if (workClass == mapredWork.class) {
+ if (workClass == MapredWork.class) {
boolean viachild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD);
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java Mon Jan 25 18:48:58 2010
@@ -26,7 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.udtfDesc;
+import org.apache.hadoop.hive.ql.plan.UDTFDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.ql.udf.generic.UDTFCollector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -34,7 +34,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-public class UDTFOperator extends Operator<udtfDesc> implements Serializable {
+public class UDTFOperator extends Operator<UDTFDesc> implements Serializable {
private static final long serialVersionUID = 1L;
protected final Log LOG = LogFactory.getLog(this.getClass().getName());
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java Mon Jan 25 18:48:58 2010
@@ -24,7 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.unionDesc;
+import org.apache.hadoop.hive.ql.plan.UnionDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils.ReturnObjectInspectorResolver;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -35,7 +35,7 @@
/**
* Union Operator Just forwards. Doesn't do anything itself.
**/
-public class UnionOperator extends Operator<unionDesc> implements Serializable {
+public class UnionOperator extends Operator<UnionDesc> implements Serializable {
private static final long serialVersionUID = 1L;
StructObjectInspector[] parentObjInspectors;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Mon Jan 25 18:48:58 2010
@@ -73,10 +73,10 @@
import org.apache.hadoop.hive.ql.parse.ErrorMsg;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.groupByDesc;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -103,8 +103,8 @@
KEY, VALUE, ALIAS
};
- private static Map<String, mapredWork> gWorkMap = Collections
- .synchronizedMap(new HashMap<String, mapredWork>());
+ private static Map<String, MapredWork> gWorkMap = Collections
+ .synchronizedMap(new HashMap<String, MapredWork>());
static final private Log LOG = LogFactory.getLog(Utilities.class.getName());
public static void clearMapRedWork(Configuration job) {
@@ -128,8 +128,8 @@
}
}
- public static mapredWork getMapRedWork(Configuration job) {
- mapredWork gWork = null;
+ public static MapredWork getMapRedWork(Configuration job) {
+ MapredWork gWork = null;
try {
synchronized (gWorkMap) {
gWork = gWorkMap.get(getJobName(job));
@@ -141,7 +141,7 @@
}
InputStream in = new FileInputStream("HIVE_PLAN"
+ sanitizedJobId(job));
- mapredWork ret = deserializeMapRedWork(in, job);
+ MapredWork ret = deserializeMapRedWork(in, job);
gWork = ret;
gWork.initialize();
gWorkMap.put(getJobName(job), gWork);
@@ -185,7 +185,7 @@
}
}
- public static void setMapRedWork(Configuration job, mapredWork w) {
+ public static void setMapRedWork(Configuration job, MapredWork w) {
try {
// use the default file system of the job
FileSystem fs = FileSystem.get(job);
@@ -236,7 +236,7 @@
XMLEncoder e = new XMLEncoder(out);
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
- e.setPersistenceDelegate(groupByDesc.Mode.class, new EnumDelegate());
+ e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
e
.setPersistenceDelegate(Operator.ProgressCounter.class,
new EnumDelegate());
@@ -250,19 +250,19 @@
* standard output since it closes the output stream DO USE mapredWork.toXML()
* instead
*/
- public static void serializeMapRedWork(mapredWork w, OutputStream out) {
+ public static void serializeMapRedWork(MapredWork w, OutputStream out) {
XMLEncoder e = new XMLEncoder(out);
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
- e.setPersistenceDelegate(groupByDesc.Mode.class, new EnumDelegate());
+ e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
e.writeObject(w);
e.close();
}
- public static mapredWork deserializeMapRedWork(InputStream in,
+ public static MapredWork deserializeMapRedWork(InputStream in,
Configuration conf) {
XMLDecoder d = new XMLDecoder(in, null, null, conf.getClassLoader());
- mapredWork ret = (mapredWork) d.readObject();
+ MapredWork ret = (MapredWork) d.readObject();
d.close();
return (ret);
}
@@ -285,7 +285,7 @@
}
}
- public static tableDesc defaultTd;
+ public static TableDesc defaultTd;
static {
// by default we expect ^A separated strings
// This tableDesc does not provide column names. We should always use
@@ -378,14 +378,14 @@
}
}
- public static tableDesc getTableDesc(Table tbl) {
- return (new tableDesc(tbl.getDeserializer().getClass(), tbl
+ public static TableDesc getTableDesc(Table tbl) {
+ return (new TableDesc(tbl.getDeserializer().getClass(), tbl
.getInputFormatClass(), tbl.getOutputFormatClass(), tbl.getSchema()));
}
// column names and column types are all delimited by comma
- public static tableDesc getTableDesc(String cols, String colTypes) {
- return (new tableDesc(LazySimpleSerDe.class, SequenceFileInputFormat.class,
+ public static TableDesc getTableDesc(String cols, String colTypes) {
+ return (new TableDesc(LazySimpleSerDe.class, SequenceFileInputFormat.class,
HiveSequenceFileOutputFormat.class, Utilities.makeProperties(
org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, ""
+ Utilities.ctrlaCode,
@@ -393,15 +393,15 @@
org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES, colTypes)));
}
- public static partitionDesc getPartitionDesc(Partition part)
+ public static PartitionDesc getPartitionDesc(Partition part)
throws HiveException {
- return (new partitionDesc(part));
+ return (new PartitionDesc(part));
}
- public static void addMapWork(mapredWork mr, Table tbl, String alias,
+ public static void addMapWork(MapredWork mr, Table tbl, String alias,
Operator<?> work) {
mr.addMapWork(tbl.getDataLocation().getPath(), alias, work,
- new partitionDesc(getTableDesc(tbl), null));
+ new PartitionDesc(getTableDesc(tbl), null));
}
private static String getOpTreeSkel_helper(Operator<?> op, String indent) {
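
The getTableDesc overloads above assemble a table schema as a flat Properties bag through Utilities.makeProperties, whose definition isn't part of this diff. A plausible sketch of that helper, assuming alternating key/value varargs; the property keys and values below are illustrative stand-ins for the serde Constants and ctrlaCode used above, not guaranteed to match them exactly:

import java.util.Properties;

public class SchemaProps {
    // Assumed shape of Utilities.makeProperties: alternating key/value
    // varargs folded into a Properties object.
    static Properties makeProperties(String... keyValuePairs) {
        if (keyValuePairs.length % 2 != 0) {
            throw new IllegalArgumentException("expected key/value pairs");
        }
        Properties props = new Properties();
        for (int i = 0; i < keyValuePairs.length; i += 2) {
            props.setProperty(keyValuePairs[i], keyValuePairs[i + 1]);
        }
        return props;
    }

    public static void main(String[] args) {
        // Illustrative entries; the real keys come from serde Constants.
        Properties schema = makeProperties(
            "serialization.format", "1",    // ^A separator, like ctrlaCode
            "columns", "key,value",
            "columns.types", "string,string");
        System.out.println(schema.getProperty("columns.types"));
    }
}
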
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Mon Jan 25 18:48:58 2010
@@ -35,7 +35,7 @@
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -99,7 +99,7 @@
private List<Object> keyObject;
- private tableDesc tblDesc;
+ private TableDesc tblDesc;
boolean firstCalled = false; // once called first, it will never be able to
// write again.
@@ -498,7 +498,7 @@
this.keyObject = dummyKey;
}
- public void setTableDesc(tableDesc tblDesc) {
+ public void setTableDesc(TableDesc tblDesc) {
this.tblDesc = tblDesc;
}
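
The "once called first, it will never be able to write again" comment above describes RowContainer's one-way life cycle: writes are only legal before the first read. A toy illustration of that guard, not RowContainer's actual implementation:

import java.util.ArrayList;
import java.util.List;

public class OneWayContainer {
    // Toy version of the state machine noted above: reads lock out writes.
    private boolean firstCalled = false;
    private final List<String> rows = new ArrayList<String>();

    public void add(String row) {
        if (firstCalled) {
            throw new IllegalStateException("read-only after first()");
        }
        rows.add(row);
    }

    public String first() {
        firstCalled = true; // once called, add() refuses further rows
        return rows.isEmpty() ? null : rows.get(0);
    }

    public static void main(String[] args) {
        OneWayContainer c = new OneWayContainer();
        c.add("row1");
        System.out.println(c.first()); // prints row1
        // c.add("row2"); // would throw IllegalStateException
    }
}
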
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Mon Jan 25 18:48:58 2010
@@ -32,7 +32,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim;
import org.apache.hadoop.hive.shims.HadoopShims.InputSplitShim;
@@ -80,14 +80,14 @@
throws IOException {
this.inputSplitShim = inputSplitShim;
if (job != null) {
- Map<String, partitionDesc> pathToPartitionInfo = Utilities
+ Map<String, PartitionDesc> pathToPartitionInfo = Utilities
.getMapRedWork(job).getPathToPartitionInfo();
// extract all the inputFormatClass names for each chunk in the
// CombinedSplit.
Path[] ipaths = inputSplitShim.getPaths();
for (int i = 0; i < ipaths.length; i++) {
- partitionDesc part = null;
+ PartitionDesc part = null;
try {
part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i]
.getParent());
@@ -209,12 +209,12 @@
inputSplitShim.write(out);
if (inputFormatClassName == null) {
- Map<String, partitionDesc> pathToPartitionInfo = Utilities
+ Map<String, PartitionDesc> pathToPartitionInfo = Utilities
.getMapRedWork(getJob()).getPathToPartitionInfo();
// extract all the inputFormatClass names for each chunk in the
// CombinedSplit.
- partitionDesc part = null;
+ PartitionDesc part = null;
try {
part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim
.getPath(0).getParent());
@@ -298,12 +298,12 @@
CombineHiveRecordReader.class);
}
- protected static partitionDesc getPartitionDescFromPath(
- Map<String, partitionDesc> pathToPartitionInfo, Path dir)
+ protected static PartitionDesc getPartitionDescFromPath(
+ Map<String, PartitionDesc> pathToPartitionInfo, Path dir)
throws IOException {
// The format of the keys in pathToPartitionInfo sometimes contains a port
// and sometimes doesn't, so we just compare paths.
- for (Map.Entry<String, partitionDesc> entry : pathToPartitionInfo
+ for (Map.Entry<String, PartitionDesc> entry : pathToPartitionInfo
.entrySet()) {
try {
if (new URI(entry.getKey()).getPath().equals(dir.toUri().getPath())) {
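
As the comment in the hunk above notes, pathToPartitionInfo keys sometimes carry a port and sometimes don't, so the lookup compares URI paths instead of raw strings. The comparison in isolation, with hypothetical locations:

import java.net.URI;
import java.net.URISyntaxException;

public class PathOnlyMatch {
    // Mirrors the comparison above: strip scheme/host/port and compare
    // only the path components of the two locations.
    static boolean samePath(String key, String dir) throws URISyntaxException {
        return new URI(key).getPath().equals(new URI(dir).getPath());
    }

    public static void main(String[] args) throws URISyntaxException {
        // The map key carries a port, the split directory does not,
        // yet both refer to the same partition.
        System.out.println(samePath(
            "hdfs://namenode:8020/warehouse/t/ds=2010-01-25",
            "hdfs://namenode/warehouse/t/ds=2010-01-25")); // prints true
    }
}
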
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java Mon Jan 25 18:48:58 2010
@@ -32,8 +32,8 @@
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -200,8 +200,8 @@
}
public static RecordWriter getHiveRecordWriter(JobConf jc,
- tableDesc tableInfo, Class<? extends Writable> outputClass,
- fileSinkDesc conf, Path outPath) throws HiveException {
+ TableDesc tableInfo, Class<? extends Writable> outputClass,
+ FileSinkDesc conf, Path outPath) throws HiveException {
try {
HiveOutputFormat<?, ?> hiveOutputFormat = tableInfo
.getOutputFileFormatClass().newInstance();
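
getHiveRecordWriter above creates the table's output format reflectively with newInstance(), which presumes an accessible no-argument constructor. A minimal sketch of that pluggable-factory pattern, with a hypothetical Format interface standing in for HiveOutputFormat:

public class ReflectiveFormats {
    // Hypothetical stand-ins for HiveOutputFormat and one implementation.
    interface Format {
        String name();
    }

    public static class TextFormat implements Format {
        public String name() { return "text"; }
    }

    // newInstance() fails unless the class has an accessible no-arg
    // constructor, the usual contract for pluggable format classes.
    static Format create(Class<? extends Format> cls)
            throws InstantiationException, IllegalAccessException {
        return cls.newInstance();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(create(TextFormat.class).name()); // prints text
    }
}
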
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java Mon Jan 25 18:48:58 2010
@@ -36,8 +36,8 @@
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.partitionDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.Writable;
@@ -223,8 +223,8 @@
cloneJobConf, reporter));
}
- private Map<String, partitionDesc> pathToPartitionInfo;
- mapredWork mrwork = null;
+ private Map<String, PartitionDesc> pathToPartitionInfo;
+ MapredWork mrwork = null;
protected void init(JobConf job) {
mrwork = Utilities.getMapRedWork(job);
@@ -244,7 +244,7 @@
// for each dir, get the InputFormat, and do getSplits.
for (Path dir : dirs) {
- partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
+ PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
// create a new InputFormat instance if this is the first time to see this
// class
Class inputFormatClass = part.getInputFileFormatClass();
@@ -272,7 +272,7 @@
// for each dir, get the InputFormat, and do validateInput.
for (Path dir : dirs) {
- partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
+ PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
// create a new InputFormat instance if this is the first time to see this
// class
InputFormat inputFormat = getInputFormatFromCache(part
@@ -284,10 +284,10 @@
}
}
- protected static partitionDesc getPartitionDescFromPath(
- Map<String, partitionDesc> pathToPartitionInfo, Path dir)
+ protected static PartitionDesc getPartitionDescFromPath(
+ Map<String, PartitionDesc> pathToPartitionInfo, Path dir)
throws IOException {
- partitionDesc partDesc = pathToPartitionInfo.get(dir.toString());
+ PartitionDesc partDesc = pathToPartitionInfo.get(dir.toString());
if (partDesc == null) {
partDesc = pathToPartitionInfo.get(dir.toUri().getPath());
}
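
HiveInputFormat's getPartitionDescFromPath takes a cheaper route than the linear scan in CombineHiveInputFormat above: two direct map probes, first with the full path string, then falling back to the scheme-less URI path. The same fallback in isolation, with plain strings standing in for PartitionDesc:

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class FallbackLookup {
    // First probe with the full string form, then retry with just the
    // URI path, mirroring getPartitionDescFromPath above.
    static String lookup(Map<String, String> byPath, URI dir) {
        String part = byPath.get(dir.toString());
        if (part == null) {
            part = byPath.get(dir.getPath());
        }
        return part;
    }

    public static void main(String[] args) throws Exception {
        Map<String, String> byPath = new HashMap<String, String>();
        // Hypothetical entry keyed without scheme and authority.
        byPath.put("/warehouse/t/ds=2010-01-25", "partDesc(ds=2010-01-25)");
        URI dir = new URI("hdfs://namenode/warehouse/t/ds=2010-01-25");
        System.out.println(lookup(byPath, dir)); // hits on the second probe
    }
}
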
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java Mon Jan 25 18:48:58 2010
@@ -31,8 +31,8 @@
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.OpParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
/**
* This class implements the processor context for Column Pruner.
@@ -111,9 +111,9 @@
*/
public List<String> getColsFromSelectExpr(SelectOperator op) {
List<String> cols = new ArrayList<String>();
- selectDesc conf = op.getConf();
- ArrayList<exprNodeDesc> exprList = conf.getColList();
- for (exprNodeDesc expr : exprList) {
+ SelectDesc conf = op.getConf();
+ ArrayList<ExprNodeDesc> exprList = conf.getColList();
+ for (ExprNodeDesc expr : exprList) {
cols = Utilities.mergeUniqElems(cols, expr.getCols());
}
return cols;
@@ -132,14 +132,14 @@
public List<String> getSelectColsFromChildren(SelectOperator op,
List<String> colList) {
List<String> cols = new ArrayList<String>();
- selectDesc conf = op.getConf();
+ SelectDesc conf = op.getConf();
if (conf.isSelStarNoCompute()) {
cols.addAll(colList);
return cols;
}
- ArrayList<exprNodeDesc> selectExprs = conf.getColList();
+ ArrayList<ExprNodeDesc> selectExprs = conf.getColList();
// The colList is the output columns used by child operators, they are
// different
@@ -148,7 +148,7 @@
ArrayList<String> outputColumnNames = conf.getOutputColumnNames();
for (int i = 0; i < outputColumnNames.size(); i++) {
if (colList.contains(outputColumnNames.get(i))) {
- exprNodeDesc expr = selectExprs.get(i);
+ ExprNodeDesc expr = selectExprs.get(i);
cols = Utilities.mergeUniqElems(cols, expr.getCols());
}
}
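
Both helpers above reduce to one operation: walk a list of expressions and fold each expression's referenced columns into a single duplicate-free list via Utilities.mergeUniqElems. That utility isn't part of this diff; a sketch of its assumed contract:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MergeUniq {
    // Assumed contract: append each element of src to dest unless it is
    // already present, preserving first-seen order; return dest.
    static List<String> mergeUniqElems(List<String> dest, List<String> src) {
        if (src == null) {
            return dest;
        }
        for (String col : src) {
            if (!dest.contains(col)) {
                dest.add(col);
            }
        }
        return dest;
    }

    public static void main(String[] args) {
        List<String> cols = new ArrayList<String>(Arrays.asList("key"));
        // Expression columns overlap with what was already collected.
        cols = mergeUniqElems(cols, Arrays.asList("key", "value"));
        System.out.println(cols); // prints [key, value]
    }
}
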
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java?rev=902921&r1=902920&r2=902921&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java Mon Jan 25 18:48:58 2010
@@ -52,14 +52,14 @@
import org.apache.hadoop.hive.ql.parse.RowResolver;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.aggregationDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.groupByDesc;
-import org.apache.hadoop.hive.ql.plan.joinDesc;
-import org.apache.hadoop.hive.ql.plan.mapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
-import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.AggregationDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.JoinDesc;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
/**
* Factory for generating the different node processors used by ColumnPruner.
@@ -74,7 +74,7 @@
Object... nodeOutputs) throws SemanticException {
FilterOperator op = (FilterOperator) nd;
ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
- exprNodeDesc condn = op.getConf().getPredicate();
+ ExprNodeDesc condn = op.getConf().getPredicate();
// get list of columns used in the filter
List<String> cl = condn.getCols();
// merge it with the downstream col list
@@ -102,16 +102,16 @@
GroupByOperator op = (GroupByOperator) nd;
ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
List<String> colLists = new ArrayList<String>();
- groupByDesc conf = op.getConf();
- ArrayList<exprNodeDesc> keys = conf.getKeys();
- for (exprNodeDesc key : keys) {
+ GroupByDesc conf = op.getConf();
+ ArrayList<ExprNodeDesc> keys = conf.getKeys();
+ for (ExprNodeDesc key : keys) {
colLists = Utilities.mergeUniqElems(colLists, key.getCols());
}
- ArrayList<aggregationDesc> aggrs = conf.getAggregators();
- for (aggregationDesc aggr : aggrs) {
- ArrayList<exprNodeDesc> params = aggr.getParameters();
- for (exprNodeDesc param : params) {
+ ArrayList<AggregationDesc> aggrs = conf.getAggregators();
+ for (AggregationDesc aggr : aggrs) {
+ ArrayList<ExprNodeDesc> params = aggr.getParameters();
+ for (ExprNodeDesc param : params) {
colLists = Utilities.mergeUniqElems(colLists, param.getCols());
}
}
@@ -197,15 +197,15 @@
HashMap<Operator<? extends Serializable>, OpParseContext> opToParseCtxMap = cppCtx
.getOpToParseCtxMap();
RowResolver redSinkRR = opToParseCtxMap.get(op).getRR();
- reduceSinkDesc conf = op.getConf();
+ ReduceSinkDesc conf = op.getConf();
List<Operator<? extends Serializable>> childOperators = op
.getChildOperators();
List<Operator<? extends Serializable>> parentOperators = op
.getParentOperators();
List<String> colLists = new ArrayList<String>();
- ArrayList<exprNodeDesc> keys = conf.getKeyCols();
- for (exprNodeDesc key : keys) {
+ ArrayList<ExprNodeDesc> keys = conf.getKeyCols();
+ for (ExprNodeDesc key : keys) {
colLists = Utilities.mergeUniqElems(colLists, key.getCols());
}
@@ -222,9 +222,9 @@
flags[i] = false;
}
if (childJoinCols != null && childJoinCols.size() > 0) {
- Map<String, exprNodeDesc> exprMap = op.getColumnExprMap();
+ Map<String, ExprNodeDesc> exprMap = op.getColumnExprMap();
for (String childCol : childJoinCols) {
- exprNodeDesc desc = exprMap.get(childCol);
+ ExprNodeDesc desc = exprMap.get(childCol);
int index = conf.getValueCols().indexOf(desc);
flags[index] = true;
String[] nm = redSinkRR.reverseLookup(childCol);
@@ -241,8 +241,8 @@
} else {
// Reduce Sink contains the columns needed - no need to aggregate from
// children
- ArrayList<exprNodeDesc> vals = conf.getValueCols();
- for (exprNodeDesc val : vals) {
+ ArrayList<ExprNodeDesc> vals = conf.getValueCols();
+ for (ExprNodeDesc val : vals) {
colLists = Utilities.mergeUniqElems(colLists, val.getCols());
}
}
@@ -290,7 +290,7 @@
}
cols = cppCtx.genColLists(op);
- selectDesc conf = op.getConf();
+ SelectDesc conf = op.getConf();
// The input to the select does not matter. Go over the expressions
// and return the ones which have a marked column
cppCtx.getPrunedColLists().put(op,
@@ -301,16 +301,16 @@
}
// do we need to prune the select operator?
- List<exprNodeDesc> originalColList = op.getConf().getColList();
+ List<ExprNodeDesc> originalColList = op.getConf().getColList();
List<String> columns = new ArrayList<String>();
- for (exprNodeDesc expr : originalColList) {
+ for (ExprNodeDesc expr : originalColList) {
Utilities.mergeUniqElems(columns, expr.getCols());
}
// by now, 'prunedCols' are columns used by child operators, and 'columns'
// are columns used by this select operator.
ArrayList<String> originalOutputColumnNames = conf.getOutputColumnNames();
if (cols.size() < originalOutputColumnNames.size()) {
- ArrayList<exprNodeDesc> newColList = new ArrayList<exprNodeDesc>();
+ ArrayList<ExprNodeDesc> newColList = new ArrayList<ExprNodeDesc>();
ArrayList<String> newOutputColumnNames = new ArrayList<String>();
Vector<ColumnInfo> rs_oldsignature = op.getSchema().getSignature();
Vector<ColumnInfo> rs_newsignature = new Vector<ColumnInfo>();
@@ -370,8 +370,8 @@
private static boolean[] getPruneReduceSinkOpRetainFlags(
List<String> retainedParentOpOutputCols, ReduceSinkOperator reduce) {
- reduceSinkDesc reduceConf = reduce.getConf();
- java.util.ArrayList<exprNodeDesc> originalValueEval = reduceConf
+ ReduceSinkDesc reduceConf = reduce.getConf();
+ java.util.ArrayList<ExprNodeDesc> originalValueEval = reduceConf
.getValueCols();
boolean[] flags = new boolean[originalValueEval.size()];
for (int i = 0; i < originalValueEval.size(); i++) {
@@ -394,18 +394,18 @@
private static void pruneReduceSinkOperator(boolean[] retainFlags,
ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx)
throws SemanticException {
- reduceSinkDesc reduceConf = reduce.getConf();
- Map<String, exprNodeDesc> oldMap = reduce.getColumnExprMap();
- Map<String, exprNodeDesc> newMap = new HashMap<String, exprNodeDesc>();
+ ReduceSinkDesc reduceConf = reduce.getConf();
+ Map<String, ExprNodeDesc> oldMap = reduce.getColumnExprMap();
+ Map<String, ExprNodeDesc> newMap = new HashMap<String, ExprNodeDesc>();
Vector<ColumnInfo> sig = new Vector<ColumnInfo>();
RowResolver oldRR = cppCtx.getOpToParseCtxMap().get(reduce).getRR();
RowResolver newRR = new RowResolver();
ArrayList<String> originalValueOutputColNames = reduceConf
.getOutputValueColumnNames();
- java.util.ArrayList<exprNodeDesc> originalValueEval = reduceConf
+ java.util.ArrayList<ExprNodeDesc> originalValueEval = reduceConf
.getValueCols();
ArrayList<String> newOutputColNames = new ArrayList<String>();
- java.util.ArrayList<exprNodeDesc> newValueEval = new ArrayList<exprNodeDesc>();
+ java.util.ArrayList<ExprNodeDesc> newValueEval = new ArrayList<ExprNodeDesc>();
for (int i = 0; i < retainFlags.length; i++) {
if (retainFlags[i]) {
newValueEval.add(originalValueEval.get(i));
@@ -423,7 +423,7 @@
}
}
- ArrayList<exprNodeDesc> keyCols = reduceConf.getKeyCols();
+ ArrayList<ExprNodeDesc> keyCols = reduceConf.getKeyCols();
List<String> keys = new ArrayList<String>();
RowResolver parResover = cppCtx.getOpToParseCtxMap().get(
reduce.getParentOperators().get(0)).getRR();
@@ -444,7 +444,7 @@
reduce.getSchema().setSignature(sig);
reduceConf.setOutputValueColumnNames(newOutputColNames);
reduceConf.setValueCols(newValueEval);
- tableDesc newValueTable = PlanUtils.getReduceValueTableDesc(PlanUtils
+ TableDesc newValueTable = PlanUtils.getReduceValueTableDesc(PlanUtils
.getFieldSchemasFromColumnList(reduceConf.getValueCols(),
newOutputColNames, 0, ""));
reduceConf.setValueSerializeInfo(newValueTable);
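
The reduce-sink pruning above is flag-driven: getPruneReduceSinkOpRetainFlags marks which value expressions some child still needs, and pruneReduceSinkOperator then copies only the flagged positions so the parallel lists (output names, value expressions, schema signature) stay index-aligned. The shape in miniature, with illustrative internal column names:

import java.util.ArrayList;
import java.util.List;

public class FlagPrune {
    public static void main(String[] args) {
        // Parallel lists, as in ReduceSinkDesc: names and their expressions.
        List<String> names = new ArrayList<String>();
        names.add("_col0"); names.add("_col1"); names.add("_col2");
        List<String> exprs = new ArrayList<String>();
        exprs.add("expr0"); exprs.add("expr1"); exprs.add("expr2");

        boolean[] retain = { true, false, true }; // child uses _col0, _col2

        List<String> newNames = new ArrayList<String>();
        List<String> newExprs = new ArrayList<String>();
        for (int i = 0; i < retain.length; i++) {
            if (retain[i]) { // copy flagged positions in lockstep
                newNames.add(names.get(i));
                newExprs.add(exprs.get(i));
            }
        }
        System.out.println(newNames + " " + newExprs);
        // prints [_col0, _col2] [expr0, expr2]
    }
}
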
@@ -495,8 +495,8 @@
}
private static void pruneJoinOperator(NodeProcessorCtx ctx,
- CommonJoinOperator op, joinDesc conf,
- Map<String, exprNodeDesc> columnExprMap,
+ CommonJoinOperator op, JoinDesc conf,
+ Map<String, ExprNodeDesc> columnExprMap,
Map<Byte, List<Integer>> retainMap, boolean mapJoin)
throws SemanticException {
ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
@@ -516,11 +516,11 @@
RowResolver newJoinRR = new RowResolver();
ArrayList<String> outputCols = new ArrayList<String>();
Vector<ColumnInfo> rs = new Vector<ColumnInfo>();
- Map<String, exprNodeDesc> newColExprMap = new HashMap<String, exprNodeDesc>();
+ Map<String, ExprNodeDesc> newColExprMap = new HashMap<String, ExprNodeDesc>();
for (int i = 0; i < conf.getOutputColumnNames().size(); i++) {
String internalName = conf.getOutputColumnNames().get(i);
- exprNodeDesc desc = columnExprMap.get(internalName);
+ ExprNodeDesc desc = columnExprMap.get(internalName);
Byte tag = conf.getReversedExprs().get(internalName);
if (!childColLists.contains(internalName)) {
int index = conf.getExprs().get(tag).indexOf(desc);
@@ -545,30 +545,30 @@
if (mapJoin) {
// regenerate the valueTableDesc
- List<tableDesc> valueTableDescs = new ArrayList<tableDesc>();
+ List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
for (int pos = 0; pos < op.getParentOperators().size(); pos++) {
- List<exprNodeDesc> valueCols = conf.getExprs()
+ List<ExprNodeDesc> valueCols = conf.getExprs()
.get(new Byte((byte) pos));
StringBuilder keyOrder = new StringBuilder();
for (int i = 0; i < valueCols.size(); i++) {
keyOrder.append("+");
}
- tableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils
+ TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils
.getFieldSchemasFromColumnList(valueCols, "mapjoinvalue"));
valueTableDescs.add(valueTableDesc);
}
- ((mapJoinDesc) conf).setValueTblDescs(valueTableDescs);
+ ((MapJoinDesc) conf).setValueTblDescs(valueTableDescs);
- Set<Map.Entry<Byte, List<exprNodeDesc>>> exprs = ((mapJoinDesc) conf)
+ Set<Map.Entry<Byte, List<ExprNodeDesc>>> exprs = ((MapJoinDesc) conf)
.getKeys().entrySet();
- Iterator<Map.Entry<Byte, List<exprNodeDesc>>> iters = exprs.iterator();
+ Iterator<Map.Entry<Byte, List<ExprNodeDesc>>> iters = exprs.iterator();
while (iters.hasNext()) {
- Map.Entry<Byte, List<exprNodeDesc>> entry = iters.next();
- List<exprNodeDesc> lists = entry.getValue();
+ Map.Entry<Byte, List<ExprNodeDesc>> entry = iters.next();
+ List<ExprNodeDesc> lists = entry.getValue();
for (int j = 0; j < lists.size(); j++) {
- exprNodeDesc desc = lists.get(j);
+ ExprNodeDesc desc = lists.get(j);
Byte tag = entry.getKey();
List<String> cols = prunedColLists.get(tag);
cols = Utilities.mergeUniqElems(cols, desc.getCols());