Posted to commits@hive.apache.org by zs...@apache.org on 2010/02/09 08:55:50 UTC
svn commit: r907950 [4/15] - in /hadoop/hive/trunk: ./ checkstyle/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/java/org/apache/hadoop/hive/contrib/file...
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java Tue Feb 9 07:55:30 2010
@@ -42,12 +42,17 @@
private static final long serialVersionUID = 1L;
private transient SkewJoinHandler skewJoinKeyContext = null;
-
+
+ /**
+ * SkewkeyTableCounter.
+ *
+ */
public static enum SkewkeyTableCounter {
SKEWJOINFOLLOWUPJOBS
}
- transient private final LongWritable skewjoin_followup_jobs = new LongWritable(0);
-
+
+ private final transient LongWritable skewjoin_followup_jobs = new LongWritable(0);
+
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
super.initializeOp(hconf);
@@ -124,7 +129,7 @@
}
/**
- * All done
+ * All done.
*
*/
@Override
@@ -136,8 +141,7 @@
}
@Override
- public void jobClose(Configuration hconf, boolean success)
- throws HiveException {
+ public void jobClose(Configuration hconf, boolean success) throws HiveException {
int numAliases = conf.getExprs().size();
if (conf.getHandleSkewJoin()) {
try {
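
The JoinOperator hunk is typical of this commit: checkstyle's ModifierOrder check wants modifiers in the JLS-recommended order (public/protected/private, abstract, static, final, transient, volatile, synchronized, native, strictfp). A minimal sketch of the before/after, with invented names rather than Hive code:

    // Sketch only: the canonical JLS modifier order that the hunks above adopt.
    public class ModifierOrderExample implements java.io.Serializable {
      private static final long serialVersionUID = 1L;

      // before the cleanup: transient private final long createdAt = ...;
      // after the cleanup:  private final transient long createdAt = ...;
      private final transient long createdAt = System.currentTimeMillis();

      public static void main(String[] args) {
        System.out.println(new ModifierOrderExample().createdAt);
      }
    }
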
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java Tue Feb 9 07:55:30 2010
@@ -31,8 +31,8 @@
public class LimitOperator extends Operator<LimitDesc> implements Serializable {
private static final long serialVersionUID = 1L;
- transient protected int limit;
- transient protected int currCount;
+ protected transient int limit;
+ protected transient int currCount;
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
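
LimitOperator's counters stay transient because Hive ships serialized operator trees to map/reduce tasks, and per-row state must be rebuilt in initializeOp rather than travel with the plan. A self-contained sketch of the effect (the demo class is invented, not Hive code):

    import java.io.*;

    // Illustrative: transient runtime state such as currCount is dropped when
    // the plan is serialized and shipped to a task, then rebuilt at init time.
    public class TransientDemo implements Serializable {
      private static final long serialVersionUID = 1L;

      protected transient int currCount = 42; // runtime state, not shipped
      protected int limit = 10;               // plan state, survives the trip

      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(bos);
        oos.writeObject(new TransientDemo());
        oos.close();

        TransientDemo after = (TransientDemo) new ObjectInputStream(
            new ByteArrayInputStream(bos.toByteArray())).readObject();
        // prints: limit=10 currCount=0 (transient field reset to its default)
        System.out.println("limit=" + after.limit + " currCount=" + after.currCount);
      }
    }
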
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Tue Feb 9 07:55:30 2010
@@ -53,36 +53,39 @@
public class MapJoinOperator extends CommonJoinOperator<MapJoinDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
- static final private Log LOG = LogFactory.getLog(MapJoinOperator.class
+ private static final Log LOG = LogFactory.getLog(MapJoinOperator.class
.getName());
/**
* The expressions for join inputs's join keys.
*/
- transient protected Map<Byte, List<ExprNodeEvaluator>> joinKeys;
+ protected transient Map<Byte, List<ExprNodeEvaluator>> joinKeys;
/**
* The ObjectInspectors for the join inputs's join keys.
*/
- transient protected Map<Byte, List<ObjectInspector>> joinKeysObjectInspectors;
+ protected transient Map<Byte, List<ObjectInspector>> joinKeysObjectInspectors;
/**
* The standard ObjectInspectors for the join inputs's join keys.
*/
- transient protected Map<Byte, List<ObjectInspector>> joinKeysStandardObjectInspectors;
+ protected transient Map<Byte, List<ObjectInspector>> joinKeysStandardObjectInspectors;
- transient private int posBigTable; // one of the tables that is not in memory
+ private transient int posBigTable; // one of the tables that is not in memory
transient int mapJoinRowsKey; // rows for a given key
- transient protected Map<Byte, HashMapWrapper<MapJoinObjectKey, MapJoinObjectValue>> mapJoinTables;
+ protected transient Map<Byte, HashMapWrapper<MapJoinObjectKey, MapJoinObjectValue>> mapJoinTables;
- transient protected RowContainer<ArrayList<Object>> emptyList = null;
+ protected transient RowContainer<ArrayList<Object>> emptyList = null;
- transient static final private String[] fatalErrMsg = {
+ private static final transient String[] FATAL_ERR_MSG = {
null, // counter value 0 means no error
- "Mapside join size exceeds hive.mapjoin.maxsize. Please increase that or remove the mapjoin hint." // counter
- // value
- // 1
- };
+ "Mapside join size exceeds hive.mapjoin.maxsize. "
+ + "Please increase that or remove the mapjoin hint."
+ };
+ /**
+ * MapJoinObjectCtx.
+ *
+ */
public static class MapJoinObjectCtx {
ObjectInspector standardOI;
SerDe serde;
@@ -125,10 +128,10 @@
}
- transient static Map<Integer, MapJoinObjectCtx> mapMetadata = new HashMap<Integer, MapJoinObjectCtx>();
- transient static int nextVal = 0;
+ static transient Map<Integer, MapJoinObjectCtx> mapMetadata = new HashMap<Integer, MapJoinObjectCtx>();
+ static transient int nextVal = 0;
- static public Map<Integer, MapJoinObjectCtx> getMapMetadata() {
+ public static Map<Integer, MapJoinObjectCtx> getMapMetadata() {
return mapMetadata;
}
@@ -207,7 +210,7 @@
}
outputObjInspector = ObjectInspectorFactory
.getStandardStructObjectInspector(conf.getOutputColumnNames(),
- structFieldObjectInspectors);
+ structFieldObjectInspectors);
}
initializeChildren(hconf);
}
@@ -215,7 +218,7 @@
@Override
protected void fatalErrorMessage(StringBuilder errMsg, long counterCode) {
errMsg.append("Operator " + getOperatorId() + " (id=" + id + "): "
- + fatalErrMsg[(int) counterCode]);
+ + FATAL_ERR_MSG[(int) counterCode]);
}
@Override
@@ -246,11 +249,11 @@
mapMetadata.put(Integer.valueOf(metadataKeyTag),
new MapJoinObjectCtx(
- ObjectInspectorUtils
- .getStandardObjectInspector(keySerializer
- .getObjectInspector(),
- ObjectInspectorCopyOption.WRITABLE), keySerializer,
- keyTableDesc, hconf));
+ ObjectInspectorUtils
+ .getStandardObjectInspector(keySerializer
+ .getObjectInspector(),
+ ObjectInspectorCopyOption.WRITABLE), keySerializer,
+ keyTableDesc, hconf));
firstRow = false;
}
@@ -306,9 +309,9 @@
mapMetadata.put(Integer.valueOf(metadataValueTag[tag]),
new MapJoinObjectCtx(ObjectInspectorUtils
- .getStandardObjectInspector(valueSerDe.getObjectInspector(),
- ObjectInspectorCopyOption.WRITABLE), valueSerDe,
- valueTableDesc, hconf));
+ .getStandardObjectInspector(valueSerDe.getObjectInspector(),
+ ObjectInspectorCopyOption.WRITABLE), valueSerDe,
+ valueTableDesc, hconf));
}
// Construct externalizable objects for key and value
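
Two changes in this hunk: the fatalErrMsg array moves to the constant naming convention FATAL_ERR_MSG, and the long literal is split across lines. The split is free at runtime, since javac folds adjacent compile-time constants into a single string; a sketch mirroring the commit's own array:

    // Illustrative: adjacent string literals are concatenated at compile time,
    // so splitting a long message across lines costs nothing at runtime.
    public class ConstantFolding {
      private static final String[] FATAL_ERR_MSG = {
        null, // counter value 0 means no error
        "Mapside join size exceeds hive.mapjoin.maxsize. "
            + "Please increase that or remove the mapjoin hint."
      };

      public static void main(String[] args) {
        System.out.println(FATAL_ERR_MSG[1]); // one folded literal in the class file
      }
    }
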
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Tue Feb 9 07:55:30 2010
@@ -53,16 +53,20 @@
private static final long serialVersionUID = 1L;
+ /**
+ * Counter.
+ *
+ */
public static enum Counter {
DESERIALIZE_ERRORS
}
- transient private final LongWritable deserialize_error_count = new LongWritable();
- transient private Deserializer deserializer;
+ private final transient LongWritable deserialize_error_count = new LongWritable();
+ private transient Deserializer deserializer;
- transient private Object[] rowWithPart;
- transient private StructObjectInspector rowObjectInspector;
- transient private boolean isPartitioned;
+ private transient Object[] rowWithPart;
+ private transient StructObjectInspector rowObjectInspector;
+ private transient boolean isPartitioned;
private Map<MapInputPath, MapOpCtx> opCtxMap;
private Map<Operator<? extends Serializable>, java.util.ArrayList<String>> operatorToPaths;
@@ -189,7 +193,7 @@
if ((className == "") || (className == null)) {
throw new HiveException(
"SerDe class or the SerDe class name is not set for table: "
- + td.getProperties().getProperty("name"));
+ + td.getProperties().getProperty("name"));
}
sdclass = hconf.getClassByName(className);
}
@@ -235,8 +239,7 @@
rowWithPart[1] = partValues;
rowObjectInspector = ObjectInspectorFactory
.getUnionStructObjectInspector(Arrays
- .asList(new StructObjectInspector[] { rowObjectInspector,
- partObjectInspector }));
+ .asList(new StructObjectInspector[] {rowObjectInspector, partObjectInspector}));
// LOG.info("dump " + tableName + " " + partName + " " +
// rowObjectInspector.getTypeName());
opCtx = new MapOpCtx(true, rowObjectInspector, rowWithPart, deserializer);
@@ -350,8 +353,7 @@
}
}
if (shouldInit) {
- op.initialize(hconf, new ObjectInspector[] { entry.getValue()
- .getRowObjectInspector() });
+ op.initialize(hconf, new ObjectInspector[] {entry.getValue().getRowObjectInspector()});
}
}
}
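
One untouched context line above, (className == "") || (className == null), compares strings by reference, so it only matches an interned empty literal. A hedged sketch of the conventional form (not part of this commit):

    // Illustrative: string identity (==) only matches the same object; the
    // usual test is a null check first, then a value comparison.
    public class StringCheck {
      static boolean isUnset(String className) {
        return className == null || className.isEmpty();
      }

      public static void main(String[] args) {
        System.out.println(isUnset(null));                  // true
        System.out.println(isUnset(""));                    // true
        System.out.println(isUnset(new String("MySerDe"))); // false
      }
    }
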
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java Tue Feb 9 07:55:30 2010
@@ -43,9 +43,9 @@
private static final long serialVersionUID = 1L;
- final static String hadoopMemKey = "HADOOP_HEAPSIZE";
- final static String hadoopOptsKey = "HADOOP_OPTS";
- final static String HIVE_SYS_PROP[] = { "build.dir", "build.dir.hive" };
+ static final String HADOOP_MEM_KEY = "HADOOP_HEAPSIZE";
+ static final String HADOOP_OPTS_KEY = "HADOOP_OPTS";
+ static final String[] HIVE_SYS_PROP = {"build.dir", "build.dir.hive"};
public MapRedTask() {
super();
@@ -60,28 +60,24 @@
String hiveJar = conf.getJar();
String libJarsOption;
- {
- String addedJars = ExecDriver.getResourceFiles(conf,
- SessionState.ResourceType.JAR);
- conf.setVar(ConfVars.HIVEADDEDJARS, addedJars);
-
- String auxJars = conf.getAuxJars();
- // Put auxjars and addedjars together into libjars
- if (StringUtils.isEmpty(addedJars)) {
- if (StringUtils.isEmpty(auxJars)) {
- libJarsOption = " ";
- } else {
- libJarsOption = " -libjars " + auxJars + " ";
- }
+ String addedJars = ExecDriver.getResourceFiles(conf,
+ SessionState.ResourceType.JAR);
+ conf.setVar(ConfVars.HIVEADDEDJARS, addedJars);
+ String auxJars = conf.getAuxJars();
+ // Put auxjars and addedjars together into libjars
+ if (StringUtils.isEmpty(addedJars)) {
+ if (StringUtils.isEmpty(auxJars)) {
+ libJarsOption = " ";
} else {
- if (StringUtils.isEmpty(auxJars)) {
- libJarsOption = " -libjars " + addedJars + " ";
- } else {
- libJarsOption = " -libjars " + addedJars + "," + auxJars + " ";
- }
+ libJarsOption = " -libjars " + auxJars + " ";
+ }
+ } else {
+ if (StringUtils.isEmpty(auxJars)) {
+ libJarsOption = " -libjars " + addedJars + " ";
+ } else {
+ libJarsOption = " -libjars " + addedJars + "," + auxJars + " ";
}
}
-
// Generate the hiveConfArgs after potentially adding the jars
String hiveConfArgs = ExecDriver.generateCmdLine(conf);
File scratchDir = new File(conf.getVar(HiveConf.ConfVars.SCRATCHDIR));
@@ -117,47 +113,38 @@
// Inherit Java system variables
String hadoopOpts;
- {
- StringBuilder sb = new StringBuilder();
- Properties p = System.getProperties();
- for (String element : HIVE_SYS_PROP) {
- if (p.containsKey(element)) {
- sb.append(" -D" + element + "=" + p.getProperty(element));
- }
+ StringBuilder sb = new StringBuilder();
+ Properties p = System.getProperties();
+ for (String element : HIVE_SYS_PROP) {
+ if (p.containsKey(element)) {
+ sb.append(" -D" + element + "=" + p.getProperty(element));
}
- hadoopOpts = sb.toString();
}
-
+ hadoopOpts = sb.toString();
// Inherit the environment variables
String[] env;
- {
- Map<String, String> variables = new HashMap(System.getenv());
- // The user can specify the hadoop memory
- int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
-
- if (hadoopMem == 0) {
- variables.remove(hadoopMemKey);
- } else {
- // user specified the memory - only applicable for local mode
- variables.put(hadoopMemKey, String.valueOf(hadoopMem));
- }
-
- if (variables.containsKey(hadoopOptsKey)) {
- variables.put(hadoopOptsKey, variables.get(hadoopOptsKey)
- + hadoopOpts);
- } else {
- variables.put(hadoopOptsKey, hadoopOpts);
- }
-
- env = new String[variables.size()];
- int pos = 0;
- for (Map.Entry<String, String> entry : variables.entrySet()) {
- String name = entry.getKey();
- String value = entry.getValue();
- env[pos++] = name + "=" + value;
- }
+ Map<String, String> variables = new HashMap(System.getenv());
+ // The user can specify the hadoop memory
+ int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
+ if (hadoopMem == 0) {
+ variables.remove(HADOOP_MEM_KEY);
+ } else {
+ // user specified the memory - only applicable for local mode
+ variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem));
+ }
+ if (variables.containsKey(HADOOP_OPTS_KEY)) {
+ variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY)
+ + hadoopOpts);
+ } else {
+ variables.put(HADOOP_OPTS_KEY, hadoopOpts);
+ }
+ env = new String[variables.size()];
+ int pos = 0;
+ for (Map.Entry<String, String> entry : variables.entrySet()) {
+ String name = entry.getKey();
+ String value = entry.getValue();
+ env[pos++] = name + "=" + value;
}
-
// Run ExecDriver in another JVM
executor = Runtime.getRuntime().exec(cmdLine, env);
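
The MapRedTask rewrite mostly removes the anonymous { } scoping blocks, but the env construction it keeps is the standard way to hand Runtime.exec a child environment as name=value strings. A runnable sketch mirroring that loop:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative: flattening an inherited environment map into the
    // "name=value" array form that Runtime.exec(cmd, env) expects.
    public class EnvFlatten {
      public static void main(String[] args) {
        Map<String, String> variables = new HashMap<String, String>(System.getenv());
        variables.put("HADOOP_HEAPSIZE", "1024"); // e.g. a local-mode memory cap

        String[] env = new String[variables.size()];
        int pos = 0;
        for (Map.Entry<String, String> entry : variables.entrySet()) {
          env[pos++] = entry.getKey() + "=" + entry.getValue();
        }
        System.out.println(env.length + " variables, e.g. " + env[0]);
      }
    }
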
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Tue Feb 9 07:55:30 2010
@@ -41,7 +41,7 @@
import org.apache.hadoop.util.StringUtils;
/**
- * MoveTask implementation
+ * MoveTask implementation.
**/
public class MoveTask extends Task<MoveWork> implements Serializable {
@@ -102,7 +102,7 @@
} else {
throw new AccessControlException(
"Unable to delete the existing destination directory: "
- + targetPath);
+ + targetPath);
}
}
}
@@ -113,7 +113,7 @@
String mesg = "Loading data to table "
+ tbd.getTable().getTableName()
+ ((tbd.getPartitionSpec().size() > 0) ? " partition "
- + tbd.getPartitionSpec().toString() : "");
+ + tbd.getPartitionSpec().toString() : "");
String mesg_detail = " from " + tbd.getSourceDir();
console.printInfo(mesg, mesg_detail);
Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/NumericOpMethodResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/NumericOpMethodResolver.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/NumericOpMethodResolver.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/NumericOpMethodResolver.java Tue Feb 9 07:55:30 2010
@@ -60,8 +60,7 @@
* .List)
*/
@Override
- public Method getEvalMethod(List<TypeInfo> argTypeInfos)
- throws AmbiguousMethodException, UDFArgumentException {
+ public Method getEvalMethod(List<TypeInfo> argTypeInfos) throws UDFArgumentException {
assert (argTypeInfos.size() == 2);
List<TypeInfo> pTypeInfos = null;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java Tue Feb 9 07:55:30 2010
@@ -42,7 +42,7 @@
import org.apache.hadoop.mapred.Reporter;
/**
- * Base operator implementation
+ * Base operator implementation.
**/
public abstract class Operator<T extends Serializable> implements Serializable,
Node {
@@ -55,28 +55,29 @@
protected List<Operator<? extends Serializable>> parentOperators;
protected String operatorId;
/**
- * List of counter names associated with the operator It contains the
+ * List of counter names associated with the operator. It contains the
* following default counters NUM_INPUT_ROWS NUM_OUTPUT_ROWS TIME_TAKEN
- * Individual operators can add to this list via addToCounterNames methods
+ * Individual operators can add to this list via addToCounterNames methods.
*/
protected ArrayList<String> counterNames;
/**
* Each operator has its own map of its counter names to disjoint
* ProgressCounter - it is populated at compile time and is read in at
- * run-time while extracting the operator specific counts
+ * run-time while extracting the operator specific counts.
*/
protected HashMap<String, ProgressCounter> counterNameToEnum;
private static int seqId;
- // It can be optimized later so that an operator operator (init/close) is
- // performed
- // only after that operation has been performed on all the parents. This will
- // require
- // initializing the whole tree in all the mappers (which might be required for
- // mappers
+ // It can be optimized later so that an operator operator (init/close) is performed
+ // only after that operation has been performed on all the parents. This will require
+ // initializing the whole tree in all the mappers (which might be required for mappers
// spanning multiple files anyway, in future)
+ /**
+ * State.
+ *
+ */
public static enum State {
UNINIT, // initialize() has not been called
INIT, // initialize() has been called and close() has not been called,
@@ -88,10 +89,10 @@
// one of its parent is not in state CLOSE..
};
- transient protected State state = State.UNINIT;
+ protected transient State state = State.UNINIT;
- transient static boolean fatalError = false; // fatalError is shared acorss
- // all operators
+ static transient boolean fatalError = false; // fatalError is shared acorss
+ // all operators
static {
seqId = 0;
@@ -172,7 +173,7 @@
}
// non-bean fields needed during compilation
- transient private RowSchema rowSchema;
+ private transient RowSchema rowSchema;
public void setSchema(RowSchema rowSchema) {
this.rowSchema = rowSchema;
@@ -184,16 +185,16 @@
// non-bean ..
- transient protected HashMap<Enum<?>, LongWritable> statsMap = new HashMap<Enum<?>, LongWritable>();
- transient protected OutputCollector out;
- transient protected Log LOG = LogFactory.getLog(this.getClass().getName());
- transient protected String alias;
- transient protected Reporter reporter;
- transient protected String id;
+ protected transient HashMap<Enum<?>, LongWritable> statsMap = new HashMap<Enum<?>, LongWritable>();
+ protected transient OutputCollector out;
+ protected transient Log LOG = LogFactory.getLog(this.getClass().getName());
+ protected transient String alias;
+ protected transient Reporter reporter;
+ protected transient String id;
// object inspectors for input rows
- transient protected ObjectInspector[] inputObjInspectors = new ObjectInspector[Short.MAX_VALUE];
+ protected transient ObjectInspector[] inputObjInspectors = new ObjectInspector[Short.MAX_VALUE];
// for output rows of this operator
- transient protected ObjectInspector outputObjInspector;
+ protected transient ObjectInspector outputObjInspector;
/**
* A map of output column name to input expression map. This is used by
@@ -243,7 +244,7 @@
}
/**
- * Store the alias this operator is working on behalf of
+ * Store the alias this operator is working on behalf of.
*/
public void setAlias(String alias) {
this.alias = alias;
@@ -266,7 +267,7 @@
}
/**
- * checks whether all parent operators are initialized or not
+ * checks whether all parent operators are initialized or not.
*
* @return true if there are no parents or all parents are initialized. false
* otherwise
@@ -352,7 +353,7 @@
/**
* Calls initialize on each of the children with outputObjetInspector as the
- * output row format
+ * output row format.
*/
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
@@ -372,7 +373,7 @@
/**
* Collects all the parent's output object inspectors and calls actual
- * initialization method
+ * initialization method.
*
* @param hconf
* @param inputOI
@@ -527,7 +528,7 @@
/**
* Unlike other operator interfaces which are called from map or reduce task,
- * jobClose is called from the jobclient side once the job has completed
+ * jobClose is called from the jobclient side once the job has completed.
*
* @param conf
* Configuration with with which job was submitted
@@ -549,12 +550,12 @@
* Cache childOperators in an array for faster access. childOperatorsArray is
* accessed per row, so it's important to make the access efficient.
*/
- transient protected Operator<? extends Serializable>[] childOperatorsArray = null;
- transient protected int[] childOperatorsTag;
+ protected transient Operator<? extends Serializable>[] childOperatorsArray = null;
+ protected transient int[] childOperatorsTag;
// counters for debugging
- transient private long cntr = 0;
- transient private long nextCntr = 1;
+ private transient long cntr = 0;
+ private transient long nextCntr = 1;
/**
* Replace one child with another at the same position. The parent of the
@@ -672,8 +673,12 @@
}
}
+ /**
+ * OperatorFunc.
+ *
+ */
public static interface OperatorFunc {
- public void func(Operator<? extends Serializable> op);
+ void func(Operator<? extends Serializable> op);
}
public void preorderMap(OperatorFunc opFunc) {
@@ -702,7 +707,7 @@
/**
* Returns a map of output column name to input expression map Note that
- * currently it returns only key columns for ReduceSink and GroupBy operators
+ * currently it returns only key columns for ReduceSink and GroupBy operators.
*
* @return null if the operator doesn't change columns
*/
@@ -799,34 +804,73 @@
*/
/**
- * TODO This is a hack for hadoop 0.17 which only supports enum counters
+ * TODO This is a hack for hadoop 0.17 which only supports enum counters.
*/
public static enum ProgressCounter {
- C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19, C20, C21, C22, C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37, C38, C39, C40, C41, C42, C43, C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56, C57, C58, C59, C60, C61, C62, C63, C64, C65, C66, C67, C68, C69, C70, C71, C72, C73, C74, C75, C76, C77, C78, C79, C80, C81, C82, C83, C84, C85, C86, C87, C88, C89, C90, C91, C92, C93, C94, C95, C96, C97, C98, C99, C100, C101, C102, C103, C104, C105, C106, C107, C108, C109, C110, C111, C112, C113, C114, C115, C116, C117, C118, C119, C120, C121, C122, C123, C124, C125, C126, C127, C128, C129, C130, C131, C132, C133, C134, C135, C136, C137, C138, C139, C140, C141, C142, C143, C144, C145, C146, C147, C148, C149, C150, C151, C152, C153, C154, C155, C156, C157, C158, C159, C160, C161, C162, C163, C164, C165, C166, C167, C168, C169, C170, C171, C172, C173, C174, C175, C176, C177, C178, C179, C180, C181, C182, C183, C184, C185, C186, C187, C188, C189, C190, C191, C192, C193, C194, C195, C196, C197, C198, C199, C200, C201, C202, C203, C204, C205, C206, C207, C208, C209, C210, C211, C212, C213, C214, C215, C216, C217, C218, C219, C220, C221, C222, C223, C224, C225, C226, C227, C228, C229, C230, C231, C232, C233, C234, C235, C236, C237, C238, C239, C240, C241, C242, C243, C244, C245, C246, C247, C248, C249, C250, C251, C252, C253, C254, C255, C256, C257, C258, C259, C260, C261, C262, C263, C264, C265, C266, C267, C268, C269, C270, C271, C272, C273, C274, C275, C276, C277, C278, C279, C280, C281, C282, C283, C284, C285, C286, C287, C288, C289, C290, C291, C292, C293, C294, C295, C296, C297, C298, C299, C300, C301, C302, C303, C304, C305, C306, C307, C308, C309, C310, C311, C312, C313, C314, C315, C316, C317, C318, C319, C320, C321, C322, C323, C324, C325, C326, C327, C328, C329, C330, C331, C332, C333, C334, C335, C336, C337, C338, C339, C340, C341, C342, C343, C344, C345, C346, C347, C348, C349, C350, C351, C352, C353, C354, C355, C356, C357, C358, C359, C360, C361, C362, C363, C364, C365, C366, C367, C368, C369, C370, C371, C372, C373, C374, C375, C376, C377, C378, C379, C380, C381, C382, C383, C384, C385, C386, C387, C388, C389, C390, C391, C392, C393, C394, C395, C396, C397, C398, C399, C400
+ C1, C2, C3, C4, C5, C6, C7, C8, C9, C10,
+ C11, C12, C13, C14, C15, C16, C17, C18, C19, C20,
+ C21, C22, C23, C24, C25, C26, C27, C28, C29, C30,
+ C31, C32, C33, C34, C35, C36, C37, C38, C39, C40,
+ C41, C42, C43, C44, C45, C46, C47, C48, C49, C50,
+ C51, C52, C53, C54, C55, C56, C57, C58, C59, C60,
+ C61, C62, C63, C64, C65, C66, C67, C68, C69, C70,
+ C71, C72, C73, C74, C75, C76, C77, C78, C79, C80,
+ C81, C82, C83, C84, C85, C86, C87, C88, C89, C90,
+ C91, C92, C93, C94, C95, C96, C97, C98, C99, C100,
+ C101, C102, C103, C104, C105, C106, C107, C108, C109, C110,
+ C111, C112, C113, C114, C115, C116, C117, C118, C119, C120,
+ C121, C122, C123, C124, C125, C126, C127, C128, C129, C130,
+ C131, C132, C133, C134, C135, C136, C137, C138, C139, C140,
+ C141, C142, C143, C144, C145, C146, C147, C148, C149, C150,
+ C151, C152, C153, C154, C155, C156, C157, C158, C159, C160,
+ C161, C162, C163, C164, C165, C166, C167, C168, C169, C170,
+ C171, C172, C173, C174, C175, C176, C177, C178, C179, C180,
+ C181, C182, C183, C184, C185, C186, C187, C188, C189, C190,
+ C191, C192, C193, C194, C195, C196, C197, C198, C199, C200,
+ C201, C202, C203, C204, C205, C206, C207, C208, C209, C210,
+ C211, C212, C213, C214, C215, C216, C217, C218, C219, C220,
+ C221, C222, C223, C224, C225, C226, C227, C228, C229, C230,
+ C231, C232, C233, C234, C235, C236, C237, C238, C239, C240,
+ C241, C242, C243, C244, C245, C246, C247, C248, C249, C250,
+ C251, C252, C253, C254, C255, C256, C257, C258, C259, C260,
+ C261, C262, C263, C264, C265, C266, C267, C268, C269, C270,
+ C271, C272, C273, C274, C275, C276, C277, C278, C279, C280,
+ C281, C282, C283, C284, C285, C286, C287, C288, C289, C290,
+ C291, C292, C293, C294, C295, C296, C297, C298, C299, C300,
+ C301, C302, C303, C304, C305, C306, C307, C308, C309, C310,
+ C311, C312, C313, C314, C315, C316, C317, C318, C319, C320,
+ C321, C322, C323, C324, C325, C326, C327, C328, C329, C330,
+ C331, C332, C333, C334, C335, C336, C337, C338, C339, C340,
+ C341, C342, C343, C344, C345, C346, C347, C348, C349, C350,
+ C351, C352, C353, C354, C355, C356, C357, C358, C359, C360,
+ C361, C362, C363, C364, C365, C366, C367, C368, C369, C370,
+ C371, C372, C373, C374, C375, C376, C377, C378, C379, C380,
+ C381, C382, C383, C384, C385, C386, C387, C388, C389, C390,
+ C391, C392, C393, C394, C395, C396, C397, C398, C399, C400
};
private static int totalNumCntrs = 400;
/**
- * populated at runtime from hadoop counters at run time in the client
+ * populated at runtime from hadoop counters at run time in the client.
*/
- transient protected HashMap<String, Long> counters;
+ protected transient HashMap<String, Long> counters;
/**
* keeps track of unique ProgressCounter enums used this value is used at
- * compile time while assigning ProgressCounter enums to counter names
+ * compile time while assigning ProgressCounter enums to counter names.
*/
private static int lastEnumUsed;
- transient protected long inputRows = 0;
- transient protected long outputRows = 0;
- transient protected long beginTime = 0;
- transient protected long totalTime = 0;
+ protected transient long inputRows = 0;
+ protected transient long outputRows = 0;
+ protected transient long beginTime = 0;
+ protected transient long totalTime = 0;
- transient protected Object groupKeyObject;
+ protected transient Object groupKeyObject;
/**
- * this is called before operator process to buffer some counters
+ * this is called before operator process to buffer some counters.
*/
private void preProcessCounter() {
inputRows++;
@@ -843,7 +887,7 @@
}
/**
- * this is called after operator process to buffer some counters
+ * this is called after operator process to buffer some counters.
*/
private void postProcessCounter() {
if (counterNameToEnum != null) {
@@ -852,7 +896,7 @@
}
/**
- * this is called in operators in map or reduce tasks
+ * this is called in operators in map or reduce tasks.
*
* @param name
* @param amount
@@ -866,7 +910,7 @@
if (pc == null) {
LOG
.warn("Using too many counters. Increase the total number of counters for "
- + counterName);
+ + counterName);
} else if (reporter != null) {
reporter.incrCounter(pc, amount);
}
@@ -897,7 +941,7 @@
}
/**
- * called in ExecDriver.progress periodically
+ * called in ExecDriver.progress periodically.
*
* @param ctrs
* counters from the running job
@@ -947,7 +991,7 @@
if (pc == null) {
LOG
.warn("Using too many counters. Increase the total number of counters for "
- + counterName);
+ + counterName);
} else {
long value = ctrs.getCounter(pc);
fatalErrorMessage(errMsg, value);
@@ -985,7 +1029,7 @@
/**
* Called only in SemanticAnalyzer after all operators have added their own
- * set of counter names
+ * set of counter names.
*/
public void assignCounterNameToEnum() {
if (counterNameToEnum != null) {
@@ -1048,7 +1092,7 @@
/**
* Should be overridden to return the type of the specific operator among the
- * types in OperatorType
+ * types in OperatorType.
*
* @return OperatorType.* or -1 if not overridden
*/
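
The ProgressCounter wall of constants exists because, as the TODO says, hadoop 0.17 counters are keyed on enums only, so Hive pre-declares C1..C400 and hands them out to counter names at compile time (assignCounterNameToEnum). A small sketch of that scheme, with invented names:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative: mapping dynamic counter names onto a fixed pool of
    // placeholder enum constants, in the spirit of ProgressCounter above.
    public class EnumCounterPool {
      enum Slot { C1, C2, C3, C4 } // Hive declares C1..C400

      private static final Map<String, Slot> NAME_TO_SLOT = new HashMap<String, Slot>();
      private static int lastUsed = 0;

      static Slot assign(String counterName) {
        Slot s = NAME_TO_SLOT.get(counterName);
        if (s == null) {
          Slot[] pool = Slot.values();
          if (lastUsed >= pool.length) {
            throw new IllegalStateException("Using too many counters");
          }
          s = pool[lastUsed++];
          NAME_TO_SLOT.put(counterName, s);
        }
        return s;
      }

      public static void main(String[] args) {
        System.out.println(assign("NUM_INPUT_ROWS"));  // C1
        System.out.println(assign("NUM_OUTPUT_ROWS")); // C2
        System.out.println(assign("NUM_INPUT_ROWS"));  // C1 again
      }
    }
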
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java Tue Feb 9 07:55:30 2010
@@ -39,51 +39,52 @@
import org.apache.hadoop.hive.ql.plan.UDTFDesc;
import org.apache.hadoop.hive.ql.plan.UnionDesc;
-public class OperatorFactory {
+/**
+ * OperatorFactory.
+ *
+ */
+public final class OperatorFactory {
- public final static class opTuple<T extends Serializable> {
+ /**
+ * OpTuple.
+ *
+ * @param <T>
+ */
+ public static final class OpTuple<T extends Serializable> {
public Class<T> descClass;
public Class<? extends Operator<T>> opClass;
- public opTuple(Class<T> descClass, Class<? extends Operator<T>> opClass) {
+ public OpTuple(Class<T> descClass, Class<? extends Operator<T>> opClass) {
this.descClass = descClass;
this.opClass = opClass;
}
}
- public static ArrayList<opTuple> opvec;
+ public static ArrayList<OpTuple> opvec;
static {
- opvec = new ArrayList<opTuple>();
- opvec.add(new opTuple<FilterDesc>(FilterDesc.class, FilterOperator.class));
- opvec.add(new opTuple<SelectDesc>(SelectDesc.class, SelectOperator.class));
- opvec
- .add(new opTuple<ForwardDesc>(ForwardDesc.class, ForwardOperator.class));
- opvec.add(new opTuple<FileSinkDesc>(FileSinkDesc.class,
- FileSinkOperator.class));
- opvec
- .add(new opTuple<CollectDesc>(CollectDesc.class, CollectOperator.class));
- opvec.add(new opTuple<ScriptDesc>(ScriptDesc.class, ScriptOperator.class));
- opvec.add(new opTuple<ReduceSinkDesc>(ReduceSinkDesc.class,
- ReduceSinkOperator.class));
- opvec
- .add(new opTuple<ExtractDesc>(ExtractDesc.class, ExtractOperator.class));
- opvec
- .add(new opTuple<GroupByDesc>(GroupByDesc.class, GroupByOperator.class));
- opvec.add(new opTuple<JoinDesc>(JoinDesc.class, JoinOperator.class));
- opvec
- .add(new opTuple<MapJoinDesc>(MapJoinDesc.class, MapJoinOperator.class));
- opvec.add(new opTuple<LimitDesc>(LimitDesc.class, LimitOperator.class));
- opvec.add(new opTuple<TableScanDesc>(TableScanDesc.class,
- TableScanOperator.class));
- opvec.add(new opTuple<UnionDesc>(UnionDesc.class, UnionOperator.class));
- opvec.add(new opTuple<UDTFDesc>(UDTFDesc.class, UDTFOperator.class));
- opvec.add(new opTuple<LateralViewJoinDesc>(LateralViewJoinDesc.class,
+ opvec = new ArrayList<OpTuple>();
+ opvec.add(new OpTuple<FilterDesc>(FilterDesc.class, FilterOperator.class));
+ opvec.add(new OpTuple<SelectDesc>(SelectDesc.class, SelectOperator.class));
+ opvec.add(new OpTuple<ForwardDesc>(ForwardDesc.class, ForwardOperator.class));
+ opvec.add(new OpTuple<FileSinkDesc>(FileSinkDesc.class, FileSinkOperator.class));
+ opvec.add(new OpTuple<CollectDesc>(CollectDesc.class, CollectOperator.class));
+ opvec.add(new OpTuple<ScriptDesc>(ScriptDesc.class, ScriptOperator.class));
+ opvec.add(new OpTuple<ReduceSinkDesc>(ReduceSinkDesc.class, ReduceSinkOperator.class));
+ opvec.add(new OpTuple<ExtractDesc>(ExtractDesc.class, ExtractOperator.class));
+ opvec.add(new OpTuple<GroupByDesc>(GroupByDesc.class, GroupByOperator.class));
+ opvec.add(new OpTuple<JoinDesc>(JoinDesc.class, JoinOperator.class));
+ opvec.add(new OpTuple<MapJoinDesc>(MapJoinDesc.class, MapJoinOperator.class));
+ opvec.add(new OpTuple<LimitDesc>(LimitDesc.class, LimitOperator.class));
+ opvec.add(new OpTuple<TableScanDesc>(TableScanDesc.class, TableScanOperator.class));
+ opvec.add(new OpTuple<UnionDesc>(UnionDesc.class, UnionOperator.class));
+ opvec.add(new OpTuple<UDTFDesc>(UDTFDesc.class, UDTFOperator.class));
+ opvec.add(new OpTuple<LateralViewJoinDesc>(LateralViewJoinDesc.class,
LateralViewJoinOperator.class));
}
public static <T extends Serializable> Operator<T> get(Class<T> opClass) {
- for (opTuple o : opvec) {
+ for (OpTuple o : opvec) {
if (o.descClass == opClass) {
try {
Operator<T> op = (Operator<T>) o.opClass.newInstance();
@@ -188,4 +189,7 @@
return (ret);
}
+ private OperatorFactory() {
+ // prevent instantiation
+ }
}
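
OperatorFactory is a registry of (descriptor class, operator class) pairs resolved via reflection, now with the checkstyle-mandated private constructor. A miniature version of the pattern, with invented Handler names rather than Hive classes:

    import java.io.Serializable;
    import java.util.ArrayList;
    import java.util.List;

    // Illustrative: the descClass -> opClass lookup above, in miniature.
    public final class MiniFactory {
      public interface Handler<T extends Serializable> { }
      public static class StringHandler implements Handler<String> { }

      static final class Tuple<T extends Serializable> {
        final Class<T> descClass;
        final Class<? extends Handler<T>> handlerClass;
        Tuple(Class<T> d, Class<? extends Handler<T>> h) {
          descClass = d;
          handlerClass = h;
        }
      }

      private static final List<Tuple<?>> REGISTRY = new ArrayList<Tuple<?>>();
      static {
        REGISTRY.add(new Tuple<String>(String.class, StringHandler.class));
      }

      @SuppressWarnings("unchecked")
      public static <T extends Serializable> Handler<T> get(Class<T> descClass) {
        for (Tuple<?> t : REGISTRY) {
          if (t.descClass == descClass) {
            try {
              return (Handler<T>) t.handlerClass.newInstance();
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        }
        throw new IllegalArgumentException("No handler for " + descClass);
      }

      private MiniFactory() {
        // prevent instantiation, as in the hunk above
      }

      public static void main(String[] args) {
        System.out.println(get(String.class).getClass().getSimpleName());
      }
    }
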
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordReader.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordReader.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordReader.java Tue Feb 9 07:55:30 2010
@@ -25,18 +25,21 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
+/**
+ * RecordReader.
+ *
+ */
public interface RecordReader {
- public void initialize(InputStream in, Configuration conf, Properties tbl)
- throws IOException;
+ void initialize(InputStream in, Configuration conf, Properties tbl) throws IOException;
- public Writable createRow() throws IOException;
+ Writable createRow() throws IOException;
/**
* Returns the number of bytes that we consumed.
* -1 means end of stream.
*/
- public int next(Writable row) throws IOException;
+ int next(Writable row) throws IOException;
- public void close() throws IOException;
+ void close() throws IOException;
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordWriter.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordWriter.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordWriter.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RecordWriter.java Tue Feb 9 07:55:30 2010
@@ -24,12 +24,15 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
+/**
+ * RecordWriter.
+ *
+ */
public interface RecordWriter {
- public void initialize(OutputStream in, Configuration conf)
- throws IOException;
+ void initialize(OutputStream in, Configuration conf) throws IOException;
- public void write(Writable row) throws IOException;
+ void write(Writable row) throws IOException;
- public void close() throws IOException;
+ void close() throws IOException;
}
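
The RecordReader and RecordWriter hunks drop the public keyword from interface methods; interface members are implicitly public abstract, so checkstyle's RedundantModifier check flags them. A sketch (Sink and ConsoleSink are invented):

    // Illustrative: the "public" dropped in the two hunks above changes nothing.
    interface Sink {
      void write(String row); // same as: public abstract void write(String row);
      void close();
    }

    public class ConsoleSink implements Sink {
      public void write(String row) { System.out.println(row); } // must stay public here
      public void close() { }

      public static void main(String[] args) {
        Sink s = new ConsoleSink();
        s.write("hello");
        s.close();
      }
    }
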
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java Tue Feb 9 07:55:30 2010
@@ -40,7 +40,7 @@
import org.apache.hadoop.io.Writable;
/**
- * Reduce Sink Operator sends output to the reduce stage
+ * Reduce Sink Operator sends output to the reduce stage.
**/
public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
implements Serializable {
@@ -51,18 +51,18 @@
* The evaluators for the key columns. Key columns decide the sort order on
* the reducer side. Key columns are passed to the reducer in the "key".
*/
- transient protected ExprNodeEvaluator[] keyEval;
+ protected transient ExprNodeEvaluator[] keyEval;
/**
* The evaluators for the value columns. Value columns are passed to reducer
* in the "value".
*/
- transient protected ExprNodeEvaluator[] valueEval;
+ protected transient ExprNodeEvaluator[] valueEval;
/**
* The evaluators for the partition columns (CLUSTER BY or DISTRIBUTE BY in
* Hive language). Partition columns decide the reducer that the current row
* goes to. Partition columns are not passed to reducer.
*/
- transient protected ExprNodeEvaluator[] partitionEval;
+ protected transient ExprNodeEvaluator[] partitionEval;
// TODO: we use MetadataTypedColumnsetSerDe for now, till DynamicSerDe is
// ready
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java Tue Feb 9 07:55:30 2010
@@ -22,7 +22,7 @@
import java.util.ArrayList;
/**
- * RowSchema Implementation
+ * RowSchema Implementation.
*/
public class RowSchema implements Serializable {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java Tue Feb 9 07:55:30 2010
@@ -47,17 +47,25 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
+/**
+ * ScriptOperator.
+ *
+ */
public class ScriptOperator extends Operator<ScriptDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
+ /**
+ * Counter.
+ *
+ */
public static enum Counter {
DESERIALIZE_ERRORS, SERIALIZE_ERRORS
}
- transient private final LongWritable deserialize_error_count = new LongWritable();
- transient private final LongWritable serialize_error_count = new LongWritable();
+ private final transient LongWritable deserialize_error_count = new LongWritable();
+ private final transient LongWritable serialize_error_count = new LongWritable();
transient Thread outThread = null;
transient Thread errThread = null;
@@ -135,14 +143,14 @@
}
/**
- * Appends the specified component to the path list
+ * Appends the specified component to the path list.
*/
public void prependPathComponent(String str) {
pathenv = str + pathSep + pathenv;
}
/**
- * Returns the full path name of this file if it is listed in the path
+ * Returns the full path name of this file if it is listed in the path.
*/
public File getAbsolutePath(String filename) {
if (pathenv == null || pathSep == null || fileSep == null) {
@@ -287,7 +295,7 @@
outThread = new StreamThread(scriptOutputReader,
new OutputStreamProcessor(scriptOutputDeserializer
- .getObjectInspector()), "OutputProcessor");
+ .getObjectInspector()), "OutputProcessor");
RecordReader scriptErrReader = conf.getOutRecordReaderClass()
.newInstance();
@@ -373,7 +381,6 @@
LOG.error("Script failed with code " + exitVal);
new_abort = true;
}
- ;
} catch (IOException e) {
LOG.error("Got ioexception: " + e.getMessage());
e.printStackTrace();
@@ -446,9 +453,9 @@
}
interface StreamProcessor {
- public void processLine(Writable line) throws HiveException;
+ void processLine(Writable line) throws HiveException;
- public void close() throws HiveException;
+ void close() throws HiveException;
}
class OutputStreamProcessor implements StreamProcessor {
@@ -558,7 +565,7 @@
} catch (Throwable th) {
scriptError = th;
- LOG.warn("Exception in StreamThread.run(): " + th.getMessage() +
+ LOG.warn("Exception in StreamThread.run(): " + th.getMessage() +
"\nCause: " + th.getCause());
LOG.warn(StringUtils.stringifyException(th));
} finally {
@@ -576,8 +583,8 @@
}
/**
- * Wrap the script in a wrapper that allows admins to control
- **/
+ * Wrap the script in a wrapper that allows admins to control.
+ */
protected String[] addWrapper(String[] inArgs) {
String wrapper = HiveConf.getVar(hconf, HiveConf.ConfVars.SCRIPTWRAPPER);
if (wrapper == null) {
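
addWrapper splices an admin-configured wrapper (HiveConf.ConfVars.SCRIPTWRAPPER) in front of the user command. A sketch in that spirit, assuming a whitespace-separated wrapper string; the class and method are invented for illustration:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Illustrative: prepending wrapper tokens such as "nice -n 10" to the
    // user command before it is exec'd.
    public class WrapCommand {
      static String[] addWrapper(String[] inArgs, String wrapper) {
        if (wrapper == null) {
          return inArgs;
        }
        List<String> cmd = new ArrayList<String>(Arrays.asList(wrapper.split(" ")));
        cmd.addAll(Arrays.asList(inArgs));
        return cmd.toArray(new String[cmd.size()]);
      }

      public static void main(String[] args) {
        String[] wrapped = addWrapper(new String[] {"myscript.py"}, "nice -n 10");
        System.out.println(Arrays.toString(wrapped)); // [nice, -n, 10, myscript.py]
      }
    }
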
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java Tue Feb 9 07:55:30 2010
@@ -29,13 +29,13 @@
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
/**
- * Select operator implementation
- **/
+ * Select operator implementation.
+ */
public class SelectOperator extends Operator<SelectDesc> implements
Serializable {
private static final long serialVersionUID = 1L;
- transient protected ExprNodeEvaluator[] eval;
+ protected transient ExprNodeEvaluator[] eval;
transient Object[] output;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java Tue Feb 9 07:55:30 2010
@@ -72,7 +72,7 @@
*/
public class SkewJoinHandler {
- static final protected Log LOG = LogFactory.getLog(SkewJoinHandler.class
+ protected static final Log LOG = LogFactory.getLog(SkewJoinHandler.class
.getName());
public int currBigKeyTag = -1;
@@ -86,7 +86,7 @@
private Map<Byte, TableDesc> tblDesc = null;
private Map<Byte, Boolean> bigKeysExistingMap = null;
-
+
private LongWritable skewjoinFollowupJobs;
Configuration hconf = null;
@@ -150,12 +150,11 @@
}
StructObjectInspector structTblValInpector = ObjectInspectorFactory
.getStandardStructObjectInspector(valColNames,
- joinOp.joinValuesStandardObjectInspectors.get((byte) i));
+ joinOp.joinValuesStandardObjectInspectors.get((byte) i));
StructObjectInspector structTblInpector = ObjectInspectorFactory
.getUnionStructObjectInspector(Arrays
- .asList(new StructObjectInspector[] { structTblValInpector,
- structTblKeyInpector }));
+ .asList(new StructObjectInspector[] {structTblValInpector, structTblKeyInpector}));
skewKeysTableObjectInspector.put((byte) i, structTblInpector);
}
@@ -305,7 +304,7 @@
}
specPath = conf.getSmallKeysDirMap()
.get(Byte.valueOf((byte) bigKeyTbl)).get(
- Byte.valueOf((byte) smallKeyTbl));
+ Byte.valueOf((byte) smallKeyTbl));
// the file may not exist, and we just ignore this
commitOutputPathToFinalPath(specPath, true);
}
@@ -348,9 +347,9 @@
public void setSkewJoinJobCounter(LongWritable skewjoinFollowupJobs) {
this.skewjoinFollowupJobs = skewjoinFollowupJobs;
}
-
+
public void updateSkewJoinJobCounter(int tag) {
- this.skewjoinFollowupJobs.set(this.skewjoinFollowupJobs.get()+1);
+ this.skewjoinFollowupJobs.set(this.skewjoinFollowupJobs.get() + 1);
}
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java Tue Feb 9 07:55:30 2010
@@ -23,7 +23,6 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,25 +37,25 @@
import org.apache.hadoop.util.StringUtils;
/**
- * Task implementation
+ * Task implementation.
**/
public abstract class Task<T extends Serializable> implements Serializable,
Node {
private static final long serialVersionUID = 1L;
- transient protected boolean started;
- transient protected boolean initialized;
- transient protected boolean isdone;
- transient protected boolean queued;
- transient protected HiveConf conf;
- transient protected Hive db;
- transient protected Log LOG;
- transient protected LogHelper console;
- transient protected QueryPlan queryPlan;
- transient protected TaskHandle taskHandle;
- transient protected HashMap<String, Long> taskCounters;
- transient protected DriverContext driverContext;
+ protected transient boolean started;
+ protected transient boolean initialized;
+ protected transient boolean isdone;
+ protected transient boolean queued;
+ protected transient HiveConf conf;
+ protected transient Hive db;
+ protected transient Log LOG;
+ protected transient LogHelper console;
+ protected transient QueryPlan queryPlan;
+ protected transient TaskHandle taskHandle;
+ protected transient HashMap<String, Long> taskCounters;
+ protected transient DriverContext driverContext;
// Bean methods
protected List<Task<? extends Serializable>> childTasks;
@@ -126,7 +125,7 @@
/**
* Update the progress of the task within taskHandle and also dump the
- * progress information to the history file
+ * progress information to the history file.
*
* @param taskHandle
* task handle returned by execute
@@ -187,7 +186,7 @@
}
/**
- * remove the dependent task
+ * Remove the dependent task.
*
* @param dependent
* the task to remove
@@ -284,7 +283,7 @@
/**
* Should be overridden to return the type of the specific task among the
- * types in TaskType
+ * types in TaskType.
*
* @return TaskTypeType.* or -1 if not overridden
*/
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java Tue Feb 9 07:55:30 2010
@@ -24,20 +24,25 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.plan.ConditionalWork;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.FunctionWork;
import org.apache.hadoop.hive.ql.plan.CopyWork;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.ExplainWork;
import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.FunctionWork;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.MoveWork;
/**
- * TaskFactory implementation
+ * TaskFactory implementation.
**/
-public class TaskFactory {
+public final class TaskFactory {
- public final static class taskTuple<T extends Serializable> {
+ /**
+ * taskTuple.
+ *
+ * @param <T>
+ */
+ public static final class taskTuple<T extends Serializable> {
public Class<T> workClass;
public Class<? extends Task<T>> taskClass;
@@ -161,4 +166,8 @@
return (ret);
}
+ private TaskFactory() {
+ // prevent instantiation
+ }
+
}
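
TaskFactory, like OperatorFactory and Throttle in this commit, gains a private constructor: checkstyle's HideUtilityClassConstructor rule, on the grounds that a class with only static members should not be instantiable. The pattern in isolation:

    // Illustrative: a utility class locked down the way this commit does it.
    public final class Util {
      public static int twice(int x) {
        return 2 * x;
      }

      private Util() {
        // prevent instantiation; "final" on the class also blocks subclassing
      }

      public static void main(String[] args) {
        System.out.println(twice(21)); // 42
        // new Util();  // would not compile outside this class
      }
    }
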
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskHandle.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskHandle.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskHandle.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskHandle.java Tue Feb 9 07:55:30 2010
@@ -4,6 +4,10 @@
import org.apache.hadoop.mapred.Counters;
+/**
+ * TaskHandle.
+ *
+ */
public class TaskHandle {
// The eventual goal is to monitor the progress of all the tasks, not only the
// map reduce task.
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskResult.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskResult.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskResult.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskResult.java Tue Feb 9 07:55:30 2010
@@ -18,9 +18,8 @@
package org.apache.hadoop.hive.ql.exec;
-
/**
- * TaskResult implementation
+ * TaskResult implementation.
**/
public class TaskResult {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java Tue Feb 9 07:55:30 2010
@@ -23,7 +23,7 @@
import org.apache.hadoop.hive.ql.session.SessionState;
/**
- * TaskRunner implementation
+ * TaskRunner implementation.
**/
public class TaskRunner extends Thread {
@@ -48,7 +48,7 @@
}
/**
- * Launches a task, and sets its exit value in the result variable
+ * Launches a task, and sets its exit value in the result variable.
*/
public void runSequential() {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TerminalOperator.java Tue Feb 9 07:55:30 2010
@@ -21,7 +21,7 @@
import java.io.Serializable;
/**
- * Terminal Operator Base Class
+ * Terminal Operator Base Class.
**/
public abstract class TerminalOperator<T extends Serializable> extends
Operator<T> implements Serializable {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java Tue Feb 9 07:55:30 2010
@@ -27,6 +27,10 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.LineRecordReader.LineReader;
+/**
+ * TextRecordReader.
+ *
+ */
public class TextRecordReader implements RecordReader {
private LineReader lineReader;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java Tue Feb 9 07:55:30 2010
@@ -25,6 +25,10 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
+/**
+ * TextRecordWriter.
+ *
+ */
public class TextRecordWriter implements RecordWriter {
private OutputStream out;
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java Tue Feb 9 07:55:30 2010
@@ -26,31 +26,29 @@
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.JobConf;
-/*
+/**
* Intelligence to make clients wait if the cluster is in a bad state.
*/
-public class Throttle {
+public final class Throttle {
// The percentage of maximum allocated memory that triggers GC
// on job tracker. This could be overridden thru the jobconf.
// The default is such that there is no throttling.
- static private int DEFAULT_MEMORY_GC_PERCENT = 100;
+ private static final int DEFAULT_MEMORY_GC_PERCENT = 100;
// sleep this many seconds between each retry.
// This could be overridden thru the jobconf.
- static private int DEFAULT_RETRY_PERIOD = 60;
+ private static final int DEFAULT_RETRY_PERIOD = 60;
/**
- * fetch http://tracker.om:/gc.jsp?threshold=period
+ * Fetch http://tracker.om:/gc.jsp?threshold=period.
*/
static void checkJobTracker(JobConf conf, Log LOG) {
try {
- byte buffer[] = new byte[1024];
- int threshold = conf.getInt("mapred.throttle.threshold.percent",
- DEFAULT_MEMORY_GC_PERCENT);
- int retry = conf.getInt("mapred.throttle.retry.period",
- DEFAULT_RETRY_PERIOD);
+ byte[] buffer = new byte[1024];
+ int threshold = conf.getInt("mapred.throttle.threshold.percent", DEFAULT_MEMORY_GC_PERCENT);
+ int retry = conf.getInt("mapred.throttle.retry.period", DEFAULT_RETRY_PERIOD);
// If the threshold is 100 percent, then there is no throttling
if (threshold == 100) {
@@ -58,8 +56,7 @@
}
// This is the Job Tracker URL
- String tracker = JobTrackerURLResolver.getURL(conf)
- + "/gc.jsp?threshold=" + threshold;
+ String tracker = JobTrackerURLResolver.getURL(conf) + "/gc.jsp?threshold=" + threshold;
while (true) {
// read in the first 1K characters from the URL
@@ -102,4 +99,8 @@
LOG.warn("Job is not being throttled. " + e);
}
}
+
+ private Throttle() {
+ // prevent instantiation
+ }
}
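
Throttle also switches byte buffer[] to byte[] buffer; both forms are legal Java, but checkstyle's ArrayTypeStyle prefers brackets on the type, where they logically belong. Illustrated:

    // Illustrative: C-style versus Java-style array declarations.
    public class ArrayStyle {
      public static void main(String[] args) {
        byte buffer1[] = new byte[1024]; // C-style, discouraged
        byte[] buffer2 = new byte[1024]; // Java-style, what the commit uses
        System.out.println(buffer1.length + " " + buffer2.length);
      }
    }
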
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java Tue Feb 9 07:55:30 2010
@@ -71,7 +71,7 @@
}
/**
- * Sets the resolver
+ * Sets the resolver.
*
* @param rslv
* The method resolver to use for method resolution.
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluator.java Tue Feb 9 07:55:30 2010
@@ -29,5 +29,5 @@
/**
* Initializer. Initializes the state for the evaluator.
*/
- public void init();
+ void init();
}
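
The interface above only declares init(); a UDAF's iterate, terminatePartial,
merge, and terminate methods are located reflectively by the resolver. A
minimal sketch of an evaluator that counts non-null rows (the class and its
logic are illustrative, not part of this commit):

    import org.apache.hadoop.hive.ql.exec.UDAFEvaluator;

    public class CountEvaluatorSketch implements UDAFEvaluator {
      private long count;

      public void init() {
        count = 0; // reset state between aggregations
      }

      public boolean iterate(Object o) {
        if (o != null) {
          count++;
        }
        return true;
      }

      public Long terminatePartial() {
        return count; // partial aggregate shipped to the reducer
      }

      public boolean merge(Long other) {
        if (other != null) {
          count += other;
        }
        return true;
      }

      public Long terminate() {
        return count; // final result
      }
    }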
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDF.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDF.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDF.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDF.java Tue Feb 9 07:55:30 2010
@@ -42,7 +42,7 @@
private UDFMethodResolver rslv;
/**
- * The constructor
+ * The constructor.
*/
public UDF() {
rslv = new DefaultUDFMethodResolver(this.getClass());
@@ -56,7 +56,7 @@
}
/**
- * Sets the resolver
+ * Sets the resolver.
*
* @param rslv
* The method resolver to use for method resolution.
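
The constructor above installs a DefaultUDFMethodResolver, which matches a call
site against an evaluate() signature. A minimal UDF relying on that default, as
a sketch (the class name and logic are illustrative):

    import org.apache.hadoop.hive.ql.exec.UDF;
    import org.apache.hadoop.io.Text;

    public class UDFToUpperSketch extends UDF {
      private final Text result = new Text();

      // The default resolver locates this method by matching the
      // argument types at the call site.
      public Text evaluate(Text s) {
        if (s == null) {
          return null;
        }
        result.set(s.toString().toUpperCase());
        return result;
      }
    }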
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDFMethodResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDFMethodResolver.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDFMethodResolver.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDFMethodResolver.java Tue Feb 9 07:55:30 2010
@@ -45,6 +45,5 @@
* The list of the argument types that need to be matched with the
* evaluate function signature.
*/
- public Method getEvalMethod(List<TypeInfo> argClasses)
- throws AmbiguousMethodException, UDFArgumentException;
+ Method getEvalMethod(List<TypeInfo> argClasses) throws UDFArgumentException;
}
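
The narrowed throws clause above suggests AmbiguousMethodException is subsumed
by UDFArgumentException. A custom resolver written against this signature might
look like the following sketch; the first-match-by-arity strategy is
deliberately naive and purely illustrative:

    import java.lang.reflect.Method;
    import java.util.List;

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.exec.UDFMethodResolver;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;

    public class FirstMatchResolverSketch implements UDFMethodResolver {
      private final Class<?> udfClass;

      public FirstMatchResolverSketch(Class<?> udfClass) {
        this.udfClass = udfClass;
      }

      public Method getEvalMethod(List<TypeInfo> argClasses)
          throws UDFArgumentException {
        // Return the first evaluate() whose arity matches the call site.
        for (Method m : udfClass.getMethods()) {
          if (m.getName().equals("evaluate")
              && m.getParameterTypes().length == argClasses.size()) {
            return m;
          }
        }
        throw new UDFArgumentException("No matching evaluate() in " + udfClass);
      }
    }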
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java Tue Feb 9 07:55:30 2010
@@ -34,6 +34,10 @@
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+/**
+ * UDTFOperator.
+ *
+ */
public class UDTFOperator extends Operator<UDTFDesc> implements Serializable {
private static final long serialVersionUID = 1L;
@@ -54,8 +58,8 @@
conf.getGenericUDTF().setCollector(new UDTFCollector(this));
// Make an object inspector [] of the arguments to the UDTF
- List<? extends StructField> inputFields = ((StandardStructObjectInspector) inputObjInspectors[0])
- .getAllStructFieldRefs();
+ List<? extends StructField> inputFields =
+ ((StandardStructObjectInspector) inputObjInspectors[0]).getAllStructFieldRefs();
udtfInputOIs = new ObjectInspector[inputFields.size()];
for (int i = 0; i < inputFields.size(); i++) {
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Tue Feb 9 07:55:30 2010
@@ -72,10 +72,10 @@
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ErrorMsg;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.GroupByDesc;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
import org.apache.hadoop.hive.serde.Constants;
@@ -90,22 +90,34 @@
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
+/**
+ * Utilities.
+ *
+ */
@SuppressWarnings("nls")
-public class Utilities {
+public final class Utilities {
/**
- * The object in the reducer are composed of these top level fields
+ * The objects in the reducer are composed of these top-level fields.
*/
public static String HADOOP_LOCAL_FS = "file:///";
+ /**
+ * ReduceField.
+ *
+ */
public static enum ReduceField {
KEY, VALUE, ALIAS
};
+ private Utilities() {
+ // prevent instantiation
+ }
+
private static Map<String, MapredWork> gWorkMap = Collections
.synchronizedMap(new HashMap<String, MapredWork>());
- static final private Log LOG = LogFactory.getLog(Utilities.class.getName());
+ private static final Log LOG = LogFactory.getLog(Utilities.class.getName());
public static void clearMapRedWork(Configuration job) {
try {
@@ -176,7 +188,7 @@
@Override
protected Expression instantiate(Object oldInstance, Encoder out) {
return new Expression(Enum.class, "valueOf", new Object[] {
- oldInstance.getClass(), ((Enum<?>) oldInstance).name() });
+ oldInstance.getClass(), ((Enum<?>) oldInstance).name()});
}
@Override
@@ -239,7 +251,7 @@
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
e
.setPersistenceDelegate(Operator.ProgressCounter.class,
- new EnumDelegate());
+ new EnumDelegate());
e.writeObject(t);
e.close();
@@ -267,6 +279,12 @@
return (ret);
}
+ /**
+ * Tuple.
+ *
+ * @param <T>
+ * @param <V>
+ */
public static class Tuple<T, V> {
private final T one;
private final V two;
@@ -295,11 +313,11 @@
defaultTd = PlanUtils.getDefaultTableDesc("" + Utilities.ctrlaCode);
}
- public final static int newLineCode = 10;
- public final static int tabCode = 9;
- public final static int ctrlaCode = 1;
+ public static final int newLineCode = 10;
+ public static final int tabCode = 9;
+ public static final int ctrlaCode = 1;
- public final static String INDENT = " ";
+ public static final String INDENT = " ";
// Note: When DDL supports specifying what string to represent null,
// we should specify "NULL" to represent null in the temp table, and then
@@ -346,6 +364,10 @@
return (ret);
}
+ /**
+ * StreamPrinter.
+ *
+ */
public static class StreamPrinter extends Thread {
InputStream is;
String type;
@@ -387,10 +409,10 @@
public static TableDesc getTableDesc(String cols, String colTypes) {
return (new TableDesc(LazySimpleSerDe.class, SequenceFileInputFormat.class,
HiveSequenceFileOutputFormat.class, Utilities.makeProperties(
- org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, ""
- + Utilities.ctrlaCode,
- org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS, cols,
- org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES, colTypes)));
+ org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, ""
+ + Utilities.ctrlaCode,
+ org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS, cols,
+ org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES, colTypes)));
}
public static PartitionDesc getPartitionDesc(Partition part)
@@ -492,13 +514,17 @@
return prefix + suffix;
}
- public final static String NSTR = "";
+ public static final String NSTR = "";
- public static enum streamStatus {
+ /**
+ * StreamStatus.
+ *
+ */
+ public static enum StreamStatus {
EOF, TERMINATED
}
- public static streamStatus readColumn(DataInput in, OutputStream out)
+ public static StreamStatus readColumn(DataInput in, OutputStream out)
throws IOException {
while (true) {
@@ -506,11 +532,11 @@
try {
b = in.readByte();
} catch (EOFException e) {
- return streamStatus.EOF;
+ return StreamStatus.EOF;
}
if (b == Utilities.newLineCode) {
- return streamStatus.TERMINATED;
+ return StreamStatus.TERMINATED;
}
out.write(b);
@@ -583,7 +609,7 @@
}
/**
- * Create a sequencefile output stream based on job configuration
+ * Create a sequencefile output stream based on job configuration.
*
* @param jc
* Job configuration
@@ -607,7 +633,7 @@
/**
* Create a sequencefile output stream based on job configuration. Uses a
* user-supplied compression flag (rather than obtaining it from the Job
- * Configuration)
+ * Configuration).
*
* @param jc
* Job configuration
@@ -640,7 +666,7 @@
/**
* Create a RCFile output stream based on job configuration. Uses a user-supplied
- * compression flag (rather than obtaining it from the Job Configuration)
+ * compression flag (rather than obtaining it from the Job Configuration).
*
* @param jc
* Job configuration
@@ -663,7 +689,7 @@
}
/**
- * Shamelessly cloned from GenericOptionsParser
+ * Shamelessly cloned from GenericOptionsParser.
*/
public static String realFile(String newFile, Configuration conf)
throws IOException {
@@ -685,8 +711,6 @@
fs.close();
} catch (IOException e) {
}
- ;
-
String file = path.makeQualified(fs).toString();
// For compatibility with hadoop 0.17, change file:/a/b/c to file:///a/b/c
if (StringUtils.startsWith(file, "file:/")
@@ -725,14 +749,14 @@
}
/**
- * Given a path, convert to a temporary path
+ * Given a path, convert to a temporary path.
*/
public static Path toTempPath(String orig) {
return toTempPath(new Path(orig));
}
/**
- * Detect if the supplied file is a temporary path
+ * Detect if the supplied file is a temporary path.
*/
public static boolean isTempPath(FileStatus file) {
String name = file.getPath().getName();
@@ -754,7 +778,7 @@
* the target directory
* @throws IOException
*/
- static public void rename(FileSystem fs, Path src, Path dst)
+ public static void rename(FileSystem fs, Path src, Path dst)
throws IOException, HiveException {
if (!fs.rename(src, dst)) {
throw new HiveException("Unable to move: " + src + " to: " + dst);
@@ -774,7 +798,7 @@
* the target directory
* @throws IOException
*/
- static public void renameOrMoveFiles(FileSystem fs, Path src, Path dst)
+ public static void renameOrMoveFiles(FileSystem fs, Path src, Path dst)
throws IOException, HiveException {
if (!fs.exists(dst)) {
if (!fs.rename(src, dst)) {
@@ -806,8 +830,7 @@
* extension. The file name looks like: "24931_r_000000_0" or
* "24931_r_000000_0.gz"
*/
- static Pattern fileNameTaskIdRegex = Pattern
- .compile("^.*_([0-9]*)_[0-9](\\..*)?$");
+ private static Pattern fileNameTaskIdRegex = Pattern.compile("^.*_([0-9]*)_[0-9](\\..*)?$");
/**
* Get the task id from the filename. E.g., get "000000" out of
@@ -836,7 +859,7 @@
return;
}
- FileStatus items[] = fs.listStatus(path);
+ FileStatus[] items = fs.listStatus(path);
if (items == null) {
return;
}
@@ -870,7 +893,7 @@
}
/**
- * Add new elements to the classpath
+ * Add new elements to the classpath.
*
* @param newPaths
* Array of classpath elements
@@ -903,7 +926,7 @@
}
/**
- * remove elements from the classpath
+ * Remove elements from the classpath.
*
* @param pathsToRemove
* Array of classpath elements
@@ -1007,7 +1030,7 @@
* tracker. Useful for operators that may not output data for a while.
*
* @param hconf
- * @return the interval in miliseconds
+ * @return the interval in milliseconds
*/
public static int getDefaultNotificationInterval(Configuration hconf) {
int notificationInterval;
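
The fileNameTaskIdRegex above backs getTaskIdFromFilename(). A standalone check
of the pattern, with the pattern string copied from the diff and everything
else ours:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TaskIdRegexCheck {
      public static void main(String[] args) {
        Pattern p = Pattern.compile("^.*_([0-9]*)_[0-9](\\..*)?$");
        for (String name : new String[] {"24931_r_000000_0", "24931_r_000000_0.gz"}) {
          Matcher m = p.matcher(name);
          if (m.matches()) {
            // group(1) is the task id, e.g. "000000"
            System.out.println(name + " -> " + m.group(1));
          }
        }
      }
    }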
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/DCLLItem.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/DCLLItem.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/DCLLItem.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/DCLLItem.java Tue Feb 9 07:55:30 2010
@@ -59,7 +59,7 @@
}
/**
- * Set the previous item as itm
+ * Set the previous item as itm.
*
* @param itm
* the item to be set as previous.
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java Tue Feb 9 07:55:30 2010
@@ -48,12 +48,12 @@
private static final int THRESHOLD = 25000;
private int threshold; // threshold to put data into persistent hash table
- // instead
+ // instead
private HashMap<K, MRUItem> mHash; // main memory HashMap
private HTree pHash; // persistent HashMap
private RecordManager recman; // record manager required by HTree
private File tmpFile; // temp file holding the persistent data from record
- // manager.
+ // manager.
private MRU<MRUItem> MRUList; // MRU cache entry
/**
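
The fields above sketch a two-tier map: a bounded in-memory HashMap fronted by
an MRU list, with colder entries pushed to a persistent JDBM HTree once the
threshold is crossed. The eviction idea can be illustrated with a plain
LinkedHashMap in access order; this is a concept sketch, not the HashMapWrapper
API, and the spill step is stubbed:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SpillingCacheSketch<K, V> extends LinkedHashMap<K, V> {
      private final int threshold;

      public SpillingCacheSketch(int threshold) {
        super(16, 0.75f, true); // accessOrder=true: recently used entries stay
        this.threshold = threshold;
      }

      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        if (size() > threshold) {
          spillToDisk(eldest.getKey(), eldest.getValue());
          return true; // drop from memory once persisted
        }
        return false;
      }

      private void spillToDisk(K key, V value) {
        // HashMapWrapper persists via an HTree; stubbed in this sketch.
      }
    }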
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MRU.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MRU.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MRU.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MRU.java Tue Feb 9 07:55:30 2010
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.exec.persistence;
-
/**
* An MRU (Most Recently Used) cache implementation. This implementation
* maintains a doubly circular linked list and it can be used with an auxiliary
@@ -79,7 +78,7 @@
}
/**
- * Insert a new item as the head
+ * Insert a new item as the head.
*
* @param v
* the new linked list item to be added to the head.
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java Tue Feb 9 07:55:30 2010
@@ -32,12 +32,12 @@
import org.apache.hadoop.io.Writable;
/**
- * Map Join Object used for both key
+ * Map Join Object used for the key.
*/
public class MapJoinObjectKey implements Externalizable {
- transient protected int metadataTag;
- transient protected ArrayList<Object> obj;
+ protected transient int metadataTag;
+ protected transient ArrayList<Object> obj;
public MapJoinObjectKey() {
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java Tue Feb 9 07:55:30 2010
@@ -34,13 +34,13 @@
import org.apache.hadoop.io.Writable;
/**
- * Map Join Object used for both key and value
+ * Map Join Object used for the value.
*/
public class MapJoinObjectValue implements Externalizable {
- transient protected int metadataTag;
- transient protected RowContainer obj;
- transient protected Configuration conf;
+ protected transient int metadataTag;
+ protected transient RowContainer obj;
+ protected transient Configuration conf;
public MapJoinObjectValue() {
}
@@ -97,8 +97,8 @@
ArrayList<Object> memObj = (ArrayList<Object>) ObjectInspectorUtils
.copyToStandardObject(ctx.getSerDe().deserialize(val), ctx
- .getSerDe().getObjectInspector(),
- ObjectInspectorCopyOption.WRITABLE);
+ .getSerDe().getObjectInspector(),
+ ObjectInspectorCopyOption.WRITABLE);
res.add(memObj);
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Tue Feb 9 07:55:30 2010
@@ -85,7 +85,7 @@
// original read block
private Row[] firstReadBlockPointer;
private int blockSize; // number of objects in the block before it is spilled
- // to disk
+ // to disk
private int numFlushedBlocks; // total # of blocks
private int size; // total # of elements in the RowContainer
private File tmpFile; // temporary file holding the spilled blocks
@@ -102,7 +102,7 @@
private TableDesc tblDesc;
boolean firstCalled = false; // once called first, it will never be able to
- // write again.
+ // write again.
int acutalSplitNum = 0;
int currentSplitPointer = 0;
org.apache.hadoop.mapred.RecordReader rr = null; // record reader
@@ -201,15 +201,15 @@
} else {
if (inputSplits == null) {
if (this.inputFormat == null) {
- inputFormat = (InputFormat<WritableComparable, Writable>) ReflectionUtils
+ inputFormat = (InputFormat<WritableComparable, Writable>)ReflectionUtils
.newInstance(tblDesc.getInputFileFormatClass(),
- jobCloneUsingLocalFs);
+ jobCloneUsingLocalFs);
}
HiveConf.setVar(jobCloneUsingLocalFs,
HiveConf.ConfVars.HADOOPMAPREDINPUTDIR,
org.apache.hadoop.util.StringUtils.escapeString(parentFile
- .getAbsolutePath()));
+ .getAbsolutePath()));
inputSplits = inputFormat.getSplits(jobCloneUsingLocalFs, 1);
acutalSplitNum = inputSplits.length;
}
@@ -315,7 +315,7 @@
tempOutPath = new Path(tmpFile.toString());
rw = HiveFileFormatUtils.getRecordWriter(this.jobCloneUsingLocalFs,
hiveOutputFormat, serde.getSerializedClass(), false, tblDesc
- .getProperties(), tempOutPath);
+ .getProperties(), tempOutPath);
} else if (rw == null) {
throw new HiveException(
"RowContainer has already been closed for writing.");
@@ -382,7 +382,7 @@
nextSplit = false;
this.currentReadBlock[i++] = (Row) ObjectInspectorUtils
.copyToStandardObject(serde.deserialize(val), serde
- .getObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
+ .getObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
}
}
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java Tue Feb 9 07:55:30 2010
@@ -43,13 +43,17 @@
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
+/**
+ * HiveHistory.
+ *
+ */
public class HiveHistory {
PrintWriter histStream; // History File stream
String histFileName; // History file name
- static final private Log LOG = LogFactory.getLog("hive.ql.exec.HiveHistory");
+ private static final Log LOG = LogFactory.getLog("hive.ql.exec.HiveHistory");
private LogHelper console;
@@ -63,12 +67,40 @@
private static final String DELIMITER = " ";
+ /**
+ * RecordTypes.
+ *
+ */
public static enum RecordTypes {
- QueryStart, QueryEnd, TaskStart, TaskEnd, TaskProgress, SessionStart, SessionEnd, Counters
+ QueryStart,
+ QueryEnd,
+ TaskStart,
+ TaskEnd,
+ TaskProgress,
+ SessionStart,
+ SessionEnd,
+ Counters
};
+ /**
+ * Keys.
+ *
+ */
public static enum Keys {
- SESSION_ID, QUERY_ID, TASK_ID, QUERY_RET_CODE, QUERY_NUM_TASKS, QUERY_STRING, TIME, TASK_RET_CODE, TASK_NAME, TASK_HADOOP_ID, TASK_HADOOP_PROGRESS, TASK_COUNTERS, TASK_NUM_REDUCERS, ROWS_INSERTED
+ SESSION_ID,
+ QUERY_ID,
+ TASK_ID,
+ QUERY_RET_CODE,
+ QUERY_NUM_TASKS,
+ QUERY_STRING,
+ TIME,
+ TASK_RET_CODE,
+ TASK_NAME,
+ TASK_HADOOP_ID,
+ TASK_HADOOP_PROGRESS,
+ TASK_COUNTERS,
+ TASK_NUM_REDUCERS,
+ ROWS_INSERTED
};
private static final String KEY = "(\\w+)";
@@ -78,30 +110,27 @@
private static final Pattern pattern = Pattern.compile(KEY + "=" + "\""
+ VALUE + "\"");
- private static final Pattern rowCountPattern = Pattern
- .compile(ROW_COUNT_PATTERN);
+ private static final Pattern rowCountPattern = Pattern.compile(ROW_COUNT_PATTERN);
// temp buffer for parsed data
private static Map<String, String> parseBuffer = new HashMap<String, String>();
/**
- * Listner interface Parser will call handle function for each record type
+ * Listener interface. Parser will call the handle function for each record type.
*/
public static interface Listener {
- public void handle(RecordTypes recType, Map<String, String> values)
- throws IOException;
+ void handle(RecordTypes recType, Map<String, String> values) throws IOException;
}
/**
- * Parses history file and calls call back functions
+ * Parses the history file and calls callback functions.
*
* @param path
* @param l
* @throws IOException
*/
- public static void parseHiveHistory(String path, Listener l)
- throws IOException {
+ public static void parseHiveHistory(String path, Listener l) throws IOException {
FileInputStream fi = new FileInputStream(path);
BufferedReader reader = new BufferedReader(new InputStreamReader(fi));
try {
@@ -151,19 +180,35 @@
parseBuffer.clear();
}
+ /**
+ * Info.
+ *
+ */
public static class Info {
}
+ /**
+ * SessionInfo.
+ *
+ */
public static class SessionInfo extends Info {
public String sessionId;
};
+ /**
+ * QueryInfo.
+ *
+ */
public static class QueryInfo extends Info {
public Map<String, String> hm = new HashMap<String, String>();
public Map<String, Long> rowCountMap = new HashMap<String, Long>();
};
+ /**
+ * TaskInfo.
+ *
+ */
public static class TaskInfo extends Info {
public Map<String, String> hm = new HashMap<String, String>();
@@ -218,7 +263,7 @@
}
/**
- * Write the a history record to history file
+ * Write a history record to the history file.
*
* @param rt
* @param keyValMap
@@ -249,7 +294,7 @@
}
/**
- * Called at the start of job Driver.run()
+ * Called at the start of job Driver.run().
*/
public void startQuery(String cmd, String id) {
SessionState ss = SessionState.get();
@@ -268,7 +313,7 @@
}
/**
- * Used to set job status and other attributes of a job
+ * Used to set job status and other attributes of a job.
*
* @param queryId
* @param propName
@@ -433,7 +478,7 @@
}
/**
- * write out counters
+ * Write out counters.
*/
static Map<String, String> ctrmap = null;
@@ -446,7 +491,7 @@
}
/**
- * Set the table to id map
+ * Set the table to id map.
*
* @param map
*/
@@ -455,7 +500,7 @@
}
/**
- * Returns table name for the counter name
+ * Returns table name for the counter name.
*
* @param name
* @return tableName
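
The Listener/parseHiveHistory() pair above is the whole parsing API. A minimal
consumer written against the signatures in this diff (only the printing is
ours):

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.history.HiveHistory;
    import org.apache.hadoop.hive.ql.history.HiveHistory.Listener;
    import org.apache.hadoop.hive.ql.history.HiveHistory.RecordTypes;

    public class HistoryDumpSketch implements Listener {
      public void handle(RecordTypes recType, Map<String, String> values)
          throws IOException {
        // Invoked once per parsed record with its key="value" pairs.
        System.out.println(recType + " " + values);
      }

      public static void main(String[] args) throws IOException {
        HiveHistory.parseHiveHistory(args[0], new HistoryDumpSketch());
      }
    }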
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java?rev=907950&r1=907949&r2=907950&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java Tue Feb 9 07:55:30 2010
@@ -23,12 +23,16 @@
import java.util.Map;
import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
+import org.apache.hadoop.hive.ql.history.HiveHistory.Listener;
import org.apache.hadoop.hive.ql.history.HiveHistory.QueryInfo;
import org.apache.hadoop.hive.ql.history.HiveHistory.RecordTypes;
import org.apache.hadoop.hive.ql.history.HiveHistory.TaskInfo;
-public class HiveHistoryViewer implements
- org.apache.hadoop.hive.ql.history.HiveHistory.Listener {
+/**
+ * HiveHistoryViewer.
+ *
+ */
+public class HiveHistoryViewer implements Listener {
String historyFile;
@@ -58,7 +62,7 @@
}
/**
- * parse history files
+ * Parse history files.
*/
void init() {
@@ -72,7 +76,7 @@
}
/**
- * Implementation Listner interface function
+ * Implementation of the Listener interface function.
*
* @see org.apache.hadoop.hive.ql.history.HiveHistory.Listener#handle(org.apache.hadoop.hive.ql.history.HiveHistory.RecordTypes,
* java.util.Map)