Posted to commits@hive.apache.org by zs...@apache.org on 2010/01/21 11:38:15 UTC

svn commit: r901644 [12/37] - in /hadoop/hive/trunk: ./ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ ql/src/java/org/apache/hadoop/hive/ql/history/ ql/src/jav...

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java Thu Jan 21 10:37:58 2010
@@ -31,12 +31,14 @@
   /**
    * starting point for walking.
    * 
-   * @param startNodes list of starting operators
-   * @param nodeOutput If this parameter is not null, the call to the function returns the 
-   * map from node to objects returned by the processors.
+   * @param startNodes
+   *          list of starting operators
+   * @param nodeOutput
+   *          If this parameter is not null, the call to the function returns
+   *          the map from node to objects returned by the processors.
    * @throws SemanticException
    */
-  public void startWalking(Collection<Node> startNodes, HashMap<Node, Object> nodeOutput)
-      throws SemanticException;
+  public void startWalking(Collection<Node> startNodes,
+      HashMap<Node, Object> nodeOutput) throws SemanticException;
 
 }
\ No newline at end of file

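For orientation, a minimal sketch of how a caller might drive this interface, assuming the Hive ql classes are on the classpath. DefaultGraphWalker is the sibling implementation in this package; the wiring below is illustrative, not part of this commit:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashMap;

    import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
    import org.apache.hadoop.hive.ql.lib.Dispatcher;
    import org.apache.hadoop.hive.ql.lib.GraphWalker;
    import org.apache.hadoop.hive.ql.lib.Node;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class WalkExample {
      public static HashMap<Node, Object> walk(Node root, Dispatcher disp)
          throws SemanticException {
        GraphWalker walker = new DefaultGraphWalker(disp);
        Collection<Node> startNodes = new ArrayList<Node>();
        startNodes.add(root);
        // Passing a non-null map asks startWalking to record, per node,
        // the object returned by that node's processor.
        HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
        walker.startWalking(startNodes, nodeOutput);
        return nodeOutput;
      }
    }
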
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java Thu Jan 21 10:37:58 2010
@@ -25,14 +25,15 @@
  * These are implemented by the node of the graph that needs to be walked.
  */
 public interface Node {
-  
+
   /**
-   * Gets the vector of children nodes. This is used in the graph walker algorithms.
+   * Gets the vector of children nodes. This is used in the graph walker
+   * algorithms.
    * 
    * @return Vector<Node>
    */
   public List<? extends Node> getChildren();
-  
+
   /**
    * Gets the name of the node. This is used in the rule dispatchers.
    * 

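A hypothetical minimal implementation of this two-method contract (not a class from this commit); getChildren feeds the graph walkers and getName feeds the rule dispatchers:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.ql.lib.Node;

    // Illustrative only; Hive's real Nodes are classes such as Operator and ASTNode.
    public class SimpleNode implements Node {
      private final String name;
      private final List<SimpleNode> children = new ArrayList<SimpleNode>();

      public SimpleNode(String name) {
        this.name = name;
      }

      public void addChild(SimpleNode child) {
        children.add(child);
      }

      // Used by the graph walker algorithms; a leaf may return an empty list.
      public List<? extends Node> getChildren() {
        return children;
      }

      // Used by the rule dispatchers when matching rule regular expressions.
      public String getName() {
        return name;
      }
    }
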
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java Thu Jan 21 10:37:58 2010
@@ -17,25 +17,28 @@
  */
 package org.apache.hadoop.hive.ql.lib;
 
-
 import java.util.Stack;
 
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Base class for processing operators which is no-op. The specific processors can register their own context with
- * the dispatcher.
+ * Base class for processing operators, which is a no-op. The specific
+ * processors can register their own context with the dispatcher.
  */
 public interface NodeProcessor {
-  
+
   /**
    * generic process for all ops that don't have specific implementations
-   * @param nd operator to process
-   * @param procCtx operator processor context
-   * @param nodeOutputs A variable argument list of outputs from other nodes in the walk
+   * 
+   * @param nd
+   *          operator to process
+   * @param procCtx
+   *          operator processor context
+   * @param nodeOutputs
+   *          A variable argument list of outputs from other nodes in the walk
    * @return Object to be returned by the process call
    * @throws SemanticException
    */
-  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) 
-    throws SemanticException;
+  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+      Object... nodeOutputs) throws SemanticException;
 }

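A sketch of a processor written against the reflowed signature above; the class is hypothetical, and it treats procCtx and nodeOutputs as opaque, which a real processor usually would not:

    import java.util.Stack;

    import org.apache.hadoop.hive.ql.lib.Node;
    import org.apache.hadoop.hive.ql.lib.NodeProcessor;
    import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class CountingProcessor implements NodeProcessor {
      private int visited = 0;

      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        // stack holds the path from a start node down to nd, and nodeOutputs
        // carries the results of nodes already processed in the walk.
        visited++;
        // The returned object becomes this node's entry in the walker's
        // nodeOutput map when the caller passes one to startWalking.
        return Integer.valueOf(visited);
      }
    }
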
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java Thu Jan 21 10:37:58 2010
@@ -18,31 +18,24 @@
 
 package org.apache.hadoop.hive.ql.lib;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.Stack;
-import java.lang.Object;
-
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * base class for operator graph walker
- * this class takes list of starting ops and walks them one by one.
+ * Base class for operator graph walkers. This class takes a list of starting
+ * ops and walks them one by one.
  */
 public class PreOrderWalker extends DefaultGraphWalker {
 
-  /* 
-   * Since the operator tree is a DAG, nodes with mutliple parents will be visited more than once.
-   * This can be made configurable.
+  /*
+   * Since the operator tree is a DAG, nodes with multiple parents will be
+   * visited more than once. This can be made configurable.
    */
 
   /**
    * Constructor
-   * @param disp dispatcher to call for each op encountered
+   * 
+   * @param disp
+   *          dispatcher to call for each op encountered
    */
   public PreOrderWalker(Dispatcher disp) {
     super(disp);
@@ -50,17 +43,22 @@
 
   /**
    * walk the current operator and its descendants
-   * @param nd current operator in the graph
+   * 
+   * @param nd
+   *          current operator in the graph
    * @throws SemanticException
    */
+  @Override
   public void walk(Node nd) throws SemanticException {
     opStack.push(nd);
     dispatch(nd, opStack);
-    
+
     // move all the children to the front of queue
-    if (nd.getChildren() != null) 
-      for (Node n : nd.getChildren())
+    if (nd.getChildren() != null) {
+      for (Node n : nd.getChildren()) {
         walk(n);
+      }
+    }
 
     opStack.pop();
   }

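To see the multiple-parent revisit the comment above describes, a sketch reusing the hypothetical SimpleNode and CountingProcessor from the earlier examples; DefaultRuleDispatcher and its (defaultProcessor, rules, context) constructor are assumptions about the sibling dispatcher class, which is not shown in this hunk:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
    import org.apache.hadoop.hive.ql.lib.Dispatcher;
    import org.apache.hadoop.hive.ql.lib.Node;
    import org.apache.hadoop.hive.ql.lib.NodeProcessor;
    import org.apache.hadoop.hive.ql.lib.PreOrderWalker;
    import org.apache.hadoop.hive.ql.lib.Rule;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class DiamondWalk {
      public static void main(String[] args) throws SemanticException {
        // Diamond DAG: TS -> {FIL, SEL} -> RS, so RS has two parents.
        SimpleNode ts = new SimpleNode("TS");
        SimpleNode fil = new SimpleNode("FIL");
        SimpleNode sel = new SimpleNode("SEL");
        SimpleNode rs = new SimpleNode("RS");
        ts.addChild(fil);
        ts.addChild(sel);
        fil.addChild(rs);
        sel.addChild(rs);

        // With no rules registered, every node goes to the default processor.
        Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
        Dispatcher disp = new DefaultRuleDispatcher(new CountingProcessor(),
            rules, null);

        Collection<Node> start = new ArrayList<Node>();
        start.add(ts);
        // Pre-order: each node is dispatched before its children; RS is
        // dispatched twice, once via FIL and once via SEL.
        new PreOrderWalker(disp).startWalking(start, new HashMap<Node, Object>());
      }
    }
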
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java Thu Jan 21 10:37:58 2010
@@ -19,16 +19,18 @@
 package org.apache.hadoop.hive.ql.lib;
 
 import java.util.Stack;
+
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Rule interface for Operators
- * Used in operator dispatching to dispatch process/visitor functions for operators
+ * Rule interface for Operators. Used in operator dispatching to dispatch
+ * process/visitor functions for operators.
  */
 public interface Rule {
 
   /**
-   * @return the cost of the rule - the lower the cost, the better the rule matches
+   * @return the cost of the rule - the lower the cost, the better the rule
+   *         matches
    * @throws SemanticException
    */
   public int cost(Stack<Node> stack) throws SemanticException;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java Thu Jan 21 10:37:58 2010
@@ -25,28 +25,36 @@
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Rule interface for Nodes
- * Used in Node dispatching to dispatch process/visitor functions for Nodes
+ * Rule interface for Nodes. Used in Node dispatching to dispatch process/visitor
+ * functions for Nodes.
  */
 public class RuleRegExp implements Rule {
-  
-  private String    ruleName;
-  private Pattern   pattern;
+
+  private final String ruleName;
+  private final Pattern pattern;
 
   /**
-   * The rule specified by the regular expression. Note that, the regular expression is specified in terms of Node
-   * name. For eg: TS.*RS -> means TableScan Node followed by anything any number of times followed by ReduceSink
-   * @param ruleName name of the rule
-   * @param regExp regular expression for the rule
+   * The rule specified by the regular expression. Note that the regular
+   * expression is specified in terms of Node names. For example, TS.*RS means
+   * a TableScan Node followed by anything, any number of times, followed by a
+   * ReduceSink.
+   * 
+   * @param ruleName
+   *          name of the rule
+   * @param regExp
+   *          regular expression for the rule
    **/
   public RuleRegExp(String ruleName, String regExp) {
     this.ruleName = ruleName;
-    pattern       = Pattern.compile(regExp);
+    pattern = Pattern.compile(regExp);
   }
 
   /**
-   * This function returns the cost of the rule for the specified stack. Lower the cost, the better the rule is matched
-   * @param stack Node stack encountered so far
+   * This function returns the cost of the rule for the specified stack. The
+   * lower the cost, the better the rule is matched.
+   * 
+   * @param stack
+   *          Node stack encountered so far
    * @return cost of the function
    * @throws SemanticException
    */
@@ -56,8 +64,9 @@
     for (int pos = numElems - 1; pos >= 0; pos--) {
       name = stack.get(pos).getName() + "%" + name;
       Matcher m = pattern.matcher(name);
-      if (m.matches()) 
+      if (m.matches()) {
         return m.group().length();
+      }
     }
 
     return -1;

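Worked through the cost loop above: the stack is rendered root-to-top into a %-joined string, one element longer per iteration starting from the top, and the pattern must match the whole string. A hedged sketch reusing the hypothetical SimpleNode from earlier (the node names are illustrative; in practice Hive rules spell out the % separators):

    import java.util.Stack;

    import org.apache.hadoop.hive.ql.lib.Node;
    import org.apache.hadoop.hive.ql.lib.Rule;
    import org.apache.hadoop.hive.ql.lib.RuleRegExp;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class RuleDemo {
      public static void main(String[] args) throws SemanticException {
        // Each stack element contributes "<name>%", so this pattern reads:
        // a TableScan below, anything in between, a ReduceSink on top.
        Rule r = new RuleRegExp("R1", "TS%.*RS%");

        Stack<Node> stack = new Stack<Node>();
        stack.push(new SimpleNode("TS"));
        stack.push(new SimpleNode("FIL"));
        stack.push(new SimpleNode("RS"));

        // The loop tries "RS%", then "FIL%RS%", then "TS%FIL%RS%", which
        // matches; cost is the matched length (10 here), or -1 on no match.
        System.out.println(r.cost(stack));
      }
    }
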
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckResult.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckResult.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckResult.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckResult.java Thu Jan 21 10:37:58 2010
@@ -12,107 +12,114 @@
   private List<String> tablesNotInMs = new ArrayList<String>();
   private List<PartitionResult> partitionsNotOnFs = new ArrayList<PartitionResult>();
   private List<PartitionResult> partitionsNotInMs = new ArrayList<PartitionResult>();
-  
+
   /**
    * @return a list of tables not found on the filesystem.
    */
   public List<String> getTablesNotOnFs() {
     return tablesNotOnFs;
   }
-  
+
   /**
-   * @param tablesNotOnFs a list of tables not found on the filesystem.
+   * @param tablesNotOnFs
+   *          a list of tables not found on the filesystem.
    */
   public void setTablesNotOnFs(List<String> tablesNotOnFs) {
     this.tablesNotOnFs = tablesNotOnFs;
   }
-  
+
   /**
    * @return a list of tables not found in the metastore.
    */
   public List<String> getTablesNotInMs() {
     return tablesNotInMs;
   }
-  
+
   /**
-   * @param tablesNotInMs a list of tables not found in the metastore.
+   * @param tablesNotInMs
+   *          a list of tables not found in the metastore.
    */
   public void setTablesNotInMs(List<String> tablesNotInMs) {
     this.tablesNotInMs = tablesNotInMs;
   }
-  
+
   /**
    * @return a list of partitions not found on the fs
    */
   public List<PartitionResult> getPartitionsNotOnFs() {
     return partitionsNotOnFs;
   }
-  
+
   /**
-   * @param partitionsNotOnFs a list of partitions not found on the fs
+   * @param partitionsNotOnFs
+   *          a list of partitions not found on the fs
    */
   public void setPartitionsNotOnFs(List<PartitionResult> partitionsNotOnFs) {
     this.partitionsNotOnFs = partitionsNotOnFs;
   }
-  
+
   /**
    * @return a list of partitions not found in the metastore
    */
   public List<PartitionResult> getPartitionsNotInMs() {
     return partitionsNotInMs;
   }
-  
+
   /**
-   * @param partitionsNotInMs a list of partitions not found in the metastore
+   * @param partitionsNotInMs
+   *          a list of partitions not found in the metastore
    */
   public void setPartitionsNotInMs(List<PartitionResult> partitionsNotInMs) {
     this.partitionsNotInMs = partitionsNotInMs;
-  } 
-  
+  }
+
   /**
-   * A basic description of a partition that is 
-   * missing from either the fs or the ms.
+   * A basic description of a partition that is missing from either the fs or
+   * the ms.
    */
   public static class PartitionResult implements Comparable<PartitionResult> {
     private String partitionName;
     private String tableName;
-    
+
     /**
      * @return name of partition
      */
     public String getPartitionName() {
       return partitionName;
     }
-    
+
     /**
-     * @param partitionName name of partition
+     * @param partitionName
+     *          name of partition
      */
     public void setPartitionName(String partitionName) {
       this.partitionName = partitionName;
     }
-    
+
     /**
      * @return table name
      */
     public String getTableName() {
       return tableName;
     }
-    
+
     /**
-     * @param tableName table name
+     * @param tableName
+     *          table name
      */
     public void setTableName(String tableName) {
       this.tableName = tableName;
     }
-    
+
+    @Override
     public String toString() {
       return tableName + ":" + partitionName;
     }
-    
+
     public int compareTo(PartitionResult o) {
       int ret = tableName.compareTo(o.tableName);
       return ret != 0 ? ret : partitionName.compareTo(o.partitionName);
     }
   }
-  
+
 }

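A small consumer sketch for this container, assuming a checker (in Hive, the metastore checker) has already filled the lists; the reporting format is invented for illustration:

    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hive.ql.metadata.CheckResult;
    import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;

    public class CheckReport {
      public static void report(CheckResult result) {
        for (String table : result.getTablesNotOnFs()) {
          System.out.println("missing on filesystem: " + table);
        }
        // PartitionResult orders by table name, then partition name, per its
        // compareTo; its toString prints "table:partition".
        List<PartitionResult> parts = result.getPartitionsNotInMs();
        Collections.sort(parts);
        for (PartitionResult part : parts) {
          System.out.println("missing in metastore: " + part);
        }
      }
    }
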
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Dimension.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Dimension.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Dimension.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Dimension.java Thu Jan 21 10:37:58 2010
@@ -19,48 +19,61 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 /**
- * Hive consists of a fixed, well defined set of Dimensions.
- * Each dimension has a type and id. Dimensions link columns in different tables
- *
+ * Hive consists of a fixed, well-defined set of Dimensions. Each dimension
+ * has a type and an id. Dimensions link columns in different tables.
+ * 
  */
 public class Dimension {
 
-    protected Class<?> dimensionType;
-    protected String dimensionId;
+  protected Class<?> dimensionType;
+  protected String dimensionId;
 
-    public Dimension (Class<?> t, String id) {
-        this.dimensionType = t;
-        this.dimensionId = id;
+  public Dimension(Class<?> t, String id) {
+    dimensionType = t;
+    dimensionId = id;
+  }
+
+  public Class<?> getDimensionType() {
+    return dimensionType;
+  }
+
+  public String getDimensionId() {
+    return dimensionId;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (super.equals(o)) {
+      return true;
     }
-
-    public Class<?> getDimensionType() { return this.dimensionType; }
-    public String getDimensionId() { return this.dimensionId; }
-
-    @Override
-    public boolean equals(Object o) {
-      if (super.equals(o))
-        return true;
-      if (o == null)
-        return false;
-      if(o instanceof Dimension) {
-        Dimension d = (Dimension) o;
-        return (this.dimensionId.equals(d.dimensionId) && (this.dimensionType == d.dimensionType));
-      }
+    if (o == null) {
       return false;
     }
-
-    @Override
-    @SuppressWarnings("nls")
-    public String toString() { return "Type="+this.dimensionType.getName()+","+"Id="+this.dimensionId; }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + ((this.dimensionId == null) ? 0 : this.dimensionId.hashCode());
-      result = prime * result + ((this.dimensionType == null) ? 0 : this.dimensionType.hashCode());
-      return result;
+    if (o instanceof Dimension) {
+      Dimension d = (Dimension) o;
+      return (dimensionId.equals(d.dimensionId) && (dimensionType == d.dimensionType));
     }
+    return false;
+  }
 
-    public int hashCode(Object o) { return this.dimensionType.hashCode() ^ this.dimensionId.hashCode(); }
+  @Override
+  @SuppressWarnings("nls")
+  public String toString() {
+    return "Type=" + dimensionType.getName() + "," + "Id=" + dimensionId;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result
+        + ((dimensionId == null) ? 0 : dimensionId.hashCode());
+    result = prime * result
+        + ((dimensionType == null) ? 0 : dimensionType.hashCode());
+    return result;
+  }
+
+  public int hashCode(Object o) {
+    return dimensionType.hashCode() ^ dimensionId.hashCode();
+  }
 }

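As the reformatted equals and hashCode show, two Dimensions compare equal exactly when their ids are equal and their types are the same Class object, and the prime-based hashCode agrees with that. A quick illustration (the values are invented):

    import org.apache.hadoop.hive.ql.metadata.Dimension;

    public class DimensionDemo {
      public static void main(String[] args) {
        Dimension a = new Dimension(String.class, "userid");
        Dimension b = new Dimension(String.class, "userid");
        Dimension c = new Dimension(Integer.class, "userid");

        System.out.println(a.equals(b)); // true: same id, same type
        System.out.println(a.equals(c)); // false: types differ
        System.out.println(a.hashCode() == b.hashCode()); // true
        System.out.println(a); // Type=java.lang.String,Id=userid
      }
    }
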
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Jan 21 10:37:58 2010
@@ -31,8 +31,8 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -44,7 +44,6 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -53,14 +52,13 @@
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.util.StringUtils;
-
 import org.apache.thrift.TException;
 
 /**
- * The Hive class contains information about this instance of Hive.
- * An instance of Hive represents a set of data in a file system (usually HDFS)
- * organized for easy query processing
- *
+ * The Hive class contains information about this instance of Hive. An instance
+ * of Hive represents a set of data in a file system (usually HDFS) organized
+ * for easy query processing.
+ * 
  */
 
 public class Hive {
@@ -71,34 +69,39 @@
   private IMetaStoreClient metaStoreClient;
 
   private static ThreadLocal<Hive> hiveDB = new ThreadLocal() {
+    @Override
     protected synchronized Object initialValue() {
-        return null;
+      return null;
     }
 
+    @Override
     public synchronized void remove() {
-      if( this.get() != null ) {
-        ((Hive)this.get()).close();
+      if (this.get() != null) {
+        ((Hive) this.get()).close();
       }
       super.remove();
     }
   };
 
   /**
-   * Gets hive object for the current thread. If one is not initialized then a new one is created
-   * If the new configuration is different in metadata conf vars then a new one is created.
-   * @param c new Hive Configuration
+   * Gets the Hive object for the current thread. If one is not initialized
+   * then a new one is created. If the new configuration differs in metadata
+   * conf vars then a new one is created.
+   * 
+   * @param c
+   *          new Hive Configuration
    * @return Hive object for current thread
    * @throws HiveException
-   *
+   * 
    */
   public static Hive get(HiveConf c) throws HiveException {
     boolean needsRefresh = false;
     Hive db = hiveDB.get();
-    if(db != null) {
-      for(HiveConf.ConfVars oneVar: HiveConf.metaVars) {
+    if (db != null) {
+      for (HiveConf.ConfVars oneVar : HiveConf.metaVars) {
         String oldVar = db.getConf().getVar(oneVar);
         String newVar = c.getVar(oneVar);
-        if(oldVar.compareToIgnoreCase(newVar) != 0) {
+        if (oldVar.compareToIgnoreCase(newVar) != 0) {
           needsRefresh = true;
           break;
         }
@@ -109,16 +112,19 @@
 
   /**
    * get a connection to metastore. see get(HiveConf) function for comments
-   * @param c new conf
-   * @param needsRefresh if true then creates a new one
+   * 
+   * @param c
+   *          new conf
+   * @param needsRefresh
+   *          if true then creates a new one
    * @return The connection to the metastore
    * @throws HiveException
    */
   public static Hive get(HiveConf c, boolean needsRefresh) throws HiveException {
     Hive db = hiveDB.get();
-    if(db == null || needsRefresh) {
+    if (db == null || needsRefresh) {
       closeCurrent();
-      c.set("fs.scheme.class","dfs");
+      c.set("fs.scheme.class", "dfs");
       db = new Hive(c);
       hiveDB.set(db);
     }
@@ -127,7 +133,7 @@
 
   public static Hive get() throws HiveException {
     Hive db = hiveDB.get();
-    if(db == null) {
+    if (db == null) {
       db = new Hive(new HiveConf(Hive.class));
       hiveDB.set(db);
     }
@@ -140,13 +146,13 @@
 
   /**
    * Hive
-   *
+   * 
    * @param argFsRoot
    * @param c
-   *
+   * 
    */
-  private Hive(HiveConf c) throws  HiveException {
-    this.conf = c;
+  private Hive(HiveConf c) throws HiveException {
+    conf = c;
   }
 
   /**
@@ -159,31 +165,53 @@
 
   /**
   * Creates the table metadata and the directory for the table data
-   * @param tableName name of the table
-   * @param columns list of fields of the table
-   * @param partCols partition keys of the table
-   * @param fileInputFormat Class of the input format of the table data file
-   * @param fileOutputFormat Class of the output format of the table data file
-   * @throws HiveException thrown if the args are invalid or if the metadata or the data directory couldn't be created
-   */
-  public void createTable(String tableName, List<String> columns, List<String> partCols,
-      Class<? extends InputFormat> fileInputFormat, Class<?> fileOutputFormat) throws HiveException {
-    this.createTable(tableName, columns, partCols, fileInputFormat, fileOutputFormat, -1, null);
+   * 
+   * @param tableName
+   *          name of the table
+   * @param columns
+   *          list of fields of the table
+   * @param partCols
+   *          partition keys of the table
+   * @param fileInputFormat
+   *          Class of the input format of the table data file
+   * @param fileOutputFormat
+   *          Class of the output format of the table data file
+   * @throws HiveException
+   *           thrown if the args are invalid or if the metadata or the data
+   *           directory couldn't be created
+   */
+  public void createTable(String tableName, List<String> columns,
+      List<String> partCols, Class<? extends InputFormat> fileInputFormat,
+      Class<?> fileOutputFormat) throws HiveException {
+    this.createTable(tableName, columns, partCols, fileInputFormat,
+        fileOutputFormat, -1, null);
   }
 
   /**
   * Creates the table metadata and the directory for the table data
-   * @param tableName name of the table
-   * @param columns list of fields of the table
-   * @param partCols partition keys of the table
-   * @param fileInputFormat Class of the input format of the table data file
-   * @param fileOutputFormat Class of the output format of the table data file
-   * @param bucketCount number of buckets that each partition (or the table itself) should be divided into
-   * @throws HiveException thrown if the args are invalid or if the metadata or the data directory couldn't be created
-   */
-  public void createTable(String tableName, List<String> columns, List<String> partCols,
-      Class<? extends InputFormat> fileInputFormat, Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols) throws HiveException {
-    if(columns == null) {
+   * 
+   * @param tableName
+   *          name of the table
+   * @param columns
+   *          list of fields of the table
+   * @param partCols
+   *          partition keys of the table
+   * @param fileInputFormat
+   *          Class of the input format of the table data file
+   * @param fileOutputFormat
+   *          Class of the output format of the table data file
+   * @param bucketCount
+   *          number of buckets that each partition (or the table itself) should
+   *          be divided into
+   * @throws HiveException
+   *           thrown if the args are invalid or if the metadata or the data
+   *           directory couldn't be created
+   */
+  public void createTable(String tableName, List<String> columns,
+      List<String> partCols, Class<? extends InputFormat> fileInputFormat,
+      Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols)
+      throws HiveException {
+    if (columns == null) {
       throw new HiveException("columns not specified for table " + tableName);
     }
 
@@ -191,16 +219,19 @@
     tbl.setInputFormatClass(fileInputFormat.getName());
     tbl.setOutputFormatClass(fileOutputFormat.getName());
 
-    for (String col: columns) {
-      FieldSchema field = new FieldSchema(col, org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "default");
+    for (String col : columns) {
+      FieldSchema field = new FieldSchema(col,
+          org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "default");
       tbl.getCols().add(field);
     }
 
-    if(partCols != null) {
+    if (partCols != null) {
       for (String partCol : partCols) {
         FieldSchema part = new FieldSchema();
         part.setName(partCol);
-        part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default partition key
+        part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default
+                                                                               // partition
+                                                                               // key
         tbl.getPartCols().add(part);
       }
     }
@@ -210,19 +241,22 @@
     createTable(tbl);
   }
 
-
   /**
    * Updates the existing table metadata with the new metadata.
-   * @param tblName name of the existing table
-   * @param newTbl new name of the table. could be the old name
-   * @throws InvalidOperationException if the changes in metadata is not acceptable
+   * 
+   * @param tblName
+   *          name of the existing table
+   * @param newTbl
+   *          new name of the table. could be the old name
+   * @throws InvalidOperationException
+   *           if the changes in metadata is not acceptable
    * @throws TException
    */
-  public void alterTable(String tblName,
-      Table newTbl) throws InvalidOperationException,
-      HiveException {
+  public void alterTable(String tblName, Table newTbl)
+      throws InvalidOperationException, HiveException {
     try {
-      getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, newTbl.getTTable());
+      getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName,
+          newTbl.getTTable());
     } catch (MetaException e) {
       throw new HiveException("Unable to alter table.", e);
     } catch (TException e) {
@@ -232,16 +266,20 @@
 
   /**
    * Updates the existing table metadata with the new metadata.
-   * @param tblName name of the existing table
-   * @param newTbl new name of the table. could be the old name
-   * @throws InvalidOperationException if the changes in metadata is not acceptable
+   * 
+   * @param tblName
+   *          name of the existing table
+   * @param newTbl
+   *          new name of the table. could be the old name
+   * @throws InvalidOperationException
+   *           if the changes in metadata is not acceptable
    * @throws TException
    */
   public void alterPartition(String tblName, Partition newPart)
-    throws InvalidOperationException, HiveException {
+      throws InvalidOperationException, HiveException {
     try {
       getMSC().alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName,
-                               newPart.getTPartition());
+          newPart.getTPartition());
 
     } catch (MetaException e) {
       throw new HiveException("Unable to alter partition.", e);
@@ -252,7 +290,9 @@
 
   /**
   * Creates the table with the given objects
-   * @param tbl a table object
+   * 
+   * @param tbl
+   *          a table object
    * @throws HiveException
    */
   public void createTable(Table tbl) throws HiveException {
@@ -261,15 +301,19 @@
 
   /**
   * Creates the table with the given objects
-   * @param tbl a table object
-   * @param ifNotExists if true, ignore AlreadyExistsException
+   * 
+   * @param tbl
+   *          a table object
+   * @param ifNotExists
+   *          if true, ignore AlreadyExistsException
    * @throws HiveException
    */
   public void createTable(Table tbl, boolean ifNotExists) throws HiveException {
     try {
       tbl.initSerDe();
-      if(tbl.getCols().size() == 0) {
-        tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getName(), tbl.getDeserializer()));
+      if (tbl.getCols().size() == 0) {
+        tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getName(),
+            tbl.getDeserializer()));
       }
       tbl.checkValidity();
       getMSC().createTable(tbl.getTTable());
@@ -283,11 +327,15 @@
   }
 
   /**
-   * Drops table along with the data in it. If the table doesn't exist
-   * then it is a no-op
-   * @param dbName database where the table lives
-   * @param tableName table to drop
-   * @throws HiveException thrown if the drop fails
+   * Drops table along with the data in it. If the table doesn't exist then it
+   * is a no-op.
+   * 
+   * @param dbName
+   *          database where the table lives
+   * @param tableName
+   *          table to drop
+   * @throws HiveException
+   *           thrown if the drop fails
    */
   public void dropTable(String dbName, String tableName) throws HiveException {
     dropTable(dbName, tableName, true, true);
@@ -295,10 +343,12 @@
 
   /**
    * Drops the table.
+   * 
    * @param tableName
-   * @param deleteData deletes the underlying data along with metadata
-   * @param ignoreUnknownTab an exception if thrown if this is falser and
-   * table doesn't exist
+   * @param deleteData
+   *          deletes the underlying data along with metadata
+   * @param ignoreUnknownTab
+   *          an exception is thrown if this is false and the table doesn't exist
    * @throws HiveException
    */
   public void dropTable(String dbName, String tableName, boolean deleteData,
@@ -321,31 +371,37 @@
 
   /**
    * Returns metadata of the table.
-   * @param dbName the name of the database
-   * @param tableName the name of the table
+   * 
+   * @param dbName
+   *          the name of the database
+   * @param tableName
+   *          the name of the table
    * @return the table
-   * @exception HiveException if there's an internal error or if the
-   * table doesn't exist
+   * @exception HiveException
+   *              if there's an internal error or if the table doesn't exist
    */
   public Table getTable(final String dbName, final String tableName)
-    throws HiveException {
+      throws HiveException {
 
     return this.getTable(dbName, tableName, true);
   }
 
   /**
    * Returns metadata of the table
-   * @param dbName the name of the database
-   * @param tableName the name of the table
-   * @param throwException controls whether an exception is thrown
-   * or a returns a null
+   * 
+   * @param dbName
+   *          the name of the database
+   * @param tableName
+   *          the name of the table
+   * @param throwException
+   *          controls whether an exception is thrown or a null is returned
    * @return the table or if throwException is false a null value.
    * @throws HiveException
    */
   public Table getTable(final String dbName, final String tableName,
       boolean throwException) throws HiveException {
 
-    if(tableName == null || tableName.equals("")) {
+    if (tableName == null || tableName.equals("")) {
       throw new HiveException("empty table creation??");
     }
     Table table = new Table();
@@ -353,7 +409,7 @@
     try {
       tTable = getMSC().getTable(dbName, tableName);
     } catch (NoSuchObjectException e) {
-      if(throwException) {
+      if (throwException) {
         LOG.error(StringUtils.stringifyException(e));
         throw new InvalidTableException("Table not found ", tableName);
       }
@@ -362,17 +418,20 @@
       throw new HiveException("Unable to fetch table " + tableName, e);
     }
     // just a sanity check
-    assert(tTable != null);
+    assert (tTable != null);
     try {
 
       // Use LazySimpleSerDe for MetadataTypedColumnsetSerDe.
-      // NOTE: LazySimpleSerDe does not support tables with a single column of col
-      // of type "array<string>".  This happens when the table is created using an
+      // NOTE: LazySimpleSerDe does not support tables with a single column of
+      // col
+      // of type "array<string>". This happens when the table is created using
+      // an
       // earlier version of Hive.
       if (tTable.getSd().getSerdeInfo().getSerializationLib().equals(
-          org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class.getName())
+          org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class
+              .getName())
           && tTable.getSd().getColsSize() > 0
-          && tTable.getSd().getCols().get(0).getType().indexOf('<') == -1 ) {
+          && tTable.getSd().getCols().get(0).getType().indexOf('<') == -1) {
         tTable.getSd().getSerdeInfo().setSerializationLib(
             org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
       }
@@ -388,25 +447,33 @@
         return table;
       }
 
-      table.setInputFormatClass((Class<? extends InputFormat<WritableComparable, Writable>>)
-          Class.forName(table.getSchema().getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
-                                                      org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName()),
-                        true, JavaUtils.getClassLoader()));
-      table.setOutputFormatClass((Class<? extends HiveOutputFormat>)
-          Class.forName(table.getSchema().getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
-                                                      HiveSequenceFileOutputFormat.class.getName()),
-                        true, JavaUtils.getClassLoader()));
+      table
+          .setInputFormatClass((Class<? extends InputFormat<WritableComparable, Writable>>) Class
+              .forName(
+                  table
+                      .getSchema()
+                      .getProperty(
+                          org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
+                          org.apache.hadoop.mapred.SequenceFileInputFormat.class
+                              .getName()), true, JavaUtils.getClassLoader()));
+      table.setOutputFormatClass(Class.forName(table.getSchema().getProperty(
+          org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
+          HiveSequenceFileOutputFormat.class.getName()), true, JavaUtils
+          .getClassLoader()));
       table.setDeserializer(MetaStoreUtils.getDeserializer(getConf(), p));
       table.setDataLocation(new URI(tTable.getSd().getLocation()));
-    } catch(Exception e) {
+    } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
     }
-    String sf = table.getSerdeParam(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
-    if(sf != null) {
+    String sf = table
+        .getSerdeParam(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
+    if (sf != null) {
       char[] b = sf.toCharArray();
-      if ((b.length == 1) && (b[0] < 10)){ // ^A, ^B, ^C, ^D, \t
-        table.setSerdeParam(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, Integer.toString(b[0]));
+      if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t
+        table.setSerdeParam(
+            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT,
+            Integer.toString(b[0]));
       }
     }
     table.checkValidity();
@@ -420,20 +487,21 @@
   /**
    * returns all existing tables from default database which match the given
    * pattern. The matching occurs as per Java regular expressions
-   *
+   * 
    * @param tablePattern
    *          java re pattern
    * @return list of table names
    * @throws HiveException
    */
-  public List<String> getTablesByPattern(String tablePattern) throws HiveException {
+  public List<String> getTablesByPattern(String tablePattern)
+      throws HiveException {
     return getTablesForDb(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern);
   }
 
   /**
    * returns all existing tables from the given database which match the given
    * pattern. The matching occurs as per Java regular expressions
-   *
+   * 
    * @param database
    *          the database name
    * @param tablePattern
@@ -441,10 +509,11 @@
    * @return list of table names
    * @throws HiveException
    */
-  public List<String> getTablesForDb(String database, String tablePattern) throws HiveException {
+  public List<String> getTablesForDb(String database, String tablePattern)
+      throws HiveException {
     try {
       return getMSC().getTables(database, tablePattern);
-    } catch(Exception e) {
+    } catch (Exception e) {
       throw new HiveException(e);
     }
   }
@@ -456,10 +525,11 @@
    * @throws AlreadyExistsException
    * @throws MetaException
    * @throws TException
-   * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String,
+   *      java.lang.String)
    */
-  protected boolean createDatabase(String name, String locationUri) throws AlreadyExistsException,
-      MetaException, TException {
+  protected boolean createDatabase(String name, String locationUri)
+      throws AlreadyExistsException, MetaException, TException {
     return getMSC().createDatabase(name, locationUri);
   }
 
@@ -475,39 +545,47 @@
   }
 
   /**
-   * Load a directory into a Hive Table Partition
-   * - Alters existing content of the partition with the contents of loadPath.
-   * - If he partition does not exist - one is created
-   * - files in loadPath are moved into Hive. But the directory itself is not removed.
-   *
-   * @param loadPath Directory containing files to load into Table
-   * @param tableName name of table to be loaded.
-   * @param partSpec defines which partition needs to be loaded
-   * @param replace if true - replace files in the partition, otherwise add files to the partition
-   * @param tmpDirPath The temporary directory.
+   * Load a directory into a Hive Table Partition - Alters existing content of
+   * the partition with the contents of loadPath. - If the partition does not
+   * exist - one is created - files in loadPath are moved into Hive. But the
+   * directory itself is not removed.
+   * 
+   * @param loadPath
+   *          Directory containing files to load into Table
+   * @param tableName
+   *          name of table to be loaded.
+   * @param partSpec
+   *          defines which partition needs to be loaded
+   * @param replace
+   *          if true - replace files in the partition, otherwise add files to
+   *          the partition
+   * @param tmpDirPath
+   *          The temporary directory.
    */
   public void loadPartition(Path loadPath, String tableName,
-      AbstractMap<String, String> partSpec, boolean replace,
-      Path tmpDirPath)
-  throws HiveException {
+      AbstractMap<String, String> partSpec, boolean replace, Path tmpDirPath)
+      throws HiveException {
     Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
     try {
-      /** Move files before creating the partition since down stream processes check
-       *  for existence of partition in metadata before accessing the data. If partition
-       *  is created before data is moved, downstream waiting processes might move forward
-       *  with partial data
+      /**
+       * Move files before creating the partition since downstream processes
+       * check for existence of partition in metadata before accessing the data.
+       * If partition is created before data is moved, downstream waiting
+       * processes might move forward with partial data
        */
 
       FileSystem fs;
       Path partPath;
 
       // check if partition exists without creating it
-      Partition part = getPartition (tbl, partSpec, false);
+      Partition part = getPartition(tbl, partSpec, false);
       if (part == null) {
-        // Partition does not exist currently. The partition name is extrapolated from
+        // Partition does not exist currently. The partition name is
+        // extrapolated from
         // the table's location (even if the table is marked external)
         fs = FileSystem.get(tbl.getDataLocation(), getConf());
-        partPath = new Path(tbl.getDataLocation().getPath(), Warehouse.makePartName(partSpec));
+        partPath = new Path(tbl.getDataLocation().getPath(), Warehouse
+            .makePartName(partSpec));
       } else {
         // Partition exists already. Get the path from the partition. This will
         // get the default path for Hive created partitions or the external path
@@ -515,7 +593,7 @@
         partPath = part.getPath()[0];
         fs = partPath.getFileSystem(getConf());
       }
-      if(replace) {
+      if (replace) {
         Hive.replaceFiles(loadPath, partPath, fs, tmpDirPath);
       } else {
         Hive.copyFiles(loadPath, partPath, fs);
@@ -536,22 +614,25 @@
   }
 
   /**
-   * Load a directory into a Hive Table.
-   * - Alters existing content of table with the contents of loadPath.
-   * - If table does not exist - an exception is thrown
-   * - files in loadPath are moved into Hive. But the directory itself is not removed.
-   *
-   * @param loadPath Directory containing files to load into Table
-   * @param tableName name of table to be loaded.
-   * @param replace if true - replace files in the table, otherwise add files to table
-   * @param tmpDirPath The temporary directory.
-   */
-  public void loadTable(Path loadPath, String tableName,
-       boolean replace,
-       Path tmpDirPath) throws HiveException {
+   * Load a directory into a Hive Table. - Alters existing content of table with
+   * the contents of loadPath. - If table does not exist - an exception is
+   * thrown - files in loadPath are moved into Hive. But the directory itself is
+   * not removed.
+   * 
+   * @param loadPath
+   *          Directory containing files to load into Table
+   * @param tableName
+   *          name of table to be loaded.
+   * @param replace
+   *          if true - replace files in the table, otherwise add files to table
+   * @param tmpDirPath
+   *          The temporary directory.
+   */
+  public void loadTable(Path loadPath, String tableName, boolean replace,
+      Path tmpDirPath) throws HiveException {
     Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
 
-    if(replace) {
+    if (replace) {
       tbl.replaceFiles(loadPath, tmpDirPath);
     } else {
       tbl.copyFiles(loadPath);
@@ -560,23 +641,32 @@
 
   /**
    * Creates a partition.
-   * @param tbl table for which partition needs to be created
-   * @param partSpec partition keys and their values
+   * 
+   * @param tbl
+   *          table for which partition needs to be created
+   * @param partSpec
+   *          partition keys and their values
    * @return created partition object
-   * @throws HiveException if table doesn't exist or partition already exists
+   * @throws HiveException
+   *           if table doesn't exist or partition already exists
    */
   public Partition createPartition(Table tbl, Map<String, String> partSpec)
-    throws HiveException {
-      return createPartition(tbl, partSpec, null);
+      throws HiveException {
+    return createPartition(tbl, partSpec, null);
   }
 
   /**
    * Creates a partition
-   * @param tbl table for which partition needs to be created
-   * @param partSpec partition keys and their values
-   * @param location location of this partition
+   * 
+   * @param tbl
+   *          table for which partition needs to be created
+   * @param partSpec
+   *          partition keys and their values
+   * @param location
+   *          location of this partition
    * @return created partition object
-   * @throws HiveException if table doesn't exist or partition already exists
+   * @throws HiveException
+   *           if table doesn't exist or partition already exists
    */
   public Partition createPartition(Table tbl, Map<String, String> partSpec,
       Path location) throws HiveException {
@@ -585,8 +675,9 @@
 
     for (FieldSchema field : tbl.getPartCols()) {
       String val = partSpec.get(field.getName());
-      if(val == null || val.length() == 0) {
-        throw new HiveException("add partition: Value for key " + field.getName() + " is null or empty");
+      if (val == null || val.length() == 0) {
+        throw new HiveException("add partition: Value for key "
+            + field.getName() + " is null or empty");
       }
     }
 
@@ -603,33 +694,41 @@
 
   /**
    * Returns partition metadata
-   * @param tbl the partition's table
-   * @param partSpec partition keys and values
-   * @param forceCreate if this is true and partition doesn't exist then a partition is created
+   * 
+   * @param tbl
+   *          the partition's table
+   * @param partSpec
+   *          partition keys and values
+   * @param forceCreate
+   *          if this is true and partition doesn't exist then a partition is
+   *          created
    * @return result partition object or null if there is no partition
    * @throws HiveException
    */
-  public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate)
-      throws HiveException {
-    if(!tbl.isValidSpec(partSpec)) {
+  public Partition getPartition(Table tbl, Map<String, String> partSpec,
+      boolean forceCreate) throws HiveException {
+    if (!tbl.isValidSpec(partSpec)) {
       throw new HiveException("Invalid partition: " + partSpec);
     }
     List<String> pvals = new ArrayList<String>();
     for (FieldSchema field : tbl.getPartCols()) {
       String val = partSpec.get(field.getName());
-      if(val == null || val.length() == 0) {
-        throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
+      if (val == null || val.length() == 0) {
+        throw new HiveException("get partition: Value for key "
+            + field.getName() + " is null or empty");
       }
       pvals.add(val);
     }
     org.apache.hadoop.hive.metastore.api.Partition tpart = null;
     try {
       tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
-      if(tpart == null && forceCreate) {
-        LOG.debug("creating partition for table "  + tbl.getName() + " with partition spec : " + partSpec);
-        tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);;
+      if (tpart == null && forceCreate) {
+        LOG.debug("creating partition for table " + tbl.getName()
+            + " with partition spec : " + partSpec);
+        tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+        ;
       }
-      if(tpart == null){
+      if (tpart == null) {
         return null;
       }
     } catch (Exception e) {
@@ -639,8 +738,8 @@
     return new Partition(tbl, tpart);
   }
 
-  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
-      boolean deleteData) throws HiveException {
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals, boolean deleteData) throws HiveException {
     try {
       return getMSC().dropPartition(db_name, tbl_name, part_vals, deleteData);
     } catch (NoSuchObjectException e) {
@@ -650,7 +749,8 @@
     }
   }
 
-  public List<String> getPartitionNames(String dbName, String tblName, short max) throws HiveException {
+  public List<String> getPartitionNames(String dbName, String tblName, short max)
+      throws HiveException {
     List names = null;
     try {
       names = getMSC().listPartitionNames(dbName, tblName, max);
@@ -663,15 +763,18 @@
 
   /**
    * get all the partitions that the table has
-   * @param tbl object for which partition is needed
+   * 
+   * @param tbl
+   *          object for which partition is needed
    * @return list of partition objects
    * @throws HiveException
    */
   public List<Partition> getPartitions(Table tbl) throws HiveException {
-    if(tbl.isPartitioned()) {
+    if (tbl.isPartitioned()) {
       List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
       try {
-        tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(),
+            (short) -1);
       } catch (Exception e) {
         LOG.error(StringUtils.stringifyException(e));
         throw new HiveException(e);
@@ -683,7 +786,8 @@
       return parts;
     } else {
       // create an empty partition.
-      // HACK, HACK. SemanticAnalyzer code requires that an empty partition when the table is not partitioned
+      // HACK, HACK. SemanticAnalyzer code requires an empty partition when
+      // the table is not partitioned
       org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition();
       tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy
       Partition part = new Partition(tbl, tPart);
@@ -693,40 +797,47 @@
     }
   }
 
-  static private void checkPaths(FileSystem fs, FileStatus [] srcs, Path destf, boolean replace) throws HiveException {
+  static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf,
+      boolean replace) throws HiveException {
     try {
-        for(int i=0; i<srcs.length; i++) {
-            FileStatus [] items = fs.listStatus(srcs[i].getPath());
-            for(int j=0; j<items.length; j++) {
-
-                if (Utilities.isTempPath(items[j])) {
-                      // This check is redundant because temp files are removed by execution layer before
-                      // calling loadTable/Partition. But leaving it in just in case.
-                      fs.delete(items[j].getPath(), true);
-                      continue;
-                }
-                if(items[j].isDir()) {
-                    throw new HiveException("checkPaths: "+srcs[i].toString()+" has nested directory"+
-                                            items[j].toString());
-                }
-                Path tmpDest = new Path(destf, items[j].getPath().getName());
-                if (!replace && fs.exists(tmpDest)) {
-                  throw new HiveException("checkPaths: " + tmpDest + " already exists");
-                }
-            }
+      for (FileStatus src : srcs) {
+        FileStatus[] items = fs.listStatus(src.getPath());
+        for (FileStatus item : items) {
+
+          if (Utilities.isTempPath(item)) {
+            // This check is redundant because temp files are removed by
+            // execution layer before
+            // calling loadTable/Partition. But leaving it in just in case.
+            fs.delete(item.getPath(), true);
+            continue;
+          }
+          if (item.isDir()) {
+            throw new HiveException("checkPaths: " + src.toString()
+                + " has nested directory" + item.toString());
+          }
+          Path tmpDest = new Path(destf, item.getPath().getName());
+          if (!replace && fs.exists(tmpDest)) {
+            throw new HiveException("checkPaths: " + tmpDest
+                + " already exists");
+          }
         }
+      }
     } catch (IOException e) {
-        throw new HiveException("checkPaths: filesystem error in check phase", e);
+      throw new HiveException("checkPaths: filesystem error in check phase", e);
     }
-}
+  }
 
-  static protected void copyFiles(Path srcf, Path destf, FileSystem fs) throws HiveException {
+  static protected void copyFiles(Path srcf, Path destf, FileSystem fs)
+      throws HiveException {
     try {
       // create the destination if it does not exist
-      if (!fs.exists(destf))
+      if (!fs.exists(destf)) {
         fs.mkdirs(destf);
+      }
     } catch (IOException e) {
-      throw new HiveException("copyFiles: error while checking/creating destination directory!!!", e);
+      throw new HiveException(
+          "copyFiles: error while checking/creating destination directory!!!",
+          e);
     }
 
     FileStatus[] srcs;
@@ -739,18 +850,18 @@
     if (srcs == null) {
       LOG.info("No sources specified to move: " + srcf);
       return;
-      //srcs = new FileStatus[0]; Why is this needed?
+      // srcs = new FileStatus[0]; Why is this needed?
     }
     // check that source and target paths exist
     checkPaths(fs, srcs, destf, false);
 
     // move it, move it
     try {
-      for(int i=0; i<srcs.length; i++) {
-        FileStatus [] items = fs.listStatus(srcs[i].getPath());
-        for(int j=0; j<items.length; j++) {
-          Path source = items[j].getPath();
-          Path target = new Path(destf, items[j].getPath().getName());
+      for (FileStatus src : srcs) {
+        FileStatus[] items = fs.listStatus(src.getPath());
+        for (FileStatus item : items) {
+          Path source = item.getPath();
+          Path target = new Path(destf, item.getPath().getName());
           if (!fs.rename(source, target)) {
             throw new IOException("Cannot move " + source + " to " + target);
           }
@@ -762,96 +873,106 @@
   }
 
   /**
-   * Replaces files in the partition with new data set specifed by srcf. Works by moving files
-   *
-   * @param srcf Files to be moved. Leaf Directories or Globbed File Paths
-   * @param destf The directory where the final data needs to go
-   * @param fs The filesystem handle
-   * @param tmppath Temporary directory
+   * Replaces files in the partition with new data set specified by srcf. Works
+   * by moving files
+   * 
+   * @param srcf
+   *          Files to be moved. Leaf Directories or Globbed File Paths
+   * @param destf
+   *          The directory where the final data needs to go
+   * @param fs
+   *          The filesystem handle
+   * @param tmppath
+   *          Temporary directory
    */
   static protected void replaceFiles(Path srcf, Path destf, FileSystem fs,
       Path tmppath) throws HiveException {
-      FileStatus [] srcs;
-      try {
-          srcs = fs.globStatus(srcf);
-      } catch (IOException e) {
-          throw new HiveException("addFiles: filesystem error in check phase", e);
-      }
-      if (srcs == null) {
-        LOG.info("No sources specified to move: " + srcf);
-        return;
-        //srcs = new FileStatus[0]; Why is this needed?
-      }
-      checkPaths(fs, srcs, destf, true);
-
-      try {
-          fs.mkdirs(tmppath);
-          for(int i=0; i<srcs.length; i++) {
-              FileStatus[] items = fs.listStatus(srcs[i].getPath());
-              for(int j=0; j<items.length; j++) {
-                if (!fs.rename(items[j].getPath(),
-                               new Path(tmppath, items[j].getPath().getName()))) {
-                  throw new HiveException ("Error moving: " + items[j].getPath()
-                                           + " into: " + tmppath);
-                }
-              }
-          }
-
-          // point of no return
-          boolean b = fs.delete(destf, true);
-          LOG.debug("Deleting:"+destf.toString()+",Status:"+b);
-
-          // create the parent directory otherwise rename can fail if the parent doesn't exist
-          if (!fs.mkdirs(destf.getParent())) {
-            throw new HiveException("Unable to create destination directory: "
-                  + destf.getParent().toString());
-          }
+    FileStatus[] srcs;
+    try {
+      srcs = fs.globStatus(srcf);
+    } catch (IOException e) {
+      throw new HiveException("addFiles: filesystem error in check phase", e);
+    }
+    if (srcs == null) {
+      LOG.info("No sources specified to move: " + srcf);
+      return;
+      // srcs = new FileStatus[0]; Why is this needed?
+    }
+    checkPaths(fs, srcs, destf, true);
 
-          b = fs.rename(tmppath, destf);
-          if (!b) {
-            throw new HiveException("Unable to move results from " + tmppath  
-                + " to destination directory: "
-                + destf.getParent().toString());
+    try {
+      fs.mkdirs(tmppath);
+      for (FileStatus src : srcs) {
+        FileStatus[] items = fs.listStatus(src.getPath());
+        for (int j = 0; j < items.length; j++) {
+          if (!fs.rename(items[j].getPath(), new Path(tmppath, items[j]
+              .getPath().getName()))) {
+            throw new HiveException("Error moving: " + items[j].getPath()
+                + " into: " + tmppath);
           }
-          LOG.debug("Renaming:"+tmppath.toString()+",Status:"+b);
+        }
+      }
 
-      } catch (IOException e) {
-          throw new HiveException("replaceFiles: error while moving files from "
-              + tmppath + " to " + destf + "!!!", e);
+      // point of no return
+      boolean b = fs.delete(destf, true);
+      LOG.debug("Deleting:" + destf.toString() + ",Status:" + b);
+
+      // create the parent directory otherwise rename can fail if the parent
+      // doesn't exist
+      if (!fs.mkdirs(destf.getParent())) {
+        throw new HiveException("Unable to create destination directory: "
+            + destf.getParent().toString());
+      }
+
+      b = fs.rename(tmppath, destf);
+      if (!b) {
+        throw new HiveException("Unable to move results from " + tmppath
+            + " to destination directory: " + destf.getParent().toString());
       }
-      // In case of error, we should leave the temporary data there, so 
-      // that user can recover the data if necessary. 
+      LOG.debug("Renaming:" + tmppath.toString() + ",Status:" + b);
+
+    } catch (IOException e) {
+      throw new HiveException("replaceFiles: error while moving files from "
+          + tmppath + " to " + destf + "!!!", e);
+    }
+    // In case of error, we should leave the temporary data there, so
+    // that user can recover the data if necessary.
   }
 
   /**
-   * Creates a metastore client. Currently it creates only JDBC based client as File based store
-   * support is removed
+   * Creates a metastore client. Currently it creates only a JDBC based
+   * client, as file based store support has been removed.
+   * 
    * @returns a Meta Store Client
-   * @throws HiveMetaException  if a working client can't be created
+   * @throws MetaException
+   *           if a working client can't be created
    */
   private IMetaStoreClient createMetaStoreClient() throws MetaException {
-      return new HiveMetaStoreClient(this.conf);
+    return new HiveMetaStoreClient(conf);
   }
 
   /**
-   *
+   * 
    * @return the metastore client for the current thread
    * @throws MetaException
    */
   private IMetaStoreClient getMSC() throws MetaException {
-    if(metaStoreClient == null) {
-      metaStoreClient = this.createMetaStoreClient();
+    if (metaStoreClient == null) {
+      metaStoreClient = createMetaStoreClient();
     }
     return metaStoreClient;
   }
 
-  public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException {
+  public static List<FieldSchema> getFieldsFromDeserializer(String name,
+      Deserializer serde) throws HiveException {
     try {
       return MetaStoreUtils.getFieldsFromDeserializer(name, serde);
     } catch (SerDeException e) {
-      throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e);
+      throw new HiveException("Error in getting fields from serde. "
+          + e.getMessage(), e);
     } catch (MetaException e) {
-      throw new HiveException("Error in getting fields from serde." + e.getMessage(), e);
+      throw new HiveException("Error in getting fields from serde."
+          + e.getMessage(), e);
     }
   }
 };
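
The copyFiles/replaceFiles refactoring above keeps the original move semantics:
copyFiles renames each source file into an existing destination, while
replaceFiles first stages everything into a temporary directory, then deletes
the destination and swaps the staged set in. Below is a minimal, self-contained
sketch of that stage-then-swap flow against the Hadoop FileSystem API; the
class name and the example paths in main are hypothetical, and only FileSystem
calls visible in the diff are used.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplaceFilesSketch {

  public static void replace(FileSystem fs, Path srcf, Path destf, Path tmppath)
      throws IOException {
    // 1. expand the source glob; globStatus returns null when nothing matches
    FileStatus[] srcs = fs.globStatus(srcf);
    if (srcs == null) {
      return;
    }
    // 2. stage every source file into the temporary directory first
    fs.mkdirs(tmppath);
    for (FileStatus src : srcs) {
      for (FileStatus item : fs.listStatus(src.getPath())) {
        Path staged = new Path(tmppath, item.getPath().getName());
        if (!fs.rename(item.getPath(), staged)) {
          throw new IOException("Cannot stage " + item.getPath());
        }
      }
    }
    // 3. point of no return: drop the old data, then swap in the staged set
    fs.delete(destf, true);
    fs.mkdirs(destf.getParent()); // rename fails if the parent is missing
    if (!fs.rename(tmppath, destf)) {
      throw new IOException("Cannot move " + tmppath + " to " + destf);
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    replace(fs, new Path("/tmp/staging/*"), new Path("/user/hive/warehouse/t"),
        new Path("/tmp/t_tmp"));
  }
}

Staging into tmppath before the delete is what makes the data recoverable if
the process dies before the point of no return, which is why the code above
deliberately leaves temporary data in place on error.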

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java Thu Jan 21 10:37:58 2010
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-/**                                                                                     
+/**
  * Generic exception class for Hive
  */
 
@@ -26,7 +26,7 @@
   public HiveException() {
     super();
   }
-  
+
   public HiveException(String message) {
     super(message);
   }
@@ -39,4 +39,3 @@
     super(message, cause);
   }
 }
-

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Thu Jan 21 10:37:58 2010
@@ -13,31 +13,30 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
-import org.apache.hadoop.hive.conf.HiveConf;
-
 import org.apache.thrift.TException;
 
 /**
- * Verify that the information in the metastore matches what
- * is on the filesystem. Return a CheckResult object
- * containing lists of missing and any unexpected tables and partitions.
+ * Verify that the information in the metastore matches what is on the
+ * filesystem. Return a CheckResult object containing lists of missing and any
+ * unexpected tables and partitions.
  */
 public class HiveMetaStoreChecker {
 
   public static final Log LOG = LogFactory.getLog(HiveMetaStoreChecker.class);
 
-  private Hive hive;
-  private HiveConf conf;
+  private final Hive hive;
+  private final HiveConf conf;
 
   public HiveMetaStoreChecker(Hive hive) {
     super();
     this.hive = hive;
-    this.conf = hive.getConf();
+    conf = hive.getConf();
   }
 
   /**
@@ -49,16 +48,19 @@
    * @param tableName
    *          Table we want to run the check for. If null we'll check all the
    *          tables in the database.
-   * @param partitions List of partition name value pairs, 
-   * if null or empty check all partitions
-   * @param result Fill this with the results of the check 
-   * @throws HiveException Failed to get required information 
-   * from the metastore.
-   * @throws IOException Most likely filesystem related
+   * @param partitions
+   *          List of partition name value pairs, if null or empty check all
+   *          partitions
+   * @param result
+   *          Fill this with the results of the check
+   * @throws HiveException
+   *           Failed to get required information from the metastore.
+   * @throws IOException
+   *           Most likely filesystem related
    */
   public void checkMetastore(String dbName, String tableName,
-      List<Map<String, String>> partitions, CheckResult result) 
-    throws HiveException, IOException {
+      List<Map<String, String>> partitions, CheckResult result)
+      throws HiveException, IOException {
 
     if (dbName == null || "".equalsIgnoreCase(dbName)) {
       dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
@@ -71,7 +73,7 @@
         for (String currentTableName : tables) {
           checkTable(dbName, currentTableName, null, result);
         }
-  
+
         findUnknownTables(dbName, tables, result);
       } else if (partitions == null || partitions.isEmpty()) {
         // only one table, let's check all partitions
@@ -93,21 +95,26 @@
 
   /**
    * Check for table directories that aren't in the metastore.
-   * @param dbName Name of the database
-   * @param tables List of table names
-   * @param result Add any found tables to this
-   * @throws HiveException Failed to get required information 
-   * from the metastore.
-   * @throws IOException Most likely filesystem related
-   * @throws MetaException Failed to get required information 
-   * from the metastore.
-   * @throws NoSuchObjectException Failed to get required information 
-   * from the metastore.
-   * @throws TException Thrift communication error.
+   * 
+   * @param dbName
+   *          Name of the database
+   * @param tables
+   *          List of table names
+   * @param result
+   *          Add any found tables to this
+   * @throws HiveException
+   *           Failed to get required information from the metastore.
+   * @throws IOException
+   *           Most likely filesystem related
+   * @throws MetaException
+   *           Failed to get required information from the metastore.
+   * @throws NoSuchObjectException
+   *           Failed to get required information from the metastore.
+   * @throws TException
+   *           Thrift communication error.
    */
-  void findUnknownTables(String dbName, List<String> tables,
-      CheckResult result) throws IOException, MetaException, TException,
-      HiveException {
+  void findUnknownTables(String dbName, List<String> tables, CheckResult result)
+      throws IOException, MetaException, TException, HiveException {
 
     Set<Path> dbPaths = new HashSet<Path>();
     Set<String> tableNames = new HashSet<String>(tables);
@@ -125,10 +132,9 @@
       FileSystem fs = dbPath.getFileSystem(conf);
       FileStatus[] statuses = fs.listStatus(dbPath);
       for (FileStatus status : statuses) {
-        
-        if (status.isDir() 
-            && !tableNames.contains(status.getPath().getName())) {
-          
+
+        if (status.isDir() && !tableNames.contains(status.getPath().getName())) {
+
           result.getTablesNotInMs().add(status.getPath().getName());
         }
       }
@@ -139,16 +145,20 @@
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
    * 
-   * @param dbName Name of the database
-   * @param tableName Name of the table
-   * @param partitions Partitions to check, if null or empty
-   * get all the partitions.
-   * @param result Result object
-   * @throws HiveException Failed to get required information 
-   * from the metastore.
-   * @throws IOException Most likely filesystem related
-   * @throws MetaException Failed to get required information 
-   * from the metastore.
+   * @param dbName
+   *          Name of the database
+   * @param tableName
+   *          Name of the table
+   * @param partitions
+   *          Partitions to check, if null or empty get all the partitions.
+   * @param result
+   *          Result object
+   * @throws HiveException
+   *           Failed to get required information from the metastore.
+   * @throws IOException
+   *           Most likely filesystem related
+   * @throws MetaException
+   *           Failed to get required information from the metastore.
    */
   void checkTable(String dbName, String tableName,
       List<Map<String, String>> partitions, CheckResult result)
@@ -165,18 +175,18 @@
 
     List<Partition> parts = new ArrayList<Partition>();
     boolean findUnknownPartitions = true;
-    
+
     if (table.isPartitioned()) {
       if (partitions == null || partitions.isEmpty()) {
         // no partitions specified, let's get all
         parts = hive.getPartitions(table);
       } else {
-        //we're interested in specific partitions,
-        //don't check for any others
+        // we're interested in specific partitions,
+        // don't check for any others
         findUnknownPartitions = false;
         for (Map<String, String> map : partitions) {
           Partition part = hive.getPartition(table, map, false);
-          if(part == null) {
+          if (part == null) {
             PartitionResult pr = new PartitionResult();
             pr.setTableName(tableName);
             pr.setPartitionName(Warehouse.makePartName(map));
@@ -195,16 +205,22 @@
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
    * 
-   * @param table Table to check
-   * @param parts Partitions to check
-   * @param result Result object
-   * @param findUnknownPartitions Should we try to find unknown partitions?
-   * @throws IOException Could not get information from filesystem
-   * @throws HiveException Could not create Partition object
+   * @param table
+   *          Table to check
+   * @param parts
+   *          Partitions to check
+   * @param result
+   *          Result object
+   * @param findUnknownPartitions
+   *          Should we try to find unknown partitions?
+   * @throws IOException
+   *           Could not get information from filesystem
+   * @throws HiveException
+   *           Could not create Partition object
    */
-  void checkTable(Table table, List<Partition> parts, 
-      boolean findUnknownPartitions, CheckResult result) 
-    throws IOException, HiveException {
+  void checkTable(Table table, List<Partition> parts,
+      boolean findUnknownPartitions, CheckResult result) throws IOException,
+      HiveException {
 
     Path tablePath = table.getPath();
     FileSystem fs = tablePath.getFileSystem(conf);
@@ -217,8 +233,8 @@
 
     // check that the partition folders exist on disk
     for (Partition partition : parts) {
-      if(partition == null) {
-        //most likely the user specified an invalid partition
+      if (partition == null) {
+        // most likely the user specified an invalid partition
         continue;
       }
       Path partPath = partition.getPartitionPath();
@@ -236,23 +252,26 @@
       }
     }
 
-    if(findUnknownPartitions) {
+    if (findUnknownPartitions) {
       findUnknownPartitions(table, partPaths, result);
     }
   }
 
   /**
-   * Find partitions on the fs that are
-   * unknown to the metastore
-   * @param table Table where the partitions would be located
-   * @param partPaths Paths of the partitions the ms knows about
-   * @param result Result object 
-   * @throws IOException Thrown if we fail at fetching listings from
-   * the fs.
+   * Find partitions on the fs that are unknown to the metastore.
+   * 
+   * @param table
+   *          Table where the partitions would be located
+   * @param partPaths
+   *          Paths of the partitions the ms knows about
+   * @param result
+   *          Result object
+   * @throws IOException
+   *           Thrown if we fail at fetching listings from the fs.
    */
-  void findUnknownPartitions(Table table, Set<Path> partPaths, 
+  void findUnknownPartitions(Table table, Set<Path> partPaths,
       CheckResult result) throws IOException {
-    
+
     Path tablePath = table.getPath();
     // now check the table folder and see if we find anything
     // that isn't in the metastore
@@ -263,18 +282,18 @@
 
     // remove the partition paths we know about
     allPartDirs.removeAll(partPaths);
-    
+
     // we should now only have the unexpected folders left
     for (Path partPath : allPartDirs) {
       FileSystem fs = partPath.getFileSystem(conf);
-      String partitionName = getPartitionName(fs.makeQualified(tablePath), 
+      String partitionName = getPartitionName(fs.makeQualified(tablePath),
           partPath);
-      
+
       if (partitionName != null) {
         PartitionResult pr = new PartitionResult();
         pr.setPartitionName(partitionName);
         pr.setTableName(table.getName());
-        
+
         result.getPartitionsNotInMs().add(pr);
       }
     }
@@ -283,36 +302,39 @@
   /**
    * Get the partition name from the path.
    * 
-   * @param tablePath Path of the table.
-   * @param partitionPath Path of the partition.
+   * @param tablePath
+   *          Path of the table.
+   * @param partitionPath
+   *          Path of the partition.
    * @return Partition name, for example partitiondate=2008-01-01
    */
   private String getPartitionName(Path tablePath, Path partitionPath) {
     String result = null;
     Path currPath = partitionPath;
     while (currPath != null && !tablePath.equals(currPath)) {
-      if(result == null) {
+      if (result == null) {
         result = currPath.getName();
       } else {
         result = currPath.getName() + Path.SEPARATOR + result;
       }
-      
+
       currPath = currPath.getParent();
     }
     return result;
   }
 
   /**
-   * Recursive method to get the leaf directories of a base path.
-   * Example:
-   * base/dir1/dir2
-   * base/dir3
+   * Recursive method to get the leaf directories of a base path. Example:
+   * base/dir1/dir2
+   * base/dir3
    * 
    * This will return dir2 and dir3 but not dir1.
    * 
-   * @param basePath Start directory
-   * @param allDirs This set will contain the leaf paths at the end.
-   * @throws IOException Thrown if we can't get lists from the fs.
+   * @param basePath
+   *          Start directory
+   * @param allDirs
+   *          This set will contain the leaf paths at the end.
+   * @throws IOException
+   *           Thrown if we can't get lists from the fs.
    */
 
   private void getAllLeafDirs(Path basePath, Set<Path> allDirs)
@@ -322,7 +344,7 @@
 
   private void getAllLeafDirs(Path basePath, Set<Path> allDirs, FileSystem fs)
       throws IOException {
-    
+
     FileStatus[] statuses = fs.listStatus(basePath);
 
     if (statuses.length == 0) {
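
Although the getAllLeafDirs hunk is truncated here, the two path helpers are
fully specified by the javadoc above: getAllLeafDirs recursively collects only
the deepest directories under a base path, and getPartitionName rebuilds a name
like partitiondate=2008-01-01 by walking from the partition directory up to
the table directory. A sketch of both follows, assuming the leaf test is
simply "has no subdirectories"; the class name and paths in main are
hypothetical.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LeafDirSketch {

  // Collect only the deepest directories under basePath: for base/dir1/dir2
  // and base/dir3 this yields dir2 and dir3 but not dir1.
  static void getAllLeafDirs(FileSystem fs, Path basePath, Set<Path> allDirs)
      throws IOException {
    boolean isLeaf = true;
    for (FileStatus status : fs.listStatus(basePath)) {
      if (status.isDir()) {
        isLeaf = false;
        getAllLeafDirs(fs, status.getPath(), allDirs);
      }
    }
    if (isLeaf) {
      allDirs.add(basePath);
    }
  }

  // Rebuild a partition name such as "ds=2008-01-01/hr=12" by walking from
  // the partition directory up to (but not including) the table directory.
  static String getPartitionName(Path tablePath, Path partitionPath) {
    String result = null;
    Path currPath = partitionPath;
    while (currPath != null && !tablePath.equals(currPath)) {
      result = (result == null) ? currPath.getName()
          : currPath.getName() + Path.SEPARATOR + result;
      currPath = currPath.getParent();
    }
    return result;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path table = new Path("/user/hive/warehouse/t");
    Set<Path> leaves = new HashSet<Path>();
    getAllLeafDirs(fs, table, leaves);
    for (Path p : leaves) {
      System.out.println(getPartitionName(fs.makeQualified(table), p));
    }
  }
}

This is how findUnknownPartitions works end to end: every leaf directory that
is not among the metastore's known partition paths is turned into a
PartitionResult via the reconstructed name.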

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java Thu Jan 21 10:37:58 2010
@@ -20,7 +20,7 @@
 
 /**
  * General collection of helper functions
- *
+ * 
  */
 public class HiveUtils {
 
@@ -32,7 +32,6 @@
   public static final String RBRACE = "}";
   public static final String LINE_SEP = System.getProperty("line.separator");
 
-
   public static String escapeString(String str) {
     int length = str.length();
     StringBuilder escape = new StringBuilder(length + 16);
@@ -78,13 +77,12 @@
         } else {
           escape.append(c);
         }
-      break;
+        break;
       }
     }
     return (escape.toString());
   }
 
-
   public static String lightEscapeString(String str) {
     int length = str.length();
     StringBuilder escape = new StringBuilder(length + 16);
@@ -106,7 +104,7 @@
         break;
       default:
         escape.append(c);
-      break;
+        break;
       }
     }
     return (escape.toString());
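
The two break-indentation fixes in this file sit inside the same escaping
pattern: one pass over the string, a switch per character, and a default case
that copies the character through with its own break. The exact escape set
handled by escapeString and lightEscapeString is outside these hunks, so the
cases below are illustrative only, and the class name is hypothetical.

public class EscapeSketch {

  // Single-pass, switch-based escaping in the style of
  // HiveUtils.lightEscapeString; unhandled characters fall through to the
  // default case and are copied verbatim.
  public static String lightEscape(String str) {
    int length = str.length();
    StringBuilder escape = new StringBuilder(length + 16);
    for (int i = 0; i < length; ++i) {
      char c = str.charAt(i);
      switch (c) {
      case '\n':
        escape.append("\\n");
        break;
      case '\r':
        escape.append("\\r");
        break;
      case '\t':
        escape.append("\\t");
        break;
      default:
        escape.append(c);
        break; // the misindented break fixed in the hunks above
      }
    }
    return escape.toString();
  }

  public static void main(String[] args) {
    // Prints a\tb\nc with the escapes spelled out as two-character sequences.
    System.out.println(lightEscape("a\tb\nc"));
  }
}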

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java?rev=901644&r1=901643&r2=901644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/InvalidTableException.java Thu Jan 21 10:37:58 2010
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-/**                                                                                     
+/**
  * Generic exception class for Hive
- *                                                                                      
+ * 
  */
 
 public class InvalidTableException extends HiveException {
@@ -30,7 +30,7 @@
     super();
     this.tableName = tableName;
   }
-  
+
   public InvalidTableException(String message, String tableName) {
     super(message);
     this.tableName = tableName;