Posted to commits@hbase.apache.org by st...@apache.org on 2010/05/07 21:17:55 UTC

svn commit: r942184 [7/15] - in /hadoop/hbase/branches/0.20: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io...

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java Fri May  7 19:17:48 2010
@@ -35,11 +35,11 @@ import org.apache.lucene.index.IndexWrit
 /**
  * Writes the records into a Lucene index writer.
  */
-public class IndexRecordWriter 
+public class IndexRecordWriter
 extends RecordWriter<ImmutableBytesWritable, LuceneDocumentWrapper> {
 
   static final Log LOG = LogFactory.getLog(IndexRecordWriter.class);
-  
+
   private long docCount = 0;
   private TaskAttemptContext context = null;
   private FileSystem fs = null;
@@ -47,10 +47,10 @@ extends RecordWriter<ImmutableBytesWrita
   private IndexConfiguration indexConf = null;
   private Path perm = null;
   private Path temp = null;
-  
+
   /**
    * Creates a new instance.
-   * 
+   *
    * @param context  The task context.
    * @param fs  The file system.
    * @param writer  The index writer.
@@ -58,7 +58,7 @@ extends RecordWriter<ImmutableBytesWrita
    * @param perm  The permanent path in the DFS.
    * @param temp  The temporary local path.
    */
-  public IndexRecordWriter(TaskAttemptContext context, FileSystem fs, 
+  public IndexRecordWriter(TaskAttemptContext context, FileSystem fs,
       IndexWriter writer, IndexConfiguration indexConf, Path perm, Path temp) {
     this.context = context;
     this.fs = fs;
@@ -67,10 +67,10 @@ extends RecordWriter<ImmutableBytesWrita
     this.perm = perm;
     this.temp = temp;
   }
-  
+
   /**
    * Writes the record into an index.
-   * 
+   *
    * @param key  The current key.
    * @param value  The current value.
    * @throws IOException When the index is faulty.
@@ -81,14 +81,14 @@ extends RecordWriter<ImmutableBytesWrita
   throws IOException {
     // unwrap and index doc
     Document doc = value.get();
-    writer.addDocument(doc); 
+    writer.addDocument(doc);
     docCount++;
     context.progress();
-  } 
+  }
 
   /**
    * Closes the writer.
-   * 
+   *
    * @param context  The current context.
    * @throws IOException When closing the writer fails.
    * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
@@ -128,10 +128,10 @@ extends RecordWriter<ImmutableBytesWrita
 
     /** Flag to track when to finish. */
     private boolean closed = false;
-    
+
     /**
     * Runs the thread, sending heartbeats to the framework.
-     * 
+     *
      * @see java.lang.Runnable#run()
      */
     @Override
@@ -143,7 +143,7 @@ extends RecordWriter<ImmutableBytesWrita
         }
         while (!closed) {
         try {
-          context.progress();            
+          context.progress();
           Thread.sleep(1000);
         } catch (InterruptedException e) {
           continue;
@@ -152,14 +152,14 @@ extends RecordWriter<ImmutableBytesWrita
         }
       }
     }
-    
+
     /**
-     * Switches the flag. 
+     * Switches the flag.
      */
     public void setClosed() {
       closed = true;
     }
-    
+
   }
-  
+
 }
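
The close path in this class relies on the heartbeat thread shown in the hunks
above: it keeps calling context.progress() until setClosed() flips the flag, so
the task is not killed while the index is optimized and copied into the DFS. A
minimal sketch of that pattern, assuming only TaskAttemptContext#progress()
(the class name below is hypothetical):

  import org.apache.hadoop.mapreduce.TaskAttemptContext;

  class HeartbeatRunnable implements Runnable {
    private volatile boolean closed = false;
    private final TaskAttemptContext context;

    HeartbeatRunnable(TaskAttemptContext context) {
      this.context = context;
    }

    public void run() {
      while (!closed) {
        try {
          context.progress();  // report liveness to the framework
          Thread.sleep(1000);  // heartbeat roughly once a second
        } catch (InterruptedException e) {
          continue;            // keep beating until setClosed() is called
        }
      }
    }

    public void setClosed() {
      closed = true;
    }
  }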

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java Fri May  7 19:17:48 2010
@@ -37,23 +37,23 @@ import org.apache.commons.logging.LogFac
  * Construct a Lucene document per row, which is consumed by IndexOutputFormat
  * to build a Lucene index.
  */
-public class IndexTableReducer 
-extends Reducer<ImmutableBytesWritable, Result, 
+public class IndexTableReducer
+extends Reducer<ImmutableBytesWritable, Result,
     ImmutableBytesWritable, LuceneDocumentWrapper>
 implements Configurable {
-  
+
   private static final Log LOG = LogFactory.getLog(IndexTableReducer.class);
-  
+
   private IndexConfiguration indexConf;
   private Configuration conf = null;
-  
+
   /**
    * Writes each given record, consisting of the key and the given values, to
    * the index.
-   * 
+   *
    * @param key  The current row key.
    * @param values  The values for the given row.
-   * @param context  The context of the reduce. 
+   * @param context  The context of the reduce.
    * @throws IOException When writing the record fails.
    * @throws InterruptedException When the job gets interrupted.
    */
@@ -85,7 +85,7 @@ implements Configurable {
             Field.Index.NO;
 
         // UTF-8 encode value
-        Field field = new Field(column, Bytes.toString(columnValue), 
+        Field field = new Field(column, Bytes.toString(columnValue),
           store, index);
         field.setBoost(indexConf.getBoost(column));
         field.setOmitNorms(indexConf.isOmitNorms(column));
@@ -98,7 +98,7 @@ implements Configurable {
 
   /**
    * Returns the current configuration.
-   *  
+   *
    * @return The current configuration.
    * @see org.apache.hadoop.conf.Configurable#getConf()
    */
@@ -109,7 +109,7 @@ implements Configurable {
 
   /**
    * Sets the configuration. This is used to set up the index configuration.
-   * 
+   *
    * @param configuration  The configuration to set.
    * @see org.apache.hadoop.conf.Configurable#setConf(
    *   org.apache.hadoop.conf.Configuration)
@@ -126,5 +126,5 @@ implements Configurable {
       LOG.debug("Index conf: " + indexConf);
     }
   }
-  
+
 }
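
The reduce step in this class turns each column of a Result into a Lucene
Field, applying the per-column store/index/boost settings from
IndexConfiguration, and emits the finished document for IndexOutputFormat. A
hedged sketch of that per-row assembly (the Lucene 2.x Field constants and the
LuceneDocumentWrapper constructor are assumptions, not taken from this diff):

  // Assemble one Lucene document for the current row and hand it on.
  Document doc = new Document();
  Field field = new Field("content", Bytes.toString(columnValue),
      Field.Store.YES, Field.Index.TOKENIZED);  // store and tokenize the cell value
  field.setBoost(1.0f);       // per-column boost would come from IndexConfiguration
  field.setOmitNorms(false);  // as would the omit-norms flag
  doc.add(field);
  context.write(key, new LuceneDocumentWrapper(doc));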

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java Fri May  7 19:17:48 2010
@@ -29,7 +29,7 @@ import org.apache.lucene.document.Docume
  * It doesn't really serialize/deserialize a Lucene document.
  */
 public class LuceneDocumentWrapper implements Writable {
-  
+
   /** The document to add to the index. */
   protected Document doc;
 

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java Fri May  7 19:17:48 2010
@@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.GenericOptionsParser;
 
 /**
- * A job with just a map phase to count rows. Map outputs table rows IF the 
+ * A job with just a map phase to count rows. Map outputs table rows IF the
  * input row has columns that have content.
  */
 public class RowCounter {
@@ -45,18 +45,18 @@ public class RowCounter {
    */
   static class RowCounterMapper
   extends TableMapper<ImmutableBytesWritable, Result> {
-    
+
     /** Counter enumeration to count the actual rows. */
     private static enum Counters {ROWS}
 
     /**
      * Maps the data.
-     * 
+     *
      * @param row  The current table row key.
      * @param values  The columns.
      * @param context  The current context.
      * @throws IOException When something is broken with the data.
-     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, 
+     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
      *   org.apache.hadoop.mapreduce.Mapper.Context)
      */
     @Override
@@ -75,13 +75,13 @@ public class RowCounter {
 
   /**
    * Sets up the actual job.
-   * 
+   *
    * @param conf  The current configuration.
    * @param args  The command line parameters.
    * @return The newly created job.
    * @throws IOException When setting up the job fails.
    */
-  public static Job createSubmittableJob(Configuration conf, String[] args) 
+  public static Job createSubmittableJob(Configuration conf, String[] args)
   throws IOException {
     String tableName = args[0];
     Job job = new Job(conf, NAME + "_" + tableName);
@@ -110,7 +110,7 @@ public class RowCounter {
 
   /**
    * Main entry point.
-   * 
+   *
    * @param args  The command line parameters.
    * @throws Exception When running the job fails.
    */
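
The mapper counts a row as soon as one of its cells carries content, using the
standard Hadoop counter API with the Counters.ROWS enum declared above. A
sketch of that check inside map() (Result#list() returning the row's KeyValues
is an assumption):

  for (KeyValue kv : values.list()) {
    if (kv.getValue().length > 0) {
      // at least one cell has content: count the row once and stop looking
      context.getCounter(Counters.ROWS).increment(1);
      break;
    }
  }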

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java Fri May  7 19:17:48 2010
@@ -34,11 +34,11 @@ import org.apache.hadoop.util.StringUtil
 /**
  * Convert HBase tabular data into a format that is consumable by Map/Reduce.
  */
-public class TableInputFormat extends TableInputFormatBase 
+public class TableInputFormat extends TableInputFormatBase
 implements Configurable {
-  
+
   private final Log LOG = LogFactory.getLog(TableInputFormat.class);
-  
+
   /** Job parameter that specifies the input table. */
   public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
   /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified.
@@ -61,13 +61,13 @@ implements Configurable {
   public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
   /** The number of rows for caching that will be passed to scanners. */
   public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";
-  
+
   /** The configuration. */
   private Configuration conf = null;
 
   /**
    * Returns the current configuration.
-   *  
+   *
    * @return The current configuration.
    * @see org.apache.hadoop.conf.Configurable#getConf()
    */
@@ -79,7 +79,7 @@ implements Configurable {
   /**
    * Sets the configuration. This is used to set the details for the table to
    * be scanned.
-   * 
+   *
    * @param configuration  The configuration to set.
    * @see org.apache.hadoop.conf.Configurable#setConf(
    *   org.apache.hadoop.conf.Configuration)
@@ -93,9 +93,9 @@ implements Configurable {
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
     }
-    
+
     Scan scan = null;
-    
+
     if (conf.get(SCAN) != null) {
       try {
         scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
@@ -105,22 +105,22 @@ implements Configurable {
     } else {
       try {
         scan = new Scan();
-        
+
         if (conf.get(SCAN_COLUMNS) != null) {
           scan.addColumns(conf.get(SCAN_COLUMNS));
         }
-        
-        if (conf.get(SCAN_COLUMN_FAMILY) != null) { 
+
+        if (conf.get(SCAN_COLUMN_FAMILY) != null) {
           scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY)));
         }
-        
+
         if (conf.get(SCAN_TIMESTAMP) != null) {
           scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
         }
-        
+
         if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
           scan.setTimeRange(
-              Long.parseLong(conf.get(SCAN_TIMERANGE_START)), 
+              Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
               Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
         }
 
@@ -141,5 +141,5 @@ implements Configurable {
 
     setScan(scan);
   }
-  
+
 }
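
setConf() above either deserializes a complete scan from the SCAN property or
assembles one from the individual SCAN_* keys. A hedged sketch of driving it
from a job configuration (the table and family names are placeholders):

  Configuration conf = job.getConfiguration();
  conf.set(TableInputFormat.INPUT_TABLE, "mytable");    // table to read from
  conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf");  // limit to one family
  conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");    // rows per scanner trip
  job.setInputFormatClass(TableInputFormat.class);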

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java Fri May  7 19:17:48 2010
@@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.util.Pair
 import org.apache.hadoop.util.StringUtils;
 
 /**
- * A base for {@link TableInputFormat}s. Receives a {@link HTable}, a 
- * {@link Scan} instance that defines the input columns etc. Subclasses may use 
+ * A base for {@link TableInputFormat}s. Receives a {@link HTable}, a
+ * {@link Scan} instance that defines the input columns etc. Subclasses may use
  * other TableRecordReader implementations.
  * <p>
  * An example of a subclass:
@@ -74,7 +74,7 @@ import org.apache.hadoop.util.StringUtil
  */
 public abstract class TableInputFormatBase
 extends InputFormat<ImmutableBytesWritable, Result> {
-  
+
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
 
   /** Holds the details for the internal scanner. */
@@ -85,12 +85,12 @@ extends InputFormat<ImmutableBytesWritab
   private TableRecordReader tableRecordReader = null;
 
   /**
-   * Iterate over HBase table data, returning (ImmutableBytesWritable, Result) 
+   * Iterate over HBase table data, returning (ImmutableBytesWritable, Result)
    * pairs.
    */
   protected class TableRecordReader
   extends RecordReader<ImmutableBytesWritable, Result> {
-    
+
     private ResultScanner scanner = null;
     private Scan scan = null;
     private HTable htable = null;
@@ -113,7 +113,7 @@ extends InputFormat<ImmutableBytesWritab
     /**
      * Build the scanner. Not done in constructor to allow for extension.
      *
-     * @throws IOException When restarting the scan fails. 
+     * @throws IOException When restarting the scan fails.
      */
     public void init() throws IOException {
       restart(scan.getStartRow());
@@ -121,7 +121,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Sets the HBase table.
-     * 
+     *
      * @param htable  The {@link HTable} to scan.
      */
     public void setHTable(HTable htable) {
@@ -130,7 +130,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Sets the scan defining the actual details like columns etc.
-     *  
+     *
      * @param scan  The scan to set.
      */
     public void setScan(Scan scan) {
@@ -139,7 +139,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Closes the split.
-     * 
+     *
      * @see org.apache.hadoop.mapreduce.RecordReader#close()
      */
     @Override
@@ -149,7 +149,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Returns the current key.
-     *  
+     *
      * @return The current key.
      * @throws IOException
      * @throws InterruptedException When the job is aborted.
@@ -163,7 +163,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Returns the current value.
-     * 
+     *
      * @return The current value.
      * @throws IOException When the value is faulty.
      * @throws InterruptedException When the job is aborted.
@@ -176,13 +176,13 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Initializes the reader.
-     * 
+     *
      * @param inputsplit  The split to work with.
      * @param context  The current task context.
      * @throws IOException When setting up the reader fails.
      * @throws InterruptedException When the job is aborted.
      * @see org.apache.hadoop.mapreduce.RecordReader#initialize(
-     *   org.apache.hadoop.mapreduce.InputSplit, 
+     *   org.apache.hadoop.mapreduce.InputSplit,
      *   org.apache.hadoop.mapreduce.TaskAttemptContext)
      */
     @Override
@@ -193,7 +193,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * Positions the record reader to the next record.
-     *  
+     *
      * @return <code>true</code> if there was another record.
      * @throws IOException When reading the record failed.
      * @throws InterruptedException When the job was aborted.
@@ -206,7 +206,7 @@ extends InputFormat<ImmutableBytesWritab
       try {
         value = this.scanner.next();
       } catch (IOException e) {
-        LOG.debug("recovered from " + StringUtils.stringifyException(e));  
+        LOG.debug("recovered from " + StringUtils.stringifyException(e));
         restart(lastRow);
         scanner.next();    // skip presumed already mapped row
         value = scanner.next();
@@ -221,7 +221,7 @@ extends InputFormat<ImmutableBytesWritab
 
     /**
      * The current progress of the record reader through its data.
-     * 
+     *
      * @return A number between 0.0 and 1.0, the fraction of the data read.
      * @see org.apache.hadoop.mapreduce.RecordReader#getProgress()
      */
@@ -235,13 +235,13 @@ extends InputFormat<ImmutableBytesWritab
   /**
    * Builds a TableRecordReader. If no TableRecordReader was provided, uses
    * the default.
-   * 
+   *
    * @param split  The split to work with.
    * @param context  The current context.
    * @return The newly created record reader.
    * @throws IOException When creating the reader fails.
    * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
-   *   org.apache.hadoop.mapreduce.InputSplit, 
+   *   org.apache.hadoop.mapreduce.InputSplit,
    *   org.apache.hadoop.mapreduce.TaskAttemptContext)
    */
   @Override
@@ -276,7 +276,7 @@ extends InputFormat<ImmutableBytesWritab
   @Override
   public List<InputSplit> getSplits(JobContext context) throws IOException {
     Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
-    if (keys == null || keys.getFirst() == null || 
+    if (keys == null || keys.getFirst() == null ||
         keys.getFirst().length == 0) {
       throw new IOException("Expecting at least one region.");
     }
@@ -284,7 +284,7 @@ extends InputFormat<ImmutableBytesWritab
       throw new IOException("No table was provided.");
     }
     int count = 0;
-    List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length); 
+    List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length);
     for (int i = 0; i < keys.getFirst().length; i++) {
       if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
         continue;
@@ -296,19 +296,19 @@ extends InputFormat<ImmutableBytesWritab
       // determine if the given start and stop keys fall into the region
       if ((startRow.length == 0 || keys.getSecond()[i].length == 0 ||
            Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) &&
-          (stopRow.length == 0 || 
+          (stopRow.length == 0 ||
            Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) {
-        byte[] splitStart = startRow.length == 0 || 
-          Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? 
+        byte[] splitStart = startRow.length == 0 ||
+          Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ?
             keys.getFirst()[i] : startRow;
-        byte[] splitStop = (stopRow.length == 0 || 
+        byte[] splitStop = (stopRow.length == 0 ||
           Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
-          keys.getSecond()[i].length > 0 ? 
+          keys.getSecond()[i].length > 0 ?
             keys.getSecond()[i] : stopRow;
         InputSplit split = new TableSplit(table.getTableName(),
           splitStart, splitStop, regionLocation);
         splits.add(split);
-        if (LOG.isDebugEnabled()) 
+        if (LOG.isDebugEnabled())
           LOG.debug("getSplits: split -> " + (count++) + " -> " + split);
       }
     }
@@ -361,7 +361,7 @@ extends InputFormat<ImmutableBytesWritab
 
   /**
    * Gets the scan defining the actual details like columns etc.
-   *  
+   *
    * @return The internal scan instance.
    */
   public Scan getScan() {
@@ -371,7 +371,7 @@ extends InputFormat<ImmutableBytesWritab
 
   /**
    * Sets the scan defining the actual details like columns etc.
-   *  
+   *
    * @param scan  The scan to set.
    */
   public void setScan(Scan scan) {
@@ -381,7 +381,7 @@ extends InputFormat<ImmutableBytesWritab
   /**
    * Allows subclasses to set the {@link TableRecordReader}.
    *
-   * @param tableRecordReader A different {@link TableRecordReader} 
+   * @param tableRecordReader A different {@link TableRecordReader}
    *   implementation.
    */
   protected void setTableRecordReader(TableRecordReader tableRecordReader) {
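
A subclass only needs to hand in the table and scan through the setters above,
and may swap in its own reader via setTableRecordReader(). A minimal
hypothetical subclass (the no-argument HBaseConfiguration constructor and the
table name are assumptions):

  public class ExampleTableInputFormat extends TableInputFormatBase {
    public ExampleTableInputFormat() throws IOException {
      // fixed table and scan; a real subclass would read these from the conf
      setHTable(new HTable(new HBaseConfiguration(), "example_table"));
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));
      setScan(scan);
    }
  }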

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java Fri May  7 19:17:48 2010
@@ -39,11 +39,11 @@ import org.apache.hadoop.mapreduce.Job;
  */
 @SuppressWarnings("unchecked")
 public class TableMapReduceUtil {
-  
+
   /**
-   * Use this before submitting a TableMap job. It will appropriately set up 
+   * Use this before submitting a TableMap job. It will appropriately set up
    * the job.
-   * 
+   *
    * @param table  The table name to read from.
    * @param scan  The scan instance with the columns, time range etc.
    * @param mapper  The mapper class to use.
@@ -53,8 +53,8 @@ public class TableMapReduceUtil {
    * @throws IOException When setting up the details fails.
    */
   public static void initTableMapperJob(String table, Scan scan,
-      Class<? extends TableMapper> mapper, 
-      Class<? extends WritableComparable> outputKeyClass, 
+      Class<? extends TableMapper> mapper,
+      Class<? extends WritableComparable> outputKeyClass,
       Class<? extends Writable> outputValueClass, Job job) throws IOException {
     job.setInputFormatClass(TableInputFormat.class);
     if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
@@ -67,13 +67,13 @@ public class TableMapReduceUtil {
 
   /**
    * Writes the given scan into a Base64 encoded string.
-   * 
+   *
    * @param scan  The scan to write out.
    * @return The scan saved in a Base64 encoded string.
    * @throws IOException When writing the scan fails.
    */
   static String convertScanToString(Scan scan) throws IOException {
-    ByteArrayOutputStream out = new ByteArrayOutputStream();  
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
     DataOutputStream dos = new DataOutputStream(out);
     scan.write(dos);
     return Base64.encodeBytes(out.toByteArray());
@@ -81,7 +81,7 @@ public class TableMapReduceUtil {
 
   /**
    * Converts the given Base64 string back into a Scan instance.
-   * 
+   *
    * @param base64  The scan details.
    * @return The newly created Scan instance.
    * @throws IOException When reading the scan instance fails.
@@ -93,15 +93,15 @@ public class TableMapReduceUtil {
     scan.readFields(dis);
     return scan;
   }
-  
+
   /**
    * Use this before submitting a TableReduce job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table  The output table.
    * @param reducer  The reducer class to use.
    * @param job  The current job to adjust.
-   * @throws IOException When determining the region count fails. 
+   * @throws IOException When determining the region count fails.
    */
   public static void initTableReducerJob(String table,
     Class<? extends TableReducer> reducer, Job job)
@@ -112,13 +112,13 @@ public class TableMapReduceUtil {
   /**
    * Use this before submitting a TableReduce job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table  The output table.
    * @param reducer  The reducer class to use.
    * @param job  The current job to adjust.
-   * @param partitioner  Partitioner to use. Pass <code>null</code> to use 
+   * @param partitioner  Partitioner to use. Pass <code>null</code> to use
    * default partitioner.
-   * @throws IOException When determining the region count fails. 
+   * @throws IOException When determining the region count fails.
    */
   public static void initTableReducerJob(String table,
     Class<? extends TableReducer> reducer, Job job, Class partitioner)
@@ -140,17 +140,17 @@ public class TableMapReduceUtil {
       job.setPartitionerClass(partitioner);
     }
   }
-  
+
   /**
-   * Ensures that the given number of reduce tasks for the given job 
-   * configuration does not exceed the number of regions for the given table. 
-   * 
+   * Ensures that the given number of reduce tasks for the given job
+   * configuration does not exceed the number of regions for the given table.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void limitNumReduceTasks(String table, Job job) 
-  throws IOException { 
+  public static void limitNumReduceTasks(String table, Job job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(
       job.getConfiguration()), table);
     int regions = outputTable.getRegionsInfo().size();
@@ -159,26 +159,26 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets the number of reduce tasks for the given job configuration to the 
-   * number of regions the given table has. 
-   * 
+   * Sets the number of reduce tasks for the given job configuration to the
+   * number of regions the given table has.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void setNumReduceTasks(String table, Job job) 
-  throws IOException { 
+  public static void setNumReduceTasks(String table, Job job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(
       job.getConfiguration()), table);
     int regions = outputTable.getRegionsInfo().size();
     job.setNumReduceTasks(regions);
   }
-  
+
   /**
    * Sets the number of rows to return and cache with each scanner iteration.
    * Higher caching values will enable faster mapreduce jobs at the expense of
    * requiring more heap to contain the cached rows.
-   * 
+   *
    * @param job The current job to adjust.
    * @param batchSize The number of rows to return in batch with each scanner
    * iteration.
@@ -186,5 +186,5 @@ public class TableMapReduceUtil {
   public static void setScannerCaching(Job job, int batchSize) {
     job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize);
   }
-  
+
 }
\ No newline at end of file
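
Taken together, the helpers in this class cover the usual driver boilerplate
for table-in/table-out jobs. A hedged sketch of a job setup that uses them
(MyMapper, MyReducer and the table names are hypothetical):

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("cf"));
  Job job = new Job(conf, "example");
  TableMapReduceUtil.initTableMapperJob("source_table", scan, MyMapper.class,
      ImmutableBytesWritable.class, Result.class, job);
  TableMapReduceUtil.initTableReducerJob("target_table", MyReducer.class, job);
  TableMapReduceUtil.setScannerCaching(job, 500);              // rows per scanner trip
  TableMapReduceUtil.limitNumReduceTasks("target_table", job); // cap at region count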

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java Fri May  7 19:17:48 2010
@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.io.Immuta
 import org.apache.hadoop.mapreduce.Mapper;
 
 /**
- * Extends the base <code>Mapper</code> class to add the required input key 
+ * Extends the base <code>Mapper</code> class to add the required input key
  * and value classes.
- * 
+ *
  * @param <KEYOUT>  The type of the key.
  * @param <VALUEOUT>  The type of the value.
  * @see org.apache.hadoop.mapreduce.Mapper

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java Fri May  7 19:17:48 2010
@@ -36,9 +36,9 @@ import org.apache.hadoop.mapreduce.TaskA
 
 /**
  * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored
- * while the output value <u>must</u> be either a {@link Put} or a 
- * {@link Delete} instance. 
- * 
+ * while the output value <u>must</u> be either a {@link Put} or a
+ * {@link Delete} instance.
+ *
  * @param <KEY>  The type of the key. Ignored in this class.
  */
 public class TableOutputFormat<KEY> extends OutputFormat<KEY, Writable> {
@@ -49,18 +49,18 @@ public class TableOutputFormat<KEY> exte
 
   /**
    * Writes the reducer output to an HBase table.
-   * 
+   *
    * @param <KEY>  The type of the key.
    */
-  protected static class TableRecordWriter<KEY> 
+  protected static class TableRecordWriter<KEY>
   extends RecordWriter<KEY, Writable> {
-    
+
     /** The table to write to. */
     private HTable table;
 
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing.
-     * 
+     *
      * @param table  The table to write to.
      */
     public TableRecordWriter(HTable table) {
@@ -69,37 +69,37 @@ public class TableOutputFormat<KEY> exte
 
     /**
     * Closes the writer, in this case flushing table commits.
-     * 
+     *
      * @param context  The context.
      * @throws IOException When closing the writer fails.
      * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
      */
     @Override
-    public void close(TaskAttemptContext context) 
+    public void close(TaskAttemptContext context)
     throws IOException {
       table.flushCommits();
     }
 
     /**
      * Writes a key/value pair into the table.
-     * 
+     *
      * @param key  The key.
      * @param value  The value.
      * @throws IOException When writing fails.
      * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
      */
     @Override
-    public void write(KEY key, Writable value) 
+    public void write(KEY key, Writable value)
     throws IOException {
       if (value instanceof Put) this.table.put(new Put((Put)value));
       else if (value instanceof Delete) this.table.delete(new Delete((Delete)value));
       else throw new IOException("Pass a Delete or a Put");
     }
   }
-  
+
   /**
    * Creates a new record writer.
-   * 
+   *
    * @param context  The current task context.
    * @return The newly created writer instance.
    * @throws IOException When creating the writer fails.
@@ -107,13 +107,13 @@ public class TableOutputFormat<KEY> exte
    * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
    */
   public RecordWriter<KEY, Writable> getRecordWriter(
-    TaskAttemptContext context) 
+    TaskAttemptContext context)
   throws IOException, InterruptedException {
     // expecting exactly one path
     String tableName = context.getConfiguration().get(OUTPUT_TABLE);
     HTable table = null;
     try {
-      table = new HTable(new HBaseConfiguration(context.getConfiguration()), 
+      table = new HTable(new HBaseConfiguration(context.getConfiguration()),
         tableName);
     } catch(IOException e) {
       LOG.error(e);
@@ -125,9 +125,9 @@ public class TableOutputFormat<KEY> exte
 
   /**
    * Checks if the output target exists.
-   * 
+   *
    * @param context  The current context.
-   * @throws IOException When the check fails. 
+   * @throws IOException When the check fails.
    * @throws InterruptedException When the job is aborted.
    * @see org.apache.hadoop.mapreduce.OutputFormat#checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext)
    */
@@ -135,12 +135,12 @@ public class TableOutputFormat<KEY> exte
   public void checkOutputSpecs(JobContext context) throws IOException,
       InterruptedException {
     // TODO Check if the table exists?
-    
+
   }
 
   /**
    * Returns the output committer.
-   *  
+   *
    * @param context  The current context.
    * @return The committer.
    * @throws IOException When creating the committer fails.
@@ -148,9 +148,9 @@ public class TableOutputFormat<KEY> exte
    * @see org.apache.hadoop.mapreduce.OutputFormat#getOutputCommitter(org.apache.hadoop.mapreduce.TaskAttemptContext)
    */
   @Override
-  public OutputCommitter getOutputCommitter(TaskAttemptContext context) 
+  public OutputCommitter getOutputCommitter(TaskAttemptContext context)
   throws IOException, InterruptedException {
     return new TableOutputCommitter();
   }
-  
+
 }
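
TableRecordWriter.write() above accepts only Put or Delete values. A sketch of
a reducer feeding it (the enclosing reducer class and the column names are
hypothetical):

  public void reduce(ImmutableBytesWritable key, Iterable<Result> values,
      Context context) throws IOException, InterruptedException {
    Put put = new Put(key.get());  // row key carried over from the map output
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    context.write(key, put);       // handed to TableRecordWriter.write()
  }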

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java Fri May  7 19:17:48 2010
@@ -24,16 +24,16 @@ import org.apache.hadoop.mapreduce.Reduc
 
 /**
  * Extends the basic <code>Reducer</code> class to add the required key and
- * value input/output classes. While the input key and value as well as the 
- * output key can be anything handed in from the previous map phase, the output 
- * value <u>must</u> be either a {@link org.apache.hadoop.hbase.client.Put Put} 
+ * value input/output classes. While the input key and value as well as the
+ * output key can be anything handed in from the previous map phase, the output
+ * value <u>must</u> be either a {@link org.apache.hadoop.hbase.client.Put Put}
  * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when
  * using the {@link TableOutputFormat} class.
  * <p>
- * This class is extended by {@link IdentityTableReducer} but can also be 
+ * This class is extended by {@link IdentityTableReducer} but can also be
  * subclassed to implement similar features or any custom code needed. It has
- * the advantage of constraining the output value to a specific basic type. 
- * 
+ * the advantage of constraining the output value to a specific basic type.
+ *
  * @param <KEYIN>  The type of the input key.
  * @param <VALUEIN>  The type of the input value.
  * @param <KEYOUT>  The type of the output key.

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java Fri May  7 19:17:48 2010
@@ -32,9 +32,9 @@ import org.apache.hadoop.mapreduce.Input
  * A table split corresponds to a key range (low, high). All references to row
  * below refer to the key of the row.
  */
-public class TableSplit extends InputSplit 
+public class TableSplit extends InputSplit
 implements Writable, Comparable<TableSplit> {
-  
+
   private byte [] tableName;
   private byte [] startRow;
   private byte [] endRow;
@@ -48,7 +48,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Creates a new instance while assigning all variables.
-   * 
+   *
    * @param tableName  The name of the current table.
    * @param startRow  The start row of the split.
    * @param endRow  The end row of the split.
@@ -64,8 +64,8 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Returns the table name.
-   * 
-   * @return The table name. 
+   *
+   * @return The table name.
    */
   public byte [] getTableName() {
     return tableName;
@@ -73,26 +73,26 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Returns the start row.
-   *  
+   *
    * @return The start row.
-   */ 
+   */
   public byte [] getStartRow() {
     return startRow;
   }
 
   /**
    * Returns the end row.
-   * 
-   * @return The end row. 
+   *
+   * @return The end row.
    */
   public byte [] getEndRow() {
     return endRow;
   }
 
-  /** 
+  /**
    * Returns the region location.
-   * 
-   * @return The region's location. 
+   *
+   * @return The region's location.
    */
   public String getRegionLocation() {
     return regionLocation;
@@ -100,7 +100,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Returns the region's location as an array.
-   * 
+   *
    * @return The array containing the region location.
    * @see org.apache.hadoop.mapreduce.InputSplit#getLocations()
    */
@@ -111,7 +111,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Returns the length of the split.
-   * 
+   *
    * @return The length of the split.
    * @see org.apache.hadoop.mapreduce.InputSplit#getLength()
    */
@@ -123,7 +123,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Reads the values of each field.
-   * 
+   *
    * @param in  The input to read from.
    * @throws IOException When reading the input fails.
    */
@@ -137,7 +137,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Writes the field values to the output.
-   * 
+   *
    * @param out  The output to write to.
    * @throws IOException When writing the values to the output fails.
    */
@@ -151,7 +151,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Returns the details about this instance as a string.
-   * 
+   *
    * @return The values of this instance as a string.
    * @see java.lang.Object#toString()
    */
@@ -163,7 +163,7 @@ implements Writable, Comparable<TableSpl
 
   /**
    * Compares this split against the given one.
-   * 
+   *
    * @param split  The split to compare to.
    * @return The result of the comparison.
    * @see java.lang.Comparable#compareTo(java.lang.Object)
@@ -172,5 +172,5 @@ implements Writable, Comparable<TableSpl
   public int compareTo(TableSplit split) {
     return Bytes.compareTo(getStartRow(), split.getStartRow());
   }
-  
+
 }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java Fri May  7 19:17:48 2010
@@ -97,7 +97,7 @@ below. If running the reduce step makes 
 to have lots of reducers so load is spread across the hbase cluster.</p>
 
 <p>There is also a new hbase partitioner that will run as many reducers as
-currently existing regions.  The 
+currently existing regions.  The
 {@link org.apache.hadoop.hbase.mapreduce.HRegionPartitioner} is suitable
 when your table is large and your upload is not such that it will greatly
 alter the number of existing regions when done; otherwise use the default
@@ -117,7 +117,7 @@ The row id must be formatted as a {@link
 value as a {@link org.apache.hadoop.hbase.KeyValue} (A KeyValue holds the value for a cell and
 its coordinates; row/family/qualifier/timestamp, etc.).  Note that you must
 specify a timestamp when you create the KeyValue in your map task
-otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE).  
+otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE).
 Use System.currentTimeMillis() if your data does not inherently bear a timestamp.
 Your reduce task
 will also need to emit the KeyValues in order.  See {@link org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer}
@@ -134,7 +134,7 @@ Given the default hash Partitioner, if t
had configured two reducers, reducer 0 would get keys 0, 2 and 4 whereas
 reducer 1 would get keys 1 and 3 (in order).  For your bulk import to work,
the keys need to be ordered so reducer 0 gets keys 0-2 and reducer 1 gets keys
-3-4 (See TotalOrderPartitioner up in hadoop for more on what this means). 
+3-4 (See TotalOrderPartitioner up in hadoop for more on what this means).
 To achieve total ordering, you will likely need to write a Partitioner
that is intimate with your table's key namespace and that knows how
 to distribute keys among the reducers so a total order is maintained
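
As this package documentation stresses, bulk-import KeyValues need an explicit
timestamp or they default to LATEST_TIMESTAMP. A hedged sketch (the KeyValue
constructor arity is assumed from the 0.20 API; names are examples):

  long now = System.currentTimeMillis();
  KeyValue kv = new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q"),
      now, Bytes.toBytes("value"));  // explicit timestamp, not LATEST_TIMESTAMP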

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/AddColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/AddColumn.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/AddColumn.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/AddColumn.java Fri May  7 19:17:48 2010
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.ipc.HRegi
 class AddColumn extends ColumnOperation {
   private final HColumnDescriptor newColumn;
 
-  AddColumn(final HMaster master, final byte [] tableName, 
-    final HColumnDescriptor newColumn) 
+  AddColumn(final HMaster master, final byte [] tableName,
+    final HColumnDescriptor newColumn)
   throws IOException {
     super(master, tableName);
     this.newColumn = newColumn;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Fri May  7 19:17:48 2010
@@ -56,11 +56,11 @@ import org.apache.hadoop.ipc.RemoteExcep
 /**
 * Base HRegion scanner class. Holds utility common to <code>ROOT</code> and
  * <code>META</code> HRegion scanners.
- * 
+ *
  * <p>How do we know if all regions are assigned? After the initial scan of
  * the <code>ROOT</code> and <code>META</code> regions, all regions known at
  * that time will have been or are in the process of being assigned.</p>
- * 
+ *
  * <p>When a region is split the region server notifies the master of the
  * split and the new regions are assigned. But suppose the master loses the
  * split message? We need to periodically rescan the <code>ROOT</code> and
@@ -69,34 +69,34 @@ import org.apache.hadoop.ipc.RemoteExcep
  *    <li>If we rescan, any regions that are new but not assigned will have
  *    no server info. Any regions that are not being served by the same
  *    server will get re-assigned.</li>
- *      
+ *
  *    <li>Thus a periodic rescan of the root region will find any new
  *    <code>META</code> regions where we missed the <code>META</code> split
  *    message or we failed to detect a server death and consequently need to
  *    assign the region to a new server.</li>
- *        
+ *
  *    <li>if we keep track of all the known <code>META</code> regions, then
  *    we can rescan them periodically. If we do this then we can detect any
  *    regions for which we missed a region split message.</li>
  *    </ul>
- *    
+ *
  * Thus just keeping track of all the <code>META</code> regions permits
  * periodic rescanning which will detect unassigned regions (new or
  * otherwise) without the need to keep track of every region.</p>
- * 
+ *
  * <p>So the <code>ROOT</code> region scanner needs to wake up:
  * <ol>
  * <li>when the master receives notification that the <code>ROOT</code>
  * region has been opened.</li>
  * <li>periodically after the first scan</li>
  * </ol>
- * 
+ *
  * The <code>META</code>  scanner needs to wake up:
  * <ol>
  * <li>when a <code>META</code> region comes on line</li>
 * <li>periodically to rescan the online <code>META</code> regions</li>
  * </ol>
- * 
+ *
  * <p>A <code>META</code> region is not 'online' until it has been scanned
  * once.
  */
@@ -120,16 +120,16 @@ abstract class BaseScanner extends Chore
   }
   private final boolean rootRegion;
   protected final HMaster master;
-  
+
   protected boolean initialScanComplete;
-  
+
   protected abstract boolean initialScan();
   protected abstract void maintenanceScan();
-  
-  // will use this variable to synchronize and make sure we aren't interrupted 
+
+  // will use this variable to synchronize and make sure we aren't interrupted
   // mid-scan
   final Object scannerLock = new Object();
-  
+
   BaseScanner(final HMaster master, final boolean rootRegion, final int period,
       final AtomicBoolean stop) {
     super(period, stop);
@@ -137,17 +137,17 @@ abstract class BaseScanner extends Chore
     this.master = master;
     this.initialScanComplete = false;
   }
-  
+
   /** @return true if initial scan completed successfully */
   public boolean isInitialScanComplete() {
     return initialScanComplete;
   }
-  
+
   @Override
   protected boolean initialChore() {
     return initialScan();
   }
-  
+
   @Override
   protected void chore() {
     maintenanceScan();
@@ -203,7 +203,7 @@ abstract class BaseScanner extends Chore
         e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
         if (e instanceof UnknownScannerException) {
           // Reset scannerId so we do not try closing a scanner the other side
-          // has lost account of: prevents duplicated stack trace out of the 
+          // has lost account of: prevents duplicated stack trace out of the
           // below close in the finally.
           scannerId = -1L;
         }
@@ -221,7 +221,7 @@ abstract class BaseScanner extends Chore
     }
 
     // Scan is finished.
-    
+
     // First clean up any meta region rows which had null HRegionInfos
     if (emptyRows.size() > 0) {
       LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo " +
@@ -282,7 +282,7 @@ abstract class BaseScanner extends Chore
   * the filesystem, then the daughters were not added to .META. -- must have been
    * a crash before their addition.  Add them here.
    * @param metaRegionName Meta region name: e.g. .META.,,1
-   * @param server HRegionInterface of meta server to talk to 
+   * @param server HRegionInterface of meta server to talk to
    * @param parent HRegionInfo of split offlined parent
    * @param rowContent Content of <code>parent</code> row in
    * <code>metaRegionName</code>
@@ -290,7 +290,7 @@ abstract class BaseScanner extends Chore
    * the filesystem.
    * @throws IOException
    */
-  private boolean cleanupAndVerifySplits(final byte [] metaRegionName, 
+  private boolean cleanupAndVerifySplits(final byte [] metaRegionName,
     final HRegionInterface srvr, final HRegionInfo parent,
     Result rowContent)
   throws IOException {
@@ -312,7 +312,7 @@ abstract class BaseScanner extends Chore
     return result;
   }
 
-  
+
   /*
    * See if the passed daughter has references in the filesystem to the parent
    * and if not, remove the note of daughter region in the parent row: its
@@ -328,7 +328,7 @@ abstract class BaseScanner extends Chore
    * @return True if this daughter still has references to the parent.
    * @throws IOException
    */
-  private boolean checkDaughter(final byte [] metaRegionName, 
+  private boolean checkDaughter(final byte [] metaRegionName,
     final HRegionInterface srvr, final HRegionInfo parent,
     final Result rowContent, final byte [] qualifier)
   throws IOException {
@@ -394,7 +394,7 @@ abstract class BaseScanner extends Chore
    * @param daughter
    * @throws IOException
    */
-  private void addDaughterRowChecked(final byte [] metaRegionName, 
+  private void addDaughterRowChecked(final byte [] metaRegionName,
     final HRegionInterface srvr, final byte [] parent,
     final HRegionInfo split, final byte [] daughter)
   throws IOException {
@@ -463,7 +463,7 @@ abstract class BaseScanner extends Chore
    * @param qualifier
    * @throws IOException
    */
-  private void removeDaughterFromParent(final byte [] metaRegionName, 
+  private void removeDaughterFromParent(final byte [] metaRegionName,
     final HRegionInterface srvr, final HRegionInfo parent,
     final HRegionInfo split, final byte [] qualifier)
   throws IOException {
@@ -476,20 +476,20 @@ abstract class BaseScanner extends Chore
     srvr.delete(metaRegionName, delete);
   }
 
-  /* 
+  /*
    * Checks if a daughter region -- either splitA or splitB -- still holds
    * references to parent.  If not, removes reference to the split from
    * the parent meta region row so we don't check it any more.
    * @param metaRegionName Name of meta region to look in.
    * @param srvr Where region resides.
-   * @param parent Parent region name. 
+   * @param parent Parent region name.
    * @param rowContent Keyed content of the parent row in meta region.
    * @param split Which column family.
    * @param qualifier Which of the daughters to look at, splitA or splitB.
    * @return True if still has references to parent.
    * @throws IOException
    */
-  private boolean hasReferences(final byte [] metaRegionName, 
+  private boolean hasReferences(final byte [] metaRegionName,
     final HRegionInterface srvr, final HRegionInfo parent,
     Result rowContent, final HRegionInfo split, byte [] qualifier)
   throws IOException {
@@ -534,7 +534,7 @@ abstract class BaseScanner extends Chore
    */
   protected void checkAssigned(final HRegionInterface regionServer,
     final MetaRegion meta, final HRegionInfo info,
-    final String serverAddress, final long startCode) 
+    final String serverAddress, final long startCode)
   throws IOException {
     String sa = serverAddress;
     long sc = startCode;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java Fri May  7 19:17:48 2010
@@ -47,8 +47,8 @@ class ChangeTableState extends TableOper
     new TreeMap<String, HashSet<HRegionInfo>>();
   protected long lockid;
 
-  ChangeTableState(final HMaster master, final byte [] tableName, 
-    final boolean onLine) 
+  ChangeTableState(final HMaster master, final byte [] tableName,
+    final boolean onLine)
   throws IOException {
     super(master, tableName);
     this.online = onLine;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java Fri May  7 19:17:48 2010
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.util.Writ
 
 abstract class ColumnOperation extends TableOperation {
   private final Log LOG = LogFactory.getLog(this.getClass());
-  
-  protected ColumnOperation(final HMaster master, final byte [] tableName) 
+
+  protected ColumnOperation(final HMaster master, final byte [] tableName)
   throws IOException {
     super(master, tableName);
   }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java Fri May  7 19:17:48 2010
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.regionser
 class DeleteColumn extends ColumnOperation {
   private final byte [] columnName;
 
-  DeleteColumn(final HMaster master, final byte [] tableName, 
-    final byte [] columnName) 
+  DeleteColumn(final HMaster master, final byte [] tableName,
+    final byte [] columnName)
   throws IOException {
     super(master, tableName);
     this.columnName = columnName;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri May  7 19:17:48 2010
@@ -95,12 +95,12 @@ import org.apache.zookeeper.Watcher.Even
 /**
 * HMaster is the "master server" for HBase.
  * There is only one HMaster for a single HBase deployment.
- * 
+ *
  * NOTE: This class extends Thread rather than Chore because the sleep time
  * can be interrupted when there is something to do, rather than the Chore
  * sleep time which is invariant.
  */
-public class HMaster extends Thread implements HConstants, HMasterInterface, 
+public class HMaster extends Thread implements HConstants, HMasterInterface,
   HMasterRegionInterface, Watcher {
 
   static final Log LOG = LogFactory.getLog(HMaster.class.getName());
@@ -120,7 +120,7 @@ public class HMaster extends Thread impl
   private final HBaseConfiguration conf;
   final FileSystem fs;
   final Random rand;
-  final int threadWakeFrequency; 
+  final int threadWakeFrequency;
   final int numRetries;
   final long maxRegionOpenTime;
   final int leaseTimeout;
@@ -134,14 +134,14 @@ public class HMaster extends Thread impl
   final ServerConnection connection;
 
   final int metaRescanInterval;
-  
+
   // A Sleeper that sleeps for threadWakeFrequency
   private final Sleeper sleeper;
-  
+
  // Default access so accessible from unit tests. MASTER is the name of the webapp
   // and the attribute name used stuffing this instance into web context.
   InfoServer infoServer;
-  
+
   /** Name of master server */
   public static final String MASTER = "master";
 
@@ -152,14 +152,14 @@ public class HMaster extends Thread impl
 
   ServerManager serverManager;
   RegionManager regionManager;
-  
+
   private MasterMetrics metrics;
   final Lock splitLogLock = new ReentrantLock();
 
-  /** 
+  /**
    * Build the HMaster out of a raw configuration item.
    * @param conf configuration
-   * 
+   *
    * @throws IOException
    */
   public HMaster(HBaseConfiguration conf) throws IOException {
@@ -167,7 +167,7 @@ public class HMaster extends Thread impl
     String addressStr = DNS.getDefaultHost(
         conf.get("hbase.master.dns.interface","default"),
         conf.get("hbase.master.dns.nameserver","default"));
-    addressStr += ":" + 
+    addressStr += ":" +
       conf.get(MASTER_PORT, Integer.toString(DEFAULT_MASTER_PORT));
     HServerAddress hsa = new HServerAddress(addressStr);
     LOG.info("My address is " + hsa);
@@ -196,7 +196,7 @@ public class HMaster extends Thread impl
     try {
       // Make sure the hbase root directory exists!
       if (!fs.exists(rootdir)) {
-        fs.mkdirs(rootdir); 
+        fs.mkdirs(rootdir);
         FSUtils.setVersion(fs, rootdir);
       } else {
         FSUtils.checkVersion(fs, rootdir, true);
@@ -218,7 +218,7 @@ public class HMaster extends Thread impl
     this.server = HBaseRPC.getServer(this, hsa.getBindAddress(),
       hsa.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
       false, conf);
-         
+
     //  The rpc-server port can be ephemeral... ensure we have the correct info
     this.address = new HServerAddress(server.getListenerAddress());
 
@@ -230,16 +230,16 @@ public class HMaster extends Thread impl
       conf.getInt("hbase.master.meta.thread.rescanfrequency", 60 * 1000);
 
     this.sleeper = new Sleeper(this.threadWakeFrequency, this.closed);
-    
+
     zooKeeperWrapper = new ZooKeeperWrapper(conf, this);
     zkMasterAddressWatcher = new ZKMasterAddressWatcher(this);
     serverManager = new ServerManager(this);
     regionManager = new RegionManager(this);
-    
+
     writeAddressToZooKeeper(true);
     this.regionServerOperationQueue =
       new RegionServerOperationQueue(this.conf, this.closed);
-    
+
     // We're almost open for business
     this.closed.set(false);
     LOG.info("HMaster initialized on " + this.address.toString());
@@ -330,7 +330,7 @@ public class HMaster extends Thread impl
   public HServerAddress getMasterAddress() {
     return address;
   }
-  
+
   /**
   * @return HBase root dir.
    */
@@ -359,18 +359,18 @@ public class HMaster extends Thread impl
   public Map<String, HServerLoad> getServersToLoad() {
     return serverManager.getServersToLoad();
   }
-  
+
   /** @return The average load */
   public double getAverageLoad() {
     return serverManager.getAverageLoad();
   }
-  
+
   /** @return the number of regions on filesystem */
   public int countRegionsOnFS() {
     try {
       return regionManager.countRegionsOnFS();
     } catch (IOException e) {
-      LOG.warn("Get count of Regions on FileSystem error : " + 
+      LOG.warn("Get count of Regions on FileSystem error : " +
           StringUtils.stringifyException(e));
     }
     return -1;
@@ -386,14 +386,14 @@ public class HMaster extends Thread impl
     }
     return rootServer;
   }
-  
+
   /**
    * Wait until root region is available
    */
   public void waitForRootRegionLocation() {
     regionManager.waitForRootRegionLocation();
   }
-  
+
   /**
    * @return Read-only map of online regions.
    */
@@ -446,7 +446,7 @@ public class HMaster extends Thread impl
       LOG.fatal("Unhandled exception. Starting shutdown.", t);
       closed.set(true);
     }
-    
+
     // Wait for all the remaining region servers to report in.
     this.serverManager.letRegionServersShutdown();
 
@@ -473,14 +473,14 @@ public class HMaster extends Thread impl
   /*
    * Verifies if this instance of HBase is fresh or the master was started
    * following a failover. In the second case, it inspects the region server
-   * directory and gets their region assignments. 
+   * directory and gets their region assignments.
    */
   private void verifyClusterState()  {
     try {
       LOG.debug("Checking cluster state...");
       HServerAddress rootLocation = zooKeeperWrapper.readRootRegionLocation();
       List<HServerAddress> addresses =  zooKeeperWrapper.scanRSDirectory();
-      
+
       // Check if this is a fresh start of the cluster
       if(addresses.size() == 0) {
         LOG.debug("This is a fresh start, proceeding with normal startup");
@@ -489,13 +489,13 @@ public class HMaster extends Thread impl
       }
       LOG.info("This is a failover, ZK inspection begins...");
       boolean isRootRegionAssigned = false;
-      Map<byte[], HRegionInfo> assignedRegions = 
+      Map<byte[], HRegionInfo> assignedRegions =
         new HashMap<byte[], HRegionInfo>();
       // This is a failover case. We must:
       // - contact every region server to add them to the regionservers list
-      // - get their current region assignments 
+      // - get their current region assignments
       for (HServerAddress address : addresses) {
-        HRegionInterface hri = 
+        HRegionInterface hri =
           this.connection.getHRegionConnection(address, false);
         HServerInfo info = hri.getHServerInfo();
         LOG.debug("Inspection found server " + info.getName());
@@ -519,14 +519,14 @@ public class HMaster extends Thread impl
           assignedRegions.put(region.getRegionName(), region);
         }
       }
-      LOG.info("Inspection found " + assignedRegions.size() + " regions, " + 
+      LOG.info("Inspection found " + assignedRegions.size() + " regions, " +
           (isRootRegionAssigned ? "with -ROOT-" : "but -ROOT- was MIA"));
       splitLogAfterStartup();
     } catch(IOException ex) {
       ex.printStackTrace();
     }
   }
-  
+
   /**
    * Inspect the log directory to recover any log file without
   * an active region server.
@@ -630,7 +630,7 @@ public class HMaster extends Thread impl
     // Send back some config info
     return createConfigurationSubset();
   }
-  
+
   /**
   * @return Subset of configuration to pass to initializing regionservers:
   * e.g. the filesystem and root directory to use.
@@ -642,7 +642,7 @@ public class HMaster extends Thread impl
     if (rsAddress != null) {
       mw.put(new Text("hbase.regionserver.address"), new Text(rsAddress));
     }
-    
+
     return addConfig(mw, "fs.default.name");
   }
 
@@ -651,7 +651,7 @@ public class HMaster extends Thread impl
     return mw;
   }
 
-  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[], 
+  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[],
     HRegionInfo[] mostLoadedRegions)
   throws IOException {
     return adornRegionServerAnswer(serverInfo,
@@ -661,7 +661,7 @@ public class HMaster extends Thread impl
   /**
   * Override if you'd like to add messages to return to regionserver
   * <code>hsi</code>.
   * @param msgs Messages to add to the reply
-   * @return Messages to return to the regionserver 
+   * @return Messages to return to the regionserver
    */
   protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi,
       final HMsg [] msgs) {
@@ -683,7 +683,7 @@ public class HMaster extends Thread impl
   }
 
   public void createTable(HTableDescriptor desc, byte [][] splitKeys)
-  throws IOException {    
+  throws IOException {
     if (!isMasterRunning()) {
       throw new MasterNotRunningException();
     }
@@ -725,7 +725,7 @@ public class HMaster extends Thread impl
     }
   }
 
-  private synchronized void createTable(final HRegionInfo [] newRegions) 
+  private synchronized void createTable(final HRegionInfo [] newRegions)
   throws IOException {
     String tableName = newRegions[0].getTableDesc().getNameAsString();
     // 1. Check to see if table already exists. Get meta region where
@@ -733,7 +733,7 @@ public class HMaster extends Thread impl
     // for the table we want to create already exists, then table already
     // created. Throw already-exists exception.
     MetaRegion m = regionManager.getFirstMetaRegionForRegion(newRegions[0]);
-        
+
     byte [] metaRegionName = m.getRegionName();
     HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
     byte[] firstRowInTable = Bytes.toBytes(tableName + ",,");
@@ -767,11 +767,11 @@ public class HMaster extends Thread impl
   }
 
   public void addColumn(byte [] tableName, HColumnDescriptor column)
-  throws IOException {    
+  throws IOException {
     new AddColumn(this, tableName, column).process();
   }
 
-  public void modifyColumn(byte [] tableName, byte [] columnName, 
+  public void modifyColumn(byte [] tableName, byte [] columnName,
     HColumnDescriptor descriptor)
   throws IOException {
     new ModifyColumn(this, tableName, columnName, descriptor).process();
@@ -898,7 +898,7 @@ public class HMaster extends Thread impl
     }
     return null;
   }
-  
+
   /**
    * Get row from meta table.
    * @param row
@@ -914,7 +914,7 @@ public class HMaster extends Thread impl
     get.addFamily(family);
     return srvr.get(meta.getRegionName(), get);
   }
-  
+
   /*
    * @param meta
    * @return Server connection to <code>meta</code> .META. region.
@@ -925,12 +925,12 @@ public class HMaster extends Thread impl
     return this.connection.getHRegionConnection(meta.getServer());
   }
 
-  public void modifyTable(final byte[] tableName, HConstants.Modify op, 
+  public void modifyTable(final byte[] tableName, HConstants.Modify op,
       Writable[] args)
     throws IOException {
     switch (op) {
     case TABLE_SET_HTD:
-      if (args == null || args.length < 1 || 
+      if (args == null || args.length < 1 ||
           !(args[0] instanceof HTableDescriptor))
         throw new IOException("SET_HTD request requires an HTableDescriptor");
       HTableDescriptor htd = (HTableDescriptor) args[0];
@@ -977,12 +977,12 @@ public class HMaster extends Thread impl
       if (args.length == 2) {
         servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get());
       }
-      // Need hri 
+      // Need hri
       Result rr = getFromMETA(regionname, HConstants.CATALOG_FAMILY);
       HRegionInfo hri = getHRegionInfo(rr.getRow(), rr);
       if (servername == null) {
         // Get server from the .META. if it wasn't passed as argument
-        servername = 
+        servername =
           Bytes.toString(rr.getValue(CATALOG_FAMILY, SERVER_QUALIFIER));
       }
       // Take region out of the intransistions in case it got stuck there doing
@@ -1035,7 +1035,7 @@ public class HMaster extends Thread impl
   public HBaseConfiguration getConfiguration() {
     return this.conf;
   }
-  
+
   // TODO ryan rework this function
   /*
    * Get HRegionInfo from passed META map of row values.
@@ -1069,12 +1069,12 @@ public class HMaster extends Thread impl
   /*
   * When we find rows in a meta region that have an empty HRegionInfo, we
    * clean them up here.
-   * 
+   *
    * @param s connection to server serving meta region
    * @param metaRegionName name of the meta region we scanned
    * @param emptyRows the row keys that had empty HRegionInfos
    */
-  protected void deleteEmptyMetaRows(HRegionInterface s, 
+  protected void deleteEmptyMetaRows(HRegionInterface s,
       byte [] metaRegionName,
       List<byte []> emptyRows) {
     for (byte [] regionName: emptyRows) {
@@ -1089,7 +1089,7 @@ public class HMaster extends Thread impl
       }
     }
   }
-  
+
   /**
    * Get the ZK wrapper object
    * @return the zookeeper wrapper
@@ -1097,20 +1097,20 @@ public class HMaster extends Thread impl
   public ZooKeeperWrapper getZooKeeperWrapper() {
     return zooKeeperWrapper;
   }
-  
+
   /**
    * @see org.apache.zookeeper.Watcher#process(org.apache.zookeeper.WatchedEvent)
    */
   @Override
   public void process(WatchedEvent event) {
-    LOG.debug(("Got event " + event.getType() + 
+    LOG.debug(("Got event " + event.getType() +
         " with path " + event.getPath()));
-    // Master should kill itself if its session expired or if its 
+    // Master should kill itself if its session expired or if its
     // znode was deleted manually (usually for testing purposes)
-    if(event.getState() == KeeperState.Expired || 
-        (event.getType().equals(EventType.NodeDeleted) && 
+    if(event.getState() == KeeperState.Expired ||
+        (event.getType().equals(EventType.NodeDeleted) &&
             event.getPath().equals(
-                this.zooKeeperWrapper.getMasterElectionZNode())) 
+                this.zooKeeperWrapper.getMasterElectionZNode()))
                 && !shutdownRequested.get()) {
 
       LOG.info("Master lost its znode, trying to get a new one");
@@ -1133,7 +1133,7 @@ public class HMaster extends Thread impl
       }
     }
   }
-   
+
   /*
    * Main program
    */
@@ -1251,7 +1251,7 @@ public class HMaster extends Thread impl
       printUsageAndExit();
     }
   }
-  
+
   /**
    * Main program
    * @param args

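The process(WatchedEvent) hunk above captures the master's self-termination rule: abandon mastership when the ZooKeeper session expires or the election znode is deleted out from under it, unless a deliberate shutdown is already in progress. A self-contained sketch of just that decision, assuming only the plain ZooKeeper client API (MasterZNodeWatcher and abdicate() are hypothetical names):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.Watcher.Event.EventType;
    import org.apache.zookeeper.Watcher.Event.KeeperState;

    // Sketch of the self-termination rule in HMaster.process() above.
    class MasterZNodeWatcher implements Watcher {
      private final String masterElectionZNode;   // e.g. "/hbase/master"
      private volatile boolean shutdownRequested = false;

      MasterZNodeWatcher(final String znode) {
        this.masterElectionZNode = znode;
      }

      @Override
      public void process(WatchedEvent event) {
        boolean expired = event.getState() == KeeperState.Expired;
        boolean znodeGone = event.getType().equals(EventType.NodeDeleted)
            && masterElectionZNode.equals(event.getPath());
        if ((expired || znodeGone) && !shutdownRequested) {
          abdicate();
        }
      }

      private void abdicate() {
        // HMaster re-runs writeAddressToZooKeeper() at this point to
        // compete for a new znode, and shuts down if that fails; elided.
      }
    }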
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaRegion.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaRegion.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaRegion.java Fri May  7 19:17:48 2010
@@ -42,7 +42,7 @@ public class MetaRegion implements Compa
     }
     this.regionInfo = regionInfo;
   }
-  
+
   @Override
   public String toString() {
     return "{server: " + this.server.toString() + ", regionname: " +
@@ -65,13 +65,13 @@ public class MetaRegion implements Compa
     return regionInfo.getStartKey();
   }
 
-  
+
   /** @return the endKey */
   public byte [] getEndKey() {
     return regionInfo.getEndKey();
   }
 
-  
+
   public HRegionInfo getRegionInfo() {
     return regionInfo;
   }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaScanner.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaScanner.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaScanner.java Fri May  7 19:17:48 2010
@@ -30,24 +30,24 @@ import org.apache.hadoop.hbase.RemoteExc
 
 /**
 * MetaScanner scans the <code>META</code> table.
- * 
+ *
  * When a <code>META</code> server comes on line, a MetaRegion object is
  * queued up by regionServerReport() and this thread wakes up.
  *
- * It's important to do this work in a separate thread, or else the blocking 
+ * It's important to do this work in a separate thread, or else the blocking
  * action would prevent other work from getting done.
  */
 class MetaScanner extends BaseScanner {
   /** Initial work for the meta scanner is queued up here */
   private volatile BlockingQueue<MetaRegion> metaRegionsToScan =
     new LinkedBlockingQueue<MetaRegion>();
-    
+
   private final List<MetaRegion> metaRegionsToRescan =
     new ArrayList<MetaRegion>();
-    
+
   /**
    * Constructor
-   * 
+   *
    * @param master
    */
   public MetaScanner(HMaster master) {
@@ -88,7 +88,7 @@ class MetaScanner extends BaseScanner {
       // Make sure the file system is still available
       this.master.checkFileSystem();
     } catch (Exception e) {
-      // If for some reason we get some other kind of exception, 
+      // If for some reason we get some other kind of exception,
       // at least log it rather than exit silently.
       LOG.error("Unexpected exception", e);
     }
@@ -102,7 +102,7 @@ class MetaScanner extends BaseScanner {
         (region == null && metaRegionsToScan.size() > 0) &&
           !metaRegionsScanned()) {
       try {
-        region = metaRegionsToScan.poll(this.master.threadWakeFrequency, 
+        region = metaRegionsToScan.poll(this.master.threadWakeFrequency,
           TimeUnit.MILLISECONDS);
       } catch (InterruptedException e) {
         // continue
@@ -134,7 +134,7 @@ class MetaScanner extends BaseScanner {
   }
 
   /*
-   * Called by the meta scanner when it has completed scanning all meta 
+   * Called by the meta scanner when it has completed scanning all meta
    * regions. This wakes up any threads that were waiting for this to happen.
    * @param totalRows Total rows scanned.
   * @param regionCount Count of regions in the .META. table.
@@ -171,10 +171,10 @@ class MetaScanner extends BaseScanner {
     }
     return this.master.closed.get();
   }
-  
+
   /**
    * Add another meta region to scan to the queue.
-   */ 
+   */
   void addMetaRegionToScan(MetaRegion m) {
     metaRegionsToScan.add(m);
   }

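The poll() hunk above is the scanner's hand-off in miniature: producers add MetaRegions to a LinkedBlockingQueue, and the consumer polls with threadWakeFrequency as the timeout so it can recheck the master's closed flag instead of blocking forever on an empty queue. A generic sketch of the pattern (WorkQueue is an illustrative name, not an HBase class):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch of the poll-with-timeout hand-off MetaScanner uses above:
    // the bounded wait lets the consumer recheck a shutdown flag
    // instead of blocking forever on an empty queue.
    class WorkQueue<T> {
      private final BlockingQueue<T> queue = new LinkedBlockingQueue<T>();
      private final AtomicBoolean closed = new AtomicBoolean(false);
      private final long wakeFrequencyMs;

      WorkQueue(final long wakeFrequencyMs) {
        this.wakeFrequencyMs = wakeFrequencyMs;
      }

      void add(final T item) {
        queue.add(item);
      }

      void close() {
        closed.set(true);
      }

      // Returns the next item, or null once the queue is closed.
      T next() {
        while (!closed.get()) {
          try {
            T item = queue.poll(wakeFrequencyMs, TimeUnit.MILLISECONDS);
            if (item != null) {
              return item;
            }
          } catch (InterruptedException e) {
            // Fall through and recheck closed, as MetaScanner does.
          }
        }
        return null;
      }
    }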
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java Fri May  7 19:17:48 2010
@@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.HRegionIn
 class ModifyColumn extends ColumnOperation {
   private final HColumnDescriptor descriptor;
   private final byte [] columnName;
-  
-  ModifyColumn(final HMaster master, final byte [] tableName, 
-    final byte [] columnName, HColumnDescriptor descriptor) 
+
+  ModifyColumn(final HMaster master, final byte [] tableName,
+    final byte [] columnName, HColumnDescriptor descriptor)
   throws IOException {
     super(master, tableName);
     this.descriptor = descriptor;
@@ -47,7 +47,7 @@ class ModifyColumn extends ColumnOperati
         updateRegionInfo(server, m.getRegionName(), i);
       } else { // otherwise, we have an error.
         throw new InvalidColumnNameException("Column family '" +
-          Bytes.toString(columnName) + 
+          Bytes.toString(columnName) +
           "' doesn't exist, so cannot be modified.");
       }
     }

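The InvalidColumnNameException hunk above encodes a deliberate choice: modifying a column family is not an upsert; the family must already exist or the operation fails fast. A stripped-down sketch of that guard, with a plain Map standing in for the table's family descriptors (everything here beyond the error message is an illustrative assumption):

    import java.io.IOException;
    import java.util.Map;

    // Sketch of the exists-check in the ModifyColumn hunk above.
    class ModifyColumnSketch {
      static void modify(final Map<String, Object> families,
          final String columnName, final Object descriptor)
      throws IOException {
        if (families.containsKey(columnName)) {
          families.put(columnName, descriptor);  // updateRegionInfo(...)
        } else {
          // Mirrors the InvalidColumnNameException thrown above.
          throw new IOException("Column family '" + columnName
              + "' doesn't exist, so cannot be modified.");
        }
      }
    }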
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Fri May  7 19:17:48 2010
@@ -39,8 +39,8 @@ class ModifyTableMeta extends TableOpera
 
   private HTableDescriptor desc;
 
-  ModifyTableMeta(final HMaster master, final byte [] tableName, 
-    HTableDescriptor desc) 
+  ModifyTableMeta(final HMaster master, final byte [] tableName,
+    HTableDescriptor desc)
   throws IOException {
     super(master, tableName);
     this.desc = desc;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java Fri May  7 19:17:48 2010
@@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.HRegionIn
 
 /**
  * ProcessRegionClose is the way we do post-processing on a closed region. We
- * only spawn one of these asynchronous tasks when the region needs to be 
+ * only spawn one of these asynchronous tasks when the region needs to be
  * either offlined or deleted. We used to create one of these tasks whenever
  * a region was closed, but since closing a region that isn't being offlined
- * or deleted doesn't actually require post-processing, it's no longer 
+ * or deleted doesn't actually require post-processing, it's no longer
  * necessary.
  */
 class ProcessRegionClose extends ProcessRegionStatusChange {
@@ -42,7 +42,7 @@ class ProcessRegionClose extends Process
   * @param offlineRegion if true, set the region to offline in meta
   * @param reassignRegion if true, region is to be reassigned
   */
-  public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, 
+  public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
       boolean offlineRegion, boolean reassignRegion) {
 
    super(master, regionInfo);
@@ -73,7 +73,7 @@ class ProcessRegionClose extends Process
 
             // We can't proceed unless the meta region we are going to update
             // is online. metaRegionAvailable() will put this operation on the
-            // delayedToDoQueue, so we return true so that the operation is not put 
+            // delayedToDoQueue, so we return true so that the operation is not put
             // back on the toDoQueue
 
             if(offlineRegion) {

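The rewrapped comment above documents a subtle requeue contract: when the meta region to update is offline, metaRegionAvailable() parks the operation on the delayedToDoQueue, and the caller must report success so the dispatcher does not also requeue it on the toDoQueue and process it twice. A sketch of that contract, with all names other than the two queues treated as stand-ins:

    // Sketch of the two-queue contract described in the comment above;
    // the types and method names are stand-ins for the real master code.
    abstract class RegionCloseSketch {
      // Returns true when no further requeuing is needed.
      boolean run(final Object op) {
        if (!metaRegionAvailable(op)) {
          // metaRegionAvailable() has already parked op on the delayed
          // queue; returning true keeps the dispatcher from putting it
          // back on the main to-do queue as well.
          return true;
        }
        return offlineOrDeleteInMeta(op);
      }

      abstract boolean metaRegionAvailable(Object op); // parks op if false
      abstract boolean offlineOrDeleteInMeta(Object op);
    }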
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Fri May  7 19:17:48 2010
@@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 
-/** 
+/**
  * ProcessRegionOpen is instantiated when a region server reports that it is
- * serving a region. This applies to all meta and user regions except the 
+ * serving a region. This applies to all meta and user regions except the
  * root region which is handled specially.
  */
 class ProcessRegionOpen extends ProcessRegionStatusChange {

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java Fri May  7 19:17:48 2010
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 
 /**
- * Abstract class that performs common operations for 
+ * Abstract class that performs common operations for
  * {@link ProcessRegionClose} and {@link ProcessRegionOpen}.
  */
 abstract class ProcessRegionStatusChange extends RegionServerOperation {
@@ -41,7 +41,7 @@ abstract class ProcessRegionStatusChange
     this.regionInfo = regionInfo;
     this.isMetaTable = regionInfo.isMetaTable();
   }
-  
+
   protected boolean metaRegionAvailable() {
     boolean available = true;
     if (isMetaTable) {

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Fri May  7 19:17:48 2010
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.master.RegionManager.RegionState;
 
-/** 
+/**
  * Instantiated when a server's lease has expired, meaning it has crashed.
  * The region server's log file needs to be split up for each region it was
  * serving, and the regions need to get reassigned.
@@ -117,19 +117,19 @@ class ProcessServerShutdown extends Regi
     return this.deadServerAddress;
   }
 
-  private void closeRegionsInTransition() {    
+  private void closeRegionsInTransition() {
     Map<String, RegionState> inTransition =
       master.regionManager.getRegionsInTransitionOnServer(deadServer);
     for (Map.Entry<String, RegionState> entry : inTransition.entrySet()) {
       String regionName = entry.getKey();
       RegionState state = entry.getValue();
-      
+
       LOG.info("Region " + regionName + " was in transition " +
           state + " on dead server " + deadServer + " - marking unassigned");
       master.regionManager.setUnassigned(state.getRegionInfo(), true);
     }
   }
-  
+
   @Override
   public String toString() {
     return "ProcessServerShutdown of " + this.deadServer;
@@ -225,7 +225,7 @@ class ProcessServerShutdown extends Regi
     }
 
     // Scan complete. Remove any rows which had empty HRegionInfos
-    
+
     if (emptyRows.size() > 0) {
       LOG.warn("Found " + emptyRows.size() +
         " rows with empty HRegionInfo while scanning meta region " +
@@ -269,7 +269,7 @@ class ProcessServerShutdown extends Regi
     ScanMetaRegions(MetaRegion m, HMaster master) {
       super(m, master);
     }
-    
+
     public Boolean call() throws IOException {
       if (LOG.isDebugEnabled()) {
         LOG.debug("process server shutdown scanning " +
@@ -289,9 +289,9 @@ class ProcessServerShutdown extends Regi
     LOG.info("process shutdown of server " + this.deadServer +
       ": logSplit: " +
       logSplit + ", rootRescanned: " + rootRescanned +
-      ", numberOfMetaRegions: " + 
+      ", numberOfMetaRegions: " +
       master.regionManager.numMetaRegions() +
-      ", onlineMetaRegions.size(): " + 
+      ", onlineMetaRegions.size(): " +
       master.regionManager.numOnlineMetaRegions());
     if (!logSplit) {
       // Process the old log file
@@ -345,7 +345,7 @@ class ProcessServerShutdown extends Regi
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("process server shutdown scanning root region on " +
-          master.getRootRegionLocation().getBindAddress() + 
+          master.getRootRegionLocation().getBindAddress() +
           " finished " + Thread.currentThread().getName());
       }
       rootRescanned = true;
@@ -368,7 +368,7 @@ class ProcessServerShutdown extends Regi
           Bytes.toString(r.getRegionName()) + " on " + r.getServer());
       }
     }
-    
+
     closeRegionsInTransition();
 
     // Remove this server from dead servers list.  Finished splitting logs.