Posted to commits@hbase.apache.org by st...@apache.org on 2010/08/27 07:01:07 UTC

svn commit: r990018 [2/10] - in /hbase/branches/0.90_master_rewrite: ./ bin/ bin/replication/ src/assembly/ src/docbkx/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/filter/ s...

Added: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java?rev=990018&view=auto
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java (added)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java Fri Aug 27 05:01:02 2010
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.filter;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.DataInput;
+
+/**
+ * This filter is used for selecting only those keys with columns that match
+ * a particular prefix. For example, if the prefix is 'an', it will pass keys
+ * with columns like 'and' or 'anti', but not keys with columns like 'ball'
+ * or 'act'.
+ */
+public class ColumnPrefixFilter extends FilterBase {
+  protected byte [] prefix = null;
+
+  public ColumnPrefixFilter() {
+    super();
+  }
+
+  public ColumnPrefixFilter(final byte [] prefix) {
+    this.prefix = prefix;
+  }
+
+  public byte[] getPrefix() {
+    return prefix;
+  }
+
+  @Override
+  public ReturnCode filterKeyValue(KeyValue kv) {
+    if (this.prefix == null || kv.getBuffer() == null) {
+      return ReturnCode.INCLUDE;
+    } else {
+      return filterColumn(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());
+    }
+  }
+
+  public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) {
+    if (qualifierLength < prefix.length) {
+      int cmp = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, this.prefix, 0,
+          qualifierLength);
+      if (cmp <= 0) {
+        return ReturnCode.SEEK_NEXT_USING_HINT;
+      } else {
+        return ReturnCode.NEXT_ROW;
+      }
+    } else {
+      int cmp = Bytes.compareTo(buffer, qualifierOffset, this.prefix.length, this.prefix, 0,
+          this.prefix.length);
+      if (cmp < 0) {
+        return ReturnCode.SEEK_NEXT_USING_HINT;
+      } else if (cmp > 0) {
+        return ReturnCode.NEXT_ROW;
+      } else {
+        return ReturnCode.INCLUDE;
+      }
+    }
+  }
+
+  public void write(DataOutput out) throws IOException {
+    Bytes.writeByteArray(out, this.prefix);
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    this.prefix = Bytes.readByteArray(in);
+  }
+
+  public KeyValue getNextKeyHint(KeyValue kv) {
+    return KeyValue.createFirstOnRow(
+        kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(),
+        kv.getFamilyOffset(), kv.getFamilyLength(), prefix, 0, prefix.length);
+  }
+}
\ No newline at end of file
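
For context, a minimal client-side usage sketch (illustrative, not part of this
commit; imports elided, and the family, qualifier and table values are made up):

    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("f"));
    // Passes qualifiers such as 'and' and 'anti'; filters out 'ball' and 'act'.
    scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("an")));
    ResultScanner rs = table.getScanner(scan);   // table: an open HTable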

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/Filter.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/Filter.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/Filter.java Fri Aug 27 05:01:02 2010
@@ -102,11 +102,19 @@ public interface Filter extends Writable
      */
     SKIP,
     /**
+     * Skip this column. Go to the next column in this row.
+     */
+    NEXT_COL,
+    /**
      * Done with columns, skip to next row. Note that filterRow() will
      * still be called.
      */
     NEXT_ROW,
-  }
+    /**
+     * Seek to the next key, which is given as a hint by the filter.
+     */
+    SEEK_NEXT_USING_HINT,
+  }
 
   /**
    * Chance to alter the list of keyvalues to be submitted.
@@ -132,4 +140,13 @@ public interface Filter extends Writable
    */
   public boolean filterRow();
 
+  /**
+   * If the filter returns the match code SEEK_NEXT_USING_HINT, it should also
+   * tell which key it wants the scanner to seek to next. After receiving the
+   * match code SEEK_NEXT_USING_HINT, the QueryMatcher calls this function to
+   * find out that key.
+   * @return the KeyValue to seek to next, or null if the filter is not sure
+   * which key to seek to next.
+   */
+  public KeyValue getNextKeyHint(KeyValue currentKV);
 }
\ No newline at end of file
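
A hedged sketch of how the scanner side is expected to consume the new return
code (the surrounding names are illustrative, not the actual QueryMatcher code):

    Filter.ReturnCode rc = filter.filterKeyValue(kv);
    if (rc == Filter.ReturnCode.SEEK_NEXT_USING_HINT) {
      KeyValue hint = filter.getNextKeyHint(kv);
      if (hint != null) {
        // Seek directly to the hinted key instead of skipping one
        // KeyValue at a time; a null hint degrades to SKIP behavior.
      }
    }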

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java Fri Aug 27 05:01:02 2010
@@ -110,4 +110,15 @@ public abstract class FilterBase impleme
   public boolean filterRow() {
     return false;
   }
+
+  /**
+   * Filters that do not know which key to seek to next can inherit this
+   * implementation, which by default returns a null KeyValue.
+   *
+   * {@inheritDoc}
+   */
+  public KeyValue getNextKeyHint(KeyValue currentKV) {
+    return null;
+  }
+
 }

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java Fri Aug 27 05:01:02 2010
@@ -245,4 +245,9 @@ public class FilterList implements Filte
       HbaseObjectWritable.writeObject(out, filter, Writable.class, conf);
     }
   }
+
+  @Override
+  public KeyValue getNextKeyHint(KeyValue currentKV) {
+    return null;
+  }
 }
\ No newline at end of file

Added: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java?rev=990018&view=auto
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java (added)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java Fri Aug 27 05:01:02 2010
@@ -0,0 +1,91 @@
+package org.apache.hadoop.hbase.filter;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.KeyValue;
+
+/**
+ * Filter that returns only cells whose timestamp (version) is
+ * in the specified list of timestamps (versions).
+ * <p>
+ * Note: Use of this filter overrides any time range/time stamp
+ * options specified using {@link Get#setTimeRange(long, long)},
+ * {@link Scan#setTimeRange(long, long)}, {@link Get#setTimeStamp(long)},
+ * or {@link Scan#setTimeStamp(long)}.
+ */
+public class TimestampsFilter extends FilterBase {
+
+  TreeSet<Long> timestamps;
+
+  // Used during scans to hint the scan to stop early
+  // once the timestamps fall below the minTimeStamp.
+  long minTimeStamp = Long.MAX_VALUE;
+
+  /**
+   * Used during deserialization. Do not use otherwise.
+   */
+  public TimestampsFilter() {
+    super();
+  }
+
+  /**
+   * Constructor for filter that retains only those
+   * cells whose timestamp (version) is in the specified
+   * list of timestamps.
+   *
+   * @param timestamps list of timestamps (versions) to retain
+   */
+  public TimestampsFilter(List<Long> timestamps) {
+    this.timestamps = new TreeSet<Long>(timestamps);
+    init();
+  }
+
+  private void init() {
+    if (this.timestamps.size() > 0) {
+      minTimeStamp = this.timestamps.first();
+    }
+  }
+
+  /**
+   * Gets the minimum timestamp requested by filter.
+   * @return  minimum timestamp requested by filter.
+   */
+  public long getMin() {
+    return minTimeStamp;
+  }
+
+  @Override
+  public ReturnCode filterKeyValue(KeyValue v) {
+    if (this.timestamps.contains(v.getTimestamp())) {
+      return ReturnCode.INCLUDE;
+    } else if (v.getTimestamp() < minTimeStamp) {
+      // Versions of a column are sorted by decreasing timestamp, so the
+      // remaining versions of this column are guaranteed to be below
+      // minTimeStamp as well; skip ahead to the next column.
+      return ReturnCode.NEXT_COL;
+    }
+    return ReturnCode.SKIP;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int numTimestamps = in.readInt();
+    this.timestamps = new TreeSet<Long>();
+    for (int idx = 0; idx < numTimestamps; idx++) {
+      this.timestamps.add(in.readLong());
+    }
+    init();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    int numTimestamps = this.timestamps.size();
+    out.writeInt(numTimestamps);
+    for (Long timestamp : this.timestamps) {
+      out.writeLong(timestamp);
+    }
+  }
+}
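
A minimal usage sketch (illustrative, not part of this commit; imports elided,
and the row key and timestamps are made up):

    Get get = new Get(Bytes.toBytes("row1"));
    // Return only the cell versions written at exactly these timestamps.
    get.setFilter(new TimestampsFilter(Arrays.asList(1000L, 2000L)));
    Result result = table.get(get);   // table: an open HTable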

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Fri Aug 27 05:01:02 2010
@@ -201,6 +201,37 @@ public class HalfStoreFileReader extends
         return delegate.seekTo(key, offset, length);
       }
 
+      @Override
+      public int reseekTo(byte[] key) throws IOException {
+        return reseekTo(key, 0, key.length);
+      }
+
+      @Override
+      public int reseekTo(byte[] key, int offset, int length)
+      throws IOException {
+        // This function is identical to the corresponding seekTo function
+        // except that we call reseekTo (and not seekTo) on the delegate.
+        if (top) {
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) < 0) {
+            return -1;
+          }
+        } else {
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) >= 0) {
+            // The key falls in the top half of the file; the closest this
+            // bottom-half scanner can get is just before the split key, so
+            // seekBefore should never fail here.
+            boolean res = delegate.seekBefore(splitkey, 0, splitkey.length);
+            if (!res) {
+              throw new IOException("Seeking for a key in bottom of file, but" +
+                  " key exists in top of file, failed on seekBefore(midkey)");
+            }
+            return 1;
+          }
+        }
+        return delegate.reseekTo(key, offset, length);
+      }
+
       public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
         return this.delegate.getReader();
       }

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java Fri Aug 27 05:01:02 2010
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
 import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.filter.DependentColumnFilter;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
@@ -184,8 +185,9 @@ public class HbaseObjectWritable impleme
 
     // List
     addToMap(List.class, code++);
-    //
+
     addToMap(NavigableSet.class, code++);
+    addToMap(ColumnPrefixFilter.class, code++);
   }
 
   private Class<?> declaredClass;
@@ -464,13 +466,19 @@ public class HbaseObjectWritable impleme
         try {
           instanceClass = getClassByName(conf, className);
         } catch (ClassNotFoundException e) {
-          throw new RuntimeException("Can't find class " + className);
+          LOG.error("Can't find class " + className, e);
+          throw new IOException("Can't find class " + className, e);
         }
       } else {
         instanceClass = CODE_TO_CLASS.get(b);
       }
       Writable writable = WritableFactories.newInstance(instanceClass, conf);
-      writable.readFields(in);
+      try {
+        writable.readFields(in);
+      } catch (Exception e) {
+        LOG.error("Error in readFields", e);
+        throw new IOException("Error in readFields" , e);
+      }
       instance = writable;
       if (instanceClass == NullInstance.class) {  // null
         declaredClass = ((NullInstance)instance).declaredClass;
@@ -509,4 +517,4 @@ public class HbaseObjectWritable impleme
   public Configuration getConf() {
     return this.conf;
   }
-}
\ No newline at end of file
+}

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java Fri Aug 27 05:01:02 2010
@@ -147,6 +147,23 @@ public class TimeRange implements Writab
     return (timestamp >= minStamp);
   }
 
+  /**
+   * Compare the timestamp to timerange
+   * @param timestamp
+   * @return -1 if timestamp is less than timerange,
+   * 0 if timestamp is within timerange,
+   * 1 if timestamp is greater than timerange
+   */
+  public int compare(long timestamp) {
+    if (timestamp < minStamp) {
+      return -1;
+    } else if (timestamp >= maxStamp) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
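
The new method makes the half-open [minStamp, maxStamp) semantics explicit;
for example (illustrative values; the two-arg constructor declares IOException):

    TimeRange tr = new TimeRange(100L, 200L);
    tr.compare(50L);    // -1: below the range
    tr.compare(150L);   //  0: within the range
    tr.compare(200L);   //  1: maxStamp is exclusive, so this is above it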

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java Fri Aug 27 05:01:02 2010
@@ -108,5 +108,4 @@ public class CachedBlock implements Heap
   public BlockPriority getPriority() {
     return this.priority;
   }
-}
-
+}
\ No newline at end of file

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java Fri Aug 27 05:01:02 2010
@@ -84,15 +84,14 @@ public class CachedBlockQueue implements
   }
 
   /**
-   * Get a sorted List of all elements in this queue, in descending order.
-   * @return list of cached elements in descending order
+   * @return a sorted List of all elements in this queue, in descending order
    */
-  public CachedBlock [] get() {
+  public LinkedList<CachedBlock> get() {
     LinkedList<CachedBlock> blocks = new LinkedList<CachedBlock>();
-    while(!queue.isEmpty()) {
+    while (!queue.isEmpty()) {
       blocks.addFirst(queue.poll());
     }
-    return blocks.toArray(new CachedBlock[blocks.size()]);
+    return blocks;
   }
 
   /**

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri Aug 27 05:01:02 2010
@@ -30,7 +30,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.SortedSet;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -1256,13 +1255,37 @@ public class HFile {
         return seekTo(key, 0, key.length);
       }
 
-
       public int seekTo(byte[] key, int offset, int length) throws IOException {
         int b = reader.blockContainingKey(key, offset, length);
         if (b < 0) return -1; // falls before the beginning of the file! :-(
         // Avoid re-reading the same block (that'd be dumb).
-        loadBlock(b);
+        loadBlock(b, true);
+        return blockSeek(key, offset, length, false);
+      }
+
+      public int reseekTo(byte [] key) throws IOException {
+        return reseekTo(key, 0, key.length);
+      }
+
+      public int reseekTo(byte[] key, int offset, int length)
+        throws IOException {
+
+        if (this.block != null && this.currKeyLen != 0) {
+          ByteBuffer bb = getKey();
+          int compared = this.reader.comparator.compare(key, offset, length,
+              bb.array(), bb.arrayOffset(), bb.limit());
+          if (compared < 1) {
+            // If the required key is less than or equal to the current key,
+            // don't do anything.
+            return compared;
+          }
+        }
 
+        int b = reader.blockContainingKey(key, offset, length);
+        if (b < 0) {
+          return -1;
+        }
+        loadBlock(b, false);
         return blockSeek(key, offset, length, false);
       }
 
@@ -1336,7 +1359,7 @@ public class HFile {
           b--;
           // TODO shortcut: seek forward in this block to the last key of the block.
         }
-        loadBlock(b);
+        loadBlock(b, true);
         blockSeek(key, offset, length, true);
         return true;
       }
@@ -1377,7 +1400,7 @@ public class HFile {
         return true;
       }
 
-      private void loadBlock(int bloc) throws IOException {
+      private void loadBlock(int bloc, boolean rewind) throws IOException {
         if (block == null) {
           block = reader.readBlock(bloc, this.cacheBlocks, this.pread);
           currBlock = bloc;
@@ -1389,7 +1412,13 @@ public class HFile {
             blockFetches++;
           } else {
             // we are already in the same block, just rewind to seek again.
-            block.rewind();
+            if (rewind) {
+              block.rewind();
+            } else {
+              // Back up over the key-length and value-length ints
+              // (4 + 4 = 8 bytes) so the next blockSeek re-reads them.
+              block.position(block.position() - 8);
+            }
           }
         }
       }

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java Fri Aug 27 05:01:02 2010
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.io.hfile
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.SortedSet;
 
 import org.apache.hadoop.hbase.KeyValue;
 
@@ -47,13 +46,38 @@ public interface HFileScanner {
    * @return -1, if key < k[0], no position;
    * 0, such that k[i] = key and scanner is left in position i; and
    * 1, such that k[i] < key, and scanner is left in position i.
-   * Furthermore, there may be a k[i+1], such that k[i] < key < k[i+1]
-   * but there may not be a k[i+1], and next() will return false (EOF).
+   * The scanner will position itself between k[i] and k[i+1] where
+   * k[i] < key <= k[i+1].
+   * If there is no key k[i+1] greater than or equal to the input key, then the
+   * scanner will position itself at the end of the file and next() will return
+   * false when it is called.
    * @throws IOException
    */
   public int seekTo(byte[] key) throws IOException;
   public int seekTo(byte[] key, int offset, int length) throws IOException;
   /**
+   * Reseek to or just before the passed <code>key</code>. Similar to seekTo
+   * except that this can be called even if the scanner is not at the beginning
+   * of a file.
+   * This can be used to seek only to keys which come after the current position
+   * of the scanner.
+   * Consider the key stream of all the keys in the file,
+   * <code>k[0] .. k[n]</code>, where there are n keys in the file after the
+   * current position of the HFileScanner.
+   * The scanner will position itself between k[i] and k[i+1] where
+   * k[i] < key <= k[i+1].
+   * If there is no key k[i+1] greater than or equal to the input key, then the
+   * scanner will position itself at the end of the file and next() will return
+   * false when it is called.
+   * @param key Key to find (should be non-null)
+   * @return -1, if key < k[0], no position;
+   * 0, such that k[i] = key and scanner is left in position i; and
+   * 1, such that k[i] < key, and scanner is left in position i.
+   * @throws IOException
+   */
+  public int reseekTo(byte[] key) throws IOException;
+  public int reseekTo(byte[] key, int offset, int length) throws IOException;
+  /**
    * Consider the key stream of all the keys in the file,
    * <code>k[0] .. k[n]</code>, where there are n keys in the file.
    * @param key Key to find
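
In scanner terms, the difference between the two calls is roughly this (a
hedged sketch; reader setup is elided and keyA/keyB are illustrative):

    HFileScanner scanner = reader.getScanner(true, true); // cacheBlocks, pread
    scanner.seekTo(keyA);    // absolute: may reposition anywhere in the file
    scanner.reseekTo(keyB);  // forward-only: assumes keyB sorts at or after
                             // the current key, so the scanner can continue
                             // from its current block instead of rewinding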

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Fri Aug 27 05:01:02 2010
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.io.hfile
 
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
+import java.util.LinkedList;
 import java.util.PriorityQueue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
@@ -34,6 +35,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * A block cache implementation that is memory-aware using {@link HeapSize},
@@ -93,7 +95,7 @@ public class LruBlockCache implements Bl
   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
 
   /** Statistics thread */
-  static final int statThreadPeriod = 60;
+  static final int statThreadPeriod = 60 * 5;
 
   /** Concurrent map (the cache) */
   private final ConcurrentHashMap<String,CachedBlock> map;
@@ -317,11 +319,14 @@ public class LruBlockCache implements Bl
 
     try {
       evictionInProgress = true;
+      long currentSize = this.size.get();
+      long bytesToFree = currentSize - minSize();
 
-      long bytesToFree = size.get() - minSize();
-
-      LOG.debug("Block cache LRU eviction started.  Attempting to free " +
-          bytesToFree + " bytes");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Block cache LRU eviction started; Attempting to free " +
+          StringUtils.byteDesc(bytesToFree) + " of total=" +
+          StringUtils.byteDesc(currentSize));
+      }
 
       if(bytesToFree <= 0) return;
 
@@ -372,17 +377,17 @@ public class LruBlockCache implements Bl
         remainingBuckets--;
       }
 
-      float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024));
-      float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024));
-      float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024));
-
-      LOG.debug("Block cache LRU eviction completed. " +
-          "Freed " + bytesFreed + " bytes.  " +
-          "Priority Sizes: " +
-          "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " +
-          "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," +
-          "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")");
-
+      if (LOG.isDebugEnabled()) {
+        long single = bucketSingle.totalSize();
+        long multi = bucketMulti.totalSize();
+        long memory = bucketMemory.totalSize();
+        LOG.debug("Block cache LRU eviction completed; " +
+          "freed=" + StringUtils.byteDesc(bytesFreed) + ", " +
+          "total=" + StringUtils.byteDesc(this.size.get()) + ", " +
+          "single=" + StringUtils.byteDesc(single) + ", " +
+          "multi=" + StringUtils.byteDesc(multi) + ", " +
+          "memory=" + StringUtils.byteDesc(memory));
+      }
     } finally {
       stats.evict();
       evictionInProgress = false;
@@ -414,10 +419,10 @@ public class LruBlockCache implements Bl
     }
 
     public long free(long toFree) {
-      CachedBlock [] blocks = queue.get();
+      LinkedList<CachedBlock> blocks = queue.get();
       long freedBytes = 0;
-      for(int i=0; i<blocks.length; i++) {
-        freedBytes += evictBlock(blocks[i]);
+      for(CachedBlock cb: blocks) {
+        freedBytes += evictBlock(cb);
         if(freedBytes >= toFree) {
           return freedBytes;
         }
@@ -524,7 +529,7 @@ public class LruBlockCache implements Bl
   /*
    * Statistics thread.  Periodically prints the cache statistics to the log.
    */
-  private static class StatisticsThread extends Thread {
+  static class StatisticsThread extends Thread {
     LruBlockCache lru;
 
     public StatisticsThread(LruBlockCache lru) {
@@ -539,27 +544,21 @@ public class LruBlockCache implements Bl
   }
 
   public void logStats() {
+    if (!LOG.isDebugEnabled()) return;
     // Log size
     long totalSize = heapSize();
     long freeSize = maxSize - totalSize;
-    float sizeMB = ((float)totalSize)/((float)(1024*1024));
-    float freeMB = ((float)freeSize)/((float)(1024*1024));
-    float maxMB = ((float)maxSize)/((float)(1024*1024));
-    LruBlockCache.LOG.debug("Cache Stats: Sizes: " +
-        "Total=" + sizeMB + "MB (" + totalSize + "), " +
-        "Free=" + freeMB + "MB (" + freeSize + "), " +
-        "Max=" + maxMB + "MB (" + maxSize +")" +
-      ", Counts: " +
-        "Blocks=" + size() +", " +
-        "Access=" + stats.getRequestCount() + ", " +
-        "Hit=" + stats.getHitCount() + ", " +
-        "Miss=" + stats.getMissCount() + ", " +
-        "Evictions=" + stats.getEvictionCount() + ", " +
-        "Evicted=" + stats.getEvictedCount() +
-      ", Ratios: " +
-        "Hit Ratio=" + stats.getHitRatio()*100 + "%, " +
-        "Miss Ratio=" + stats.getMissRatio()*100 + "%, " +
-        "Evicted/Run=" + stats.evictedPerEviction());
+    LruBlockCache.LOG.debug("LRU Stats: " +
+        "total=" + StringUtils.byteDesc(totalSize) + ", " +
+        "free=" + StringUtils.byteDesc(freeSize) + ", " +
+        "max=" + StringUtils.byteDesc(this.maxSize) + ", " +
+        "blocks=" + size() +", " +
+        "accesses=" + stats.getRequestCount() + ", " +
+        "hits=" + stats.getHitCount() + ", " +
+        "hitRatio=" + StringUtils.formatPercent(stats.getHitRatio(), 2) + "%, " +
+        "evictions=" + stats.getEvictionCount() + ", " +
+        "evicted=" + stats.getEvictedCount() + ", " +
+        "evictedPerRun=" + stats.evictedPerEviction());
   }
 
   /**

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java Fri Aug 27 05:01:02 2010
@@ -29,6 +29,8 @@ import org.apache.hadoop.metrics.Updater
 import org.apache.hadoop.metrics.util.MetricsRegistry;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 
+import java.lang.reflect.Method;
+
 /**
  *
  * This class is for maintaining  the various RPC statistics
@@ -57,6 +59,9 @@ public class HBaseRpcMetrics implements 
 
     context.registerUpdater(this);
 
+    initMethods(HMasterInterface.class);
+    initMethods(HMasterRegionInterface.class);
+    initMethods(HRegionInterface.class);
     rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
   }
 
@@ -73,6 +78,12 @@ public class HBaseRpcMetrics implements 
 
   //public Map <String, MetricsTimeVaryingRate> metricsList = Collections.synchronizedMap(new HashMap<String, MetricsTimeVaryingRate>());
 
+  private void initMethods(Class<? extends HBaseRPCProtocolVersion> protocol) {
+    for (Method m : protocol.getDeclaredMethods()) {
+      if (get(m.getName()) == null)
+        create(m.getName());
+    }
+  }
 
   private MetricsTimeVaryingRate get(String key) {
     return (MetricsTimeVaryingRate) registry.get(key);

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Fri Aug 27 05:01:02 2010
@@ -918,7 +918,7 @@ public abstract class HBaseServer {
           try {
             value = call(call.param, call.timestamp);             // make the call
           } catch (Throwable e) {
-            LOG.info(getName()+", call "+call+": error: " + e, e);
+            LOG.debug(getName()+", call "+call+": error: " + e, e);
             errorClass = e.getClass().getName();
             error = StringUtils.stringifyException(e);
           }

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java Fri Aug 27 05:01:02 2010
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 
 /**
  * Clients interact with HRegionServers using a handle to the HRegionInterface.
@@ -336,4 +337,15 @@ public interface HRegionInterface extend
    */
   void compactRegion(HRegionInfo regionInfo, boolean major)
   throws NotServingRegionException, IOException;
-}
\ No newline at end of file
+
+  /**
+   * Replicates the given entries. The guarantee is that the given entries
+   * will be durable on the slave cluster if this method returns without
+   * any exception.
+   * hbase.replication has to be set to true for this to work.
+   *
+   * @param entries entries to replicate
+   * @throws IOException
+   */
+  public void replicateLogEntries(HLog.Entry[] entries) throws IOException;
+}

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java Fri Aug 27 05:01:02 2010
@@ -123,14 +123,14 @@ extends InputFormat<ImmutableBytesWritab
    */
   @Override
   public List<InputSplit> getSplits(JobContext context) throws IOException {
+    if (table == null) {
+      throw new IOException("No table was provided.");
+    }
     Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
     if (keys == null || keys.getFirst() == null ||
         keys.getFirst().length == 0) {
       throw new IOException("Expecting at least one region.");
     }
-    if (table == null) {
-      throw new IOException("No table was provided.");
-    }
     int count = 0;
     List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length);
     for (int i = 0; i < keys.getFirst().length; i++) {

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java Fri Aug 27 05:01:02 2010
@@ -141,7 +141,7 @@ public class TableMapReduceUtil {
   public static void initTableReducerJob(String table,
     Class<? extends TableReducer> reducer, Job job,
     Class partitioner) throws IOException {
-    initTableReducerJob(table, reducer, job, null, null, null, null);
+    initTableReducerJob(table, reducer, job, partitioner, null, null, null);
   }
 
   /**

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Fri Aug 27 05:01:02 2010
@@ -31,6 +31,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -182,7 +186,6 @@ implements HMasterInterface, HMasterRegi
   public HMaster(final Configuration conf)
   throws IOException, KeeperException, InterruptedException {
     this.conf = conf;
-
     /*
      * 1. Determine address and initialize RPC server (but do not start).
      * The RPC server ports can be ephemeral.
@@ -196,6 +199,12 @@ implements HMasterInterface, HMasterRegi
     // set the thread name now we have an address
     setName(MASTER + "-" + this.address);
 
+    // Hack! Maps DFSClient => Master for logs.  HDFS made this 
+    // config param for task trackers, but we can piggyback off of it.
+    if (this.conf.get("mapred.task.id") == null) {
+      this.conf.set("mapred.task.id", "hb_m_" + this.address.toString());
+    }
+
     /*
      * 2. Determine if this is a fresh cluster startup or failed over master.
      * This is done by checking for the existence of any ephemeral
@@ -855,22 +864,41 @@ implements HMasterInterface, HMasterRegi
   }
 
   protected static void doMain(String [] args,
-      Class<? extends HMaster> masterClass) throws IOException {
-    if (args.length < 1) {
-      printUsageAndExit();
-    }
+      Class<? extends HMaster> masterClass) {
     Configuration conf = HBaseConfiguration.create();
-    // Process command-line args.
-    for (String cmd: args) {
-      if (cmd.startsWith("--minServers=")) {
-        // How many servers must check in before we'll start assigning.
-        // TODO: Verify works with new master regime.
+
+    Options opt = new Options();
+    opt.addOption("minServers", true, "Minimum RegionServers needed to host user tables");
+    opt.addOption("D", true, "Override HBase Configuration Settings");
+    opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
+    try {
+      CommandLine cmd = new GnuParser().parse(opt, args);
+
+      if (cmd.hasOption("minServers")) {
+        String val = cmd.getOptionValue("minServers");
         conf.setInt("hbase.regions.server.count.min",
-          Integer.valueOf(cmd.substring(13)));
-        continue;
+            Integer.valueOf(val));
+        LOG.debug("minServers set to " + val);
       }
 
-      if (cmd.equalsIgnoreCase("start")) {
+      if (cmd.hasOption("D")) {
+        for (String confOpt : cmd.getOptionValues("D")) {
+          String[] kv = confOpt.split("=", 2);
+          if (kv.length == 2) {
+            conf.set(kv[0], kv[1]);
+            LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
+          } else {
+            throw new ParseException("-D option format invalid: " + confOpt);
+          }
+        }
+      }
+      
+      // check if we are the backup master - override the conf if so
+      if (cmd.hasOption("backup")) {
+        conf.setBoolean(HConstants.MASTER_TYPE_BACKUP, true);
+      }
+
+      if (cmd.getArgList().contains("start")) {
         try {
           // Print out vm stats before starting up.
           RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
@@ -882,7 +910,8 @@ implements HMasterInterface, HMasterRegi
           // If 'local', defer to LocalHBaseCluster instance.  Starts master
           // and regionserver both in the one JVM.
           if (LocalHBaseCluster.isLocal(conf)) {
-            final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster();
+            final MiniZooKeeperCluster zooKeeperCluster =
+              new MiniZooKeeperCluster();
             File zkDataPath = new File(conf.get("hbase.zookeeper.property.dataDir"));
             int zkClientPort = conf.getInt("hbase.zookeeper.property.clientPort", 0);
             if (zkClientPort == 0) {
@@ -908,16 +937,17 @@ implements HMasterInterface, HMasterRegi
             cluster.startup();
           } else {
             HMaster master = constructMaster(masterClass, conf);
+            if (master.isStopped()) {
+              LOG.info("Won't bring the Master up as a shutdown is requested");
+              return;
+            }
             master.start();
           }
         } catch (Throwable t) {
           LOG.error("Failed to start master", t);
           System.exit(-1);
         }
-        break;
-      }
-
-      if (cmd.equalsIgnoreCase("stop")) {
+      } else if (cmd.getArgList().contains("stop")) {
         HBaseAdmin adm = null;
         try {
           adm = new HBaseAdmin(conf);
@@ -934,10 +964,12 @@ implements HMasterInterface, HMasterRegi
           LOG.error("Failed to stop master", t);
           System.exit(-1);
         }
-        break;
+      } else {
+        throw new ParseException("Unknown argument(s): " +
+            org.apache.commons.lang.StringUtils.join(cmd.getArgs(), " "));
       }
-
-      // Print out usage if we get to here.
+    } catch (ParseException e) {
+      LOG.error("Could not parse: ", e);
       printUsageAndExit();
     }
   }
@@ -950,4 +982,4 @@ implements HMasterInterface, HMasterRegi
   public static void main(String [] args) throws IOException {
     doMain(args, HMaster.class);
   }
-}
\ No newline at end of file
+}
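
With the new parser, an invocation along these lines is accepted (illustrative
values):

    bin/hbase master start -minServers 3 -backup \
        -D hbase.rootdir=hdfs://namenode:9000/hbase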

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java Fri Aug 27 05:01:02 2010
@@ -23,10 +23,19 @@ import org.apache.hadoop.conf.Configurab
 import org.apache.hadoop.fs.Path;
 
 /**
- * Interface for the log cleaning function inside the master. Only 1 is called
- * so if the desired effect is the mix of many cleaners, do call them yourself
- * in order to control the flow.
- * HBase ships with OldLogsCleaner as the default implementation
+ * Interface for the log cleaning function inside the master. By default, three
+ * cleaners, <code>TimeToLiveLogCleaner</code>, <code>ReplicationLogCleaner</code>
+ * and <code>SnapshotLogCleaner</code>, are called in order. If additional
+ * behavior is needed, implement your own LogCleanerDelegate and add it to the
+ * configuration "hbase.master.logcleaner.plugins", a comma-separated list of
+ * fully qualified class names; LogsCleaner will add it to the chain.
+ *
+ * HBase ships with LogsCleaner as the default implementation.
+ *
+ * This interface extends Configurable, so setConf needs to be called once
+ * before using the cleaner.
+ * Since LogCleanerDelegates are created in LogsCleaner by reflection, classes
+ * that implement this interface should provide a default constructor.
  */
 public interface LogCleanerDelegate extends Configurable {
 

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Fri Aug 27 05:01:02 2010
@@ -88,7 +88,7 @@ public class ServerManager {
 
   private int minimumServerCount;
 
-  private final OldLogsCleaner oldLogCleaner;
+  private final LogsCleaner logCleaner;
 
   // Reporting to track master metrics.
   private final MasterMetrics metrics;
@@ -134,11 +134,11 @@ public class ServerManager {
     String n = Thread.currentThread().getName();
     Threads.setDaemonThreadRunning(this.serverMonitorThread,
       n + ".serverMonitor");
-    this.oldLogCleaner = new OldLogsCleaner(
+    this.logCleaner = new LogsCleaner(
       c.getInt("hbase.master.meta.thread.rescanfrequency",60 * 1000),
       master, c, this.services.getMasterFileSystem().getFileSystem(),
       this.services.getMasterFileSystem().getOldLogDir());
-    Threads.setDaemonThreadRunning(oldLogCleaner,
+    Threads.setDaemonThreadRunning(logCleaner,
       n + ".oldLogCleaner");
   }
 

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java Fri Aug 27 05:01:02 2010
@@ -19,8 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
-
 /**
  * Implementing classes of this interface will be used for the tracking
  * and enforcement of columns and numbers of versions during the course of a
@@ -29,12 +27,10 @@ import org.apache.hadoop.hbase.regionser
  * Currently there are two different types of Store/Family-level queries.
  * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
  * one or more column qualifiers to return in the family.
- * <li>{@link WildcardColumnTracker} is used when the query asks for all
- * qualifiers within the family.
  * <p>
- * This class is utilized by {@link QueryMatcher} through two methods:
+ * This class is utilized by {@link ScanQueryMatcher} through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.  This method returns a {@link MatchCode} to define
+ * conditions of the query.  This method returns a {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode} to define
  * what action should be taken.
  * <li>{@link #update} is called at the end of every StoreFile or memstore.
  * <p>
@@ -48,7 +44,7 @@ public interface ColumnTracker {
    * @param length
    * @return The match code instance.
    */
-  public MatchCode checkColumn(byte [] bytes, int offset, int length);
+  public ScanQueryMatcher.MatchCode checkColumn(byte [] bytes, int offset, int length);
 
   /**
    * Updates internal variables in between files

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Fri Aug 27 05:01:02 2010
@@ -29,10 +29,7 @@ import java.util.concurrent.locks.Reentr
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -51,11 +48,20 @@ public class CompactSplitThread extends 
 
   private final HashSet<HRegion> regionsInQueue = new HashSet<HRegion>();
 
+  /**
+   * Splitting should not take place if the total number of regions exceeds this.
+   * This is not a hard limit on the number of regions but a guideline to stop
+   * splitting once the number of online regions is greater than this.
+   */
+  private int regionSplitLimit;
+
   /** @param server */
   public CompactSplitThread(HRegionServer server) {
     super();
     this.server = server;
-    this.conf = server.conf;
+    this.conf = server.getConfiguration();
+    this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit",
+        Integer.MAX_VALUE);
     this.frequency =
       conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency",
       20 * 1000);
@@ -75,7 +81,8 @@ public class CompactSplitThread extends 
           try {
             // Don't interrupt us while we are working
             byte [] midKey = r.compactStores();
-            if (midKey != null && !this.server.isStopped()) {
+            if (shouldSplitRegion() && midKey != null &&
+                !this.server.isStopped()) {
               split(r, midKey);
             }
           } finally {
@@ -124,7 +131,6 @@ public class CompactSplitThread extends 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Compaction " + (force? "(major) ": "") +
         "requested for region " + r.getRegionNameAsString() +
-        "/" + r.getRegionInfo().getEncodedName() +
         (why != null && !why.isEmpty()? " because: " + why: ""));
     }
     synchronized (regionsInQueue) {
@@ -135,69 +141,39 @@ public class CompactSplitThread extends 
     }
   }
 
-  private void split(final HRegion region, final byte [] midKey)
+  private void split(final HRegion parent, final byte [] midKey)
   throws IOException {
-    final HRegionInfo oldRegionInfo = region.getRegionInfo();
     final long startTime = System.currentTimeMillis();
-    final HRegion [] newRegions = region.splitRegion(midKey);
-    if (newRegions == null) {
-      // Didn't need to be split
-      return;
-    }
-    // TODO: Handle splitting of meta.
-
-    // Mark old region as offline and split in META.
-    // NOTE: there is no need for retry logic here. HTable does it for us.
-    oldRegionInfo.setOffline(true);
-    oldRegionInfo.setSplit(true);
-    // Inform the HRegionServer that the parent HRegion is no-longer online.
-    this.server.removeFromOnlineRegions(oldRegionInfo.getEncodedName());
-    MetaEditor.offlineParentInMeta(this.server.getCatalogTracker(),
-      oldRegionInfo, newRegions[0].getRegionInfo(),
-      newRegions[1].getRegionInfo());
-
-    // If we crash here, then the daughters will not be added and we'll have
-    // and offlined parent but no daughters to take up the slack.  hbase-2244
-    // adds fixup to the metascanners.
-    // TODO: Need new fixerupper in new master regime.
-
-    // TODO: if we fail here on out, crash out.  The recovery of a shutdown
-    // server should have fixup and get the daughters up on line.
-
-
-    // Add new regions to META
-    for (int i = 0; i < newRegions.length; i++) {
-      MetaEditor.addRegionToMeta(this.server.getCatalogTracker(),
-        newRegions[i].getRegionInfo());
-    }
-
-    // Open the regions on this server. TODO: Revisit.  Make sure no holes.
-    for (int i = 0; i < newRegions.length; i++) {
-      HRegionInfo hri = newRegions[i].getRegionInfo();
-      HRegion r = null;
+    SplitTransaction st = new SplitTransaction(parent, midKey);
+    // If prepare does not return true, for some reason -- logged inside the
+    // prepare call -- we are not ready to split just now.  Just return.
+    if (!st.prepare()) return;
+    try {
+      st.execute(this.server, this.server.getCatalogTracker());
+    } catch (IOException ioe) {
       try {
-        // Instantiate the region.
-        r = HRegion.openHRegion(hri, this.server.getWAL(),
-          this.server.getConfiguration(), this.server.getFlushRequester(), null);
-        this.server.postOpenDeployTasks(r, this.server.getCatalogTracker());
-      } catch (Throwable tt) {
-        this.server.abort("Failed open of " + hri.getRegionNameAsString(), tt);
+        LOG.info("Running rollback of failed split of " +
+          parent.getRegionNameAsString() + "; " + ioe.getMessage());
+        st.rollback(this.server);
+        LOG.info("Successful rollback of failed split of " +
+          parent.getRegionNameAsString());
+      } catch (RuntimeException e) {
+        // If failed rollback, kill this server to avoid having a hole in table.
+        LOG.info("Failed rollback of failed split of " +
+          parent.getRegionNameAsString() + " -- aborting server", e);
+        this.server.abort("Failed split");
       }
+      return;
     }
 
-    // If we crash here, the master will not know of the new daughters and they
-    // will not be assigned.  The metascanner when it runs will notice and take
-    // care of assigning the new daughters.
-
-    // Now tell the master about the new regions; it needs to update its
-    // inmemory state of regions.
-    server.reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
-      newRegions[1].getRegionInfo());
-
-    LOG.info("region split, META updated, daughters opened, and report to master all" +
-      " successful. Old region=" + oldRegionInfo.toString() +
-      ", new regions: " + newRegions[0].toString() + ", " +
-      newRegions[1].toString() + ". Split took " +
+    // Now tell the master about the new regions.  If we fail here, it's OK;
+    // the basescanner will do the fixup.  And reporting the split to the
+    // master is going away.
+    // TODO: Verify this still holds in the new master rewrite.
+    this.server.reportSplit(parent.getRegionInfo(), st.getFirstDaughter(),
+      st.getSecondDaughter());
+    LOG.info("Region split, META updated, and report to master. Parent=" +
+      parent.getRegionInfo() + ", new regions: " +
+      st.getFirstDaughter() + ", " + st.getSecondDaughter() + ". Split took " +
       StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
   }
 
@@ -219,4 +195,15 @@ public class CompactSplitThread extends 
   public int getCompactionQueueSize() {
     return compactionQueue.size();
   }
-}
\ No newline at end of file
+
+  private boolean shouldSplitRegion() {
+    return (regionSplitLimit > server.getNumberOfOnlineRegions());
+  }
+
+  /**
+   * @return the regionSplitLimit
+   */
+  public int getRegionSplitLimit() {
+    return this.regionSplitLimit;
+  }
+}

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java Fri Aug 27 05:01:02 2010
@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.regionse
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
-import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.util.Byte
  * between rows.
  *
  * <p>
- * This class is utilized by {@link QueryMatcher} through two methods:
+ * This class is utilized by {@link ScanQueryMatcher} through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.  This method returns a {@link MatchCode} to define
+ * conditions of the query.  This method returns a {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode} to define
  * what action should be taken.
  * <li>{@link #update} is called at the end of every StoreFile or memstore.
  * <p>
@@ -84,18 +84,18 @@ public class ExplicitColumnTracker imple
    * @param bytes KeyValue buffer
    * @param offset offset to the start of the qualifier
    * @param length length of the qualifier
-   * @return MatchCode telling QueryMatcher what action to take
+   * @return MatchCode telling ScanQueryMatcher what action to take
    */
-  public MatchCode checkColumn(byte [] bytes, int offset, int length) {
+  public ScanQueryMatcher.MatchCode checkColumn(byte [] bytes, int offset, int length) {
     do {
       // No more columns left, we are done with this query
       if(this.columns.size() == 0) {
-        return MatchCode.DONE; // done_row
+        return ScanQueryMatcher.MatchCode.DONE; // done_row
       }
 
       // No more columns to match against, done with storefile
       if(this.column == null) {
-        return MatchCode.NEXT; // done_row
+        return ScanQueryMatcher.MatchCode.NEXT; // done_row
       }
 
       // Compare specific column to current column
@@ -114,13 +114,13 @@ public class ExplicitColumnTracker imple
             this.column = this.columns.get(this.index);
           }
         }
-        return MatchCode.INCLUDE;
+        return ScanQueryMatcher.MatchCode.INCLUDE;
       }
 
 
       if (ret > 0) {
          // Specified column is smaller than the current, skip to next column.
-        return MatchCode.SKIP;
+        return ScanQueryMatcher.MatchCode.SKIP;
       }
 
       // Specified column is bigger than current column
@@ -128,7 +128,7 @@ public class ExplicitColumnTracker imple
       if(ret <= -1) {
         if(++this.index == this.columns.size()) {
           // No more to match, do not include, done with storefile
-          return MatchCode.NEXT; // done_row
+          return ScanQueryMatcher.MatchCode.NEXT; // done_row
         }
         // This is the recursive case.
         this.column = this.columns.get(this.index);
@@ -163,4 +163,39 @@ public class ExplicitColumnTracker imple
       col.setCount(this.maxVersions);
     }
   }
+
+  /**
+   * This method is used to inform the column tracker that we are done with
+   * this column. We may get this information from external filters or the
+   * timestamp range, and we then need to pass it on to the tracker. It is
+   * required only in the case of the ExplicitColumnTracker.
+   * @param bytes
+   * @param offset
+   * @param length
+   */
+  public void doneWithColumn(byte [] bytes, int offset, int length) {
+    while (this.column != null) {
+      int compare = Bytes.compareTo(column.getBuffer(), column.getOffset(),
+          column.getLength(), bytes, offset, length);
+      if (compare == 0) {
+        this.columns.remove(this.index);
+        if (this.columns.size() == this.index) {
+          // Will not hit any more columns in this storefile
+          this.column = null;
+        } else {
+          this.column = this.columns.get(this.index);
+        }
+        return;
+      } else if ( compare <= -1) {
+        if(++this.index != this.columns.size()) {
+          this.column = this.columns.get(this.index);
+        } else {
+          this.column = null;
+        }
+      } else {
+        return;
+      }
+    }
+  }
+
 }
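
A hedged sketch of the intended call pattern from the matcher (names here are
illustrative):

    ScanQueryMatcher.MatchCode mc =
        tracker.checkColumn(bytes, qualifierOffset, qualifierLength);
    if (mc == ScanQueryMatcher.MatchCode.INCLUDE && outsideTimeRange) {
      // e.g. TimeRange.compare(kv.getTimestamp()) != 0: no remaining version
      // of this column can match, so drop it from the tracker's column set.
      tracker.doneWithColumn(bytes, qualifierOffset, qualifierLength);
    }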

Modified: hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java (original)
+++ hbase/branches/0.90_master_rewrite/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java Fri Aug 27 05:01:02 2010
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Byte
 
 /**
  * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}.
- * Like {@link GetDeleteTracker} and {@link ScanDeleteTracker} but does not
+ * Like {@link ScanDeleteTracker} but does not
  * implement the {@link DeleteTracker} interface since state spans rows (There
  * is no update nor reset method).
  */