Posted to commits@hbase.apache.org by st...@apache.org on 2009/02/25 06:34:30 UTC

svn commit: r747666 [1/3] - in /hadoop/hbase/trunk: ./ conf/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regio...

Author: stack
Date: Wed Feb 25 05:34:29 2009
New Revision: 747666

URL: http://svn.apache.org/viewvc?rev=747666&view=rev
Log:
HBASE-61 Create an HBase-specific MapFile implementation

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/conf/hbase-default.xml
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfMapFileReader.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/MapFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Writables.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestClassMigration.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Feb 25 05:34:29 2009
@@ -5,6 +5,8 @@
    HBASE-1144  Store the ROOT region location in Zookeeper
                (Nitay Joffe via Stack)
    HBASE-1146  Replace the HRS leases with Zookeeper
+   HBASE-61    Create an HBase-specific MapFile implementation
+               (Ryan Rawson via Stack)
 
   BUG FIXES
    HBASE-1140  "ant clean test" fails (Nitay Joffe via Stack)

Modified: hadoop/hbase/trunk/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/conf/hbase-default.xml?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk/conf/hbase-default.xml Wed Feb 25 05:34:29 2009
@@ -316,6 +316,14 @@
     </description>
   </property>
   <property>
+    <name>hfile.min.blocksize.size</name>
+    <value>65536</value>
+    <description>Minimum store file block size.  The smaller you make this, the
+    bigger your index and the less you fetch on a random-access.  Set size down
+    if you have small cells and want faster random-access of individual cells.
+    </description>
+  </property>
+  <property>
     <name>hbase.io.seqfile.compression.type</name>
     <value>NONE</value>
     <description>The compression type for hbase sequencefile.Writers

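A minimal sketch of tuning this new property from client code.  The class
name and the 8k value are illustrative only, and the property is presumably
read where store files are written, so it would normally be set in
hbase-site.xml rather than programmatically.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BlockSizeSketch {
    public static void main(String [] args) {
      // Smaller blocks mean a denser index and less data fetched per
      // random access -- a win when cells are small and reads are random.
      Configuration conf = new HBaseConfiguration();
      conf.setInt("hfile.min.blocksize.size", 8 * 1024); // default 65536
    }
  }
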
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java Wed Feb 25 05:34:29 2009
@@ -335,7 +335,6 @@
         TreeMap<byte [], Cell> results =
           new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
         while(rootScanner.next(key, results)) {
-          key.setHRegionInfo(HRegionInfo.ROOT_REGIONINFO);
           for(Cell c: results.values()) {
             HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
             if (info != null) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java Wed Feb 25 05:34:29 2009
@@ -427,13 +427,13 @@
     }
 
     // Compare start keys.
-    result = HStoreKey.compareTwoRowKeys(o, this.startKey, o.startKey);
+    result = HStoreKey.compareTwoRowKeys(this.startKey, o.startKey);
     if (result != 0) {
       return result;
     }
     
     // Compare end keys.
-    return HStoreKey.compareTwoRowKeys(o, this.endKey, o.endKey);
+    return HStoreKey.compareTwoRowKeys(this.endKey, o.endKey);
   }
 
   /**

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java Wed Feb 25 05:34:29 2009
@@ -20,14 +20,21 @@
 package org.apache.hadoop.hbase;
 
 
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.DataOutputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A Key for a stored row.
@@ -42,12 +49,6 @@
   private byte [] column = HConstants.EMPTY_BYTE_ARRAY;
   private long timestamp = Long.MAX_VALUE;
 
-  /*
-   * regionInfo is only used as a hack to compare HSKs.
-   * It is not serialized.  See https://issues.apache.org/jira/browse/HBASE-832
-   */
-  private HRegionInfo regionInfo = null;
-  
   /**
    * Estimated size tax paid for each instance of HSK.  Estimate based on
    * study of jhat and jprofiler numbers.
@@ -55,6 +56,9 @@
   // In jprofiler, says shallow size is 48 bytes.  Add to it cost of two
   // byte arrays and then something for the HRI hosting.
   public static final int ESTIMATED_HEAP_TAX = 48;
+  
+  public static final StoreKeyByteComparator BYTECOMPARATOR =
+    new StoreKeyByteComparator();
 
   /** Default constructor used in conjunction with Writable interface */
   public HStoreKey() {
@@ -80,18 +84,7 @@
    * @param row - row key
    */
   public HStoreKey(final String row) {
-    this(row, Long.MAX_VALUE);
-  }
-
-  /**
-   * Create an HStoreKey specifying the row and timestamp
-   * The column and table names default to the empty string
-   * 
-   * @param row row key
-   * @param hri
-   */
-  public HStoreKey(final byte [] row, final HRegionInfo hri) {
-    this(row, HConstants.EMPTY_BYTE_ARRAY, hri);
+    this(Bytes.toBytes(row), Long.MAX_VALUE);
   }
  
   /**
@@ -102,33 +95,11 @@
    * @param timestamp timestamp value
    * @param hri HRegionInfo
    */
-  public HStoreKey(final byte [] row, long timestamp, final HRegionInfo hri) {
-    this(row, HConstants.EMPTY_BYTE_ARRAY, timestamp, hri);
-  }
-
-  /**
-   * Create an HStoreKey specifying the row and timestamp
-   * The column and table names default to the empty string
-   * 
-   * @param row row key
-   * @param timestamp timestamp value
-   */
-  public HStoreKey(final byte [] row, long timestamp) {
+  public HStoreKey(final byte [] row, final long timestamp) {
     this(row, HConstants.EMPTY_BYTE_ARRAY, timestamp);
   }
 
   /**
-   * Create an HStoreKey specifying the row and timestamp
-   * The column and table names default to the empty string
-   * 
-   * @param row row key
-   * @param timestamp timestamp value
-   */
-  public HStoreKey(final String row, long timestamp) {
-    this (row, "", timestamp, new HRegionInfo());
-  }
-
-  /**
    * Create an HStoreKey specifying the row and column names
    * The timestamp defaults to LATEST_TIMESTAMP
    * and table name defaults to the empty string
@@ -137,7 +108,7 @@
    * @param column column key
    */
   public HStoreKey(final String row, final String column) {
-    this(row, column, HConstants.LATEST_TIMESTAMP, new HRegionInfo());
+    this(row, column, HConstants.LATEST_TIMESTAMP);
   }
 
   /**
@@ -151,19 +122,6 @@
   public HStoreKey(final byte [] row, final byte [] column) {
     this(row, column, HConstants.LATEST_TIMESTAMP);
   }
-  
-  /**
-   * Create an HStoreKey specifying the row, column names and table name
-   * The timestamp defaults to LATEST_TIMESTAMP
-   * 
-   * @param row row key
-   * @param column column key
-   * @param regionInfo region info
-   */
-  public HStoreKey(final byte [] row, 
-      final byte [] column, final HRegionInfo regionInfo) {
-    this(row, column, HConstants.LATEST_TIMESTAMP, regionInfo);
-  }
 
   /**
    * Create an HStoreKey specifying all the fields
@@ -174,25 +132,11 @@
    * @param timestamp timestamp value
    * @param regionInfo region info
    */
-  public HStoreKey(final String row, 
-      final String column, long timestamp, final HRegionInfo regionInfo) {
-    this (Bytes.toBytes(row), Bytes.toBytes(column), 
-        timestamp, regionInfo);
+  public HStoreKey(final String row, final String column, final long timestamp) {
+    this (Bytes.toBytes(row), Bytes.toBytes(column), timestamp);
   }
 
   /**
-   * Create an HStoreKey specifying all the fields with unspecified table
-   * Does not make copies of the passed byte arrays. Presumes the passed 
-   * arrays immutable.
-   * @param row row key
-   * @param column column key
-   * @param timestamp timestamp value
-   */
-  public HStoreKey(final byte [] row, final byte [] column, long timestamp) {
-    this(row, column, timestamp, null);
-  }
-  
-  /**
    * Create an HStoreKey specifying all the fields with specified table
    * Does not make copies of the passed byte arrays. Presumes the passed 
    * arrays immutable.
@@ -201,13 +145,11 @@
    * @param timestamp timestamp value
    * @param regionInfo region info
    */
-  public HStoreKey(final byte [] row, 
-      final byte [] column, long timestamp, final HRegionInfo regionInfo) {
+  public HStoreKey(final byte [] row, final byte [] column, final long timestamp) {
     // Make copies
     this.row = row;
     this.column = column;
     this.timestamp = timestamp;
-    this.regionInfo = regionInfo;
   }
 
   /**
@@ -215,17 +157,20 @@
    * 
    * @param other the source key
    */
-  public HStoreKey(HStoreKey other) {
-    this(other.getRow(), other.getColumn(), other.getTimestamp(),
-      other.getHRegionInfo());
+  public HStoreKey(final HStoreKey other) {
+    this(other.getRow(), other.getColumn(), other.getTimestamp());
   }
-  
+
+  public HStoreKey(final ByteBuffer bb) {
+    this(getRow(bb), getColumn(bb), getTimestamp(bb));
+  }
+
   /**
    * Change the value of the row key
    * 
    * @param newrow new row key value
    */
-  public void setRow(byte [] newrow) {
+  public void setRow(final byte [] newrow) {
     this.row = newrow;
   }
   
@@ -234,7 +179,7 @@
    * 
    * @param c new column family value
    */
-  public void setColumn(byte [] c) {
+  public void setColumn(final byte [] c) {
     this.column = c;
   }
 
@@ -243,7 +188,7 @@
    * 
    * @param timestamp new timestamp value
    */
-  public void setVersion(long timestamp) {
+  public void setVersion(final long timestamp) {
     this.timestamp = timestamp;
   }
   
@@ -252,7 +197,7 @@
    * 
    * @param k key value to copy
    */
-  public void set(HStoreKey k) {
+  public void set(final HStoreKey k) {
     this.row = k.getRow();
     this.column = k.getColumn();
     this.timestamp = k.getTimestamp();
@@ -272,19 +217,7 @@
   public long getTimestamp() {
     return this.timestamp;
   }
-  
-  /** @return value of regioninfo */
-  public HRegionInfo getHRegionInfo() {
-    return this.regionInfo;
-  }
-  
-  /**
-   * @param hri
-   */
-  public void setHRegionInfo(final HRegionInfo hri) {
-    this.regionInfo = hri;
-  }
-  
+
   /**
    * Compares the row and column of two keys
    * @param other Key to compare against. Compares row and column.
@@ -292,8 +225,8 @@
    * @see #matchesWithoutColumn(HStoreKey)
    * @see #matchesRowFamily(HStoreKey)
    */ 
-  public boolean matchesRowCol(HStoreKey other) {
-    return HStoreKey.equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) &&
+  public boolean matchesRowCol(final HStoreKey other) {
+    return HStoreKey.equalsTwoRowKeys(getRow(), other.getRow()) &&
       Bytes.equals(getColumn(), other.getColumn());
   }
   
@@ -306,8 +239,8 @@
    * @see #matchesRowCol(HStoreKey)
    * @see #matchesRowFamily(HStoreKey)
    */
-  public boolean matchesWithoutColumn(HStoreKey other) {
-    return equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) &&
+  public boolean matchesWithoutColumn(final HStoreKey other) {
+    return equalsTwoRowKeys(getRow(), other.getRow()) &&
       getTimestamp() >= other.getTimestamp();
   }
   
@@ -320,9 +253,9 @@
    * @see #matchesRowCol(HStoreKey)
    * @see #matchesWithoutColumn(HStoreKey)
    */
-  public boolean matchesRowFamily(HStoreKey that) {
-    int delimiterIndex = getFamilyDelimiterIndex(getColumn());
-    return equalsTwoRowKeys(getHRegionInfo(), getRow(), that.getRow()) &&
+  public boolean matchesRowFamily(final HStoreKey that) {
+    final int delimiterIndex = getFamilyDelimiterIndex(getColumn());
+    return equalsTwoRowKeys(getRow(), that.getRow()) &&
       Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0,
         delimiterIndex) == 0;
   }
@@ -334,8 +267,8 @@
   }
   
   @Override
-  public boolean equals(Object obj) {
-    HStoreKey other = (HStoreKey)obj;
+  public boolean equals(final Object obj) {
+    final HStoreKey other = (HStoreKey)obj;
     // Do a quick check.
     if (this.row.length != other.row.length ||
         this.column.length != other.column.length ||
@@ -356,17 +289,15 @@
   // Comparable
 
   public int compareTo(final HStoreKey o) {
-    return compareTo(this.regionInfo, this, o);
+    return compareTo(this, o);
   }
-  
-  static int compareTo(final HRegionInfo hri, final HStoreKey left,
-      final HStoreKey right) {
+  static int compareTo(final HStoreKey left, final HStoreKey right) {
     // We can be passed null
     if (left == null && right == null) return 0;
     if (left == null) return -1;
     if (right == null) return 1;
     
-    int result = compareTwoRowKeys(hri, left.getRow(), right.getRow());
+    int result = compareTwoRowKeys(left.getRow(), right.getRow());
     if (result != 0) {
       return result;
     }
@@ -386,11 +317,7 @@
     } else if (left.getTimestamp() > right.getTimestamp()) {
       result = -1;
     }
-    // Because of HBASE-877, our BeforeThisStoreKey trick no longer works in
-    // mapfiles and so instead we need to do this weird check here below.
-    return result == 0 && left instanceof BeforeThisStoreKey? -1:
-      result == 0 && right instanceof BeforeThisStoreKey? 1:
-      result;
+    return result;
   }
 
   /**
@@ -402,13 +329,13 @@
    */
   public static byte [] getFamily(final byte [] column)
   throws ColumnNameParseException {
-    int index = getFamilyDelimiterIndex(column);
+    final int index = getFamilyDelimiterIndex(column);
     if (index <= 0) {
       throw new ColumnNameParseException("Missing ':' delimiter between " +
         "column family and qualifier in the passed column name <" +
         Bytes.toString(column) + ">");
     }
-    byte [] result = new byte[index];
+    final byte [] result = new byte[index];
     System.arraycopy(column, 0, result, 0, index);
     return result;
   }
@@ -418,7 +345,7 @@
    * @return Return hash of family portion of passed column.
    */
   public static Integer getFamilyMapKey(final byte [] column) {
-    int index = getFamilyDelimiterIndex(column);
+    final int index = getFamilyDelimiterIndex(column);
     // If index < -1, presume passed column is a family name absent colon
     // delimiter
     return Bytes.mapKey(column, index > 0? index: column.length);
@@ -432,7 +359,7 @@
   public static boolean matchingFamily(final byte [] family,
       final byte [] column) {
     // Make sure index of the ':' is at same offset.
-    int index = getFamilyDelimiterIndex(column);
+    final int index = getFamilyDelimiterIndex(column);
     if (index != family.length) {
       return false;
     }
@@ -445,7 +372,7 @@
    */
   public static byte [] addDelimiter(final byte [] family) {
     // Manufacture key by adding delimiter to the passed in colFamily.
-    byte [] familyPlusDelimiter = new byte [family.length + 1];
+    final byte [] familyPlusDelimiter = new byte [family.length + 1];
     System.arraycopy(family, 0, familyPlusDelimiter, 0, family.length);
     familyPlusDelimiter[family.length] = HStoreKey.COLUMN_FAMILY_DELIMITER;
     return familyPlusDelimiter;
@@ -457,9 +384,9 @@
    * @see #parseColumn(byte[])
    */
   public static byte [] getQualifier(final byte [] column) {
-    int index = getFamilyDelimiterIndex(column);
-    int len = column.length - (index + 1);
-    byte [] result = new byte[len];
+    final int index = getFamilyDelimiterIndex(column);
+    final int len = column.length - (index + 1);
+    final byte [] result = new byte[len];
     System.arraycopy(column, index + 1, result, 0, len);
     return result;
   }
@@ -473,14 +400,14 @@
    */
   public static byte [][] parseColumn(final byte [] c)
   throws ColumnNameParseException {
-    byte [][] result = new byte [2][];
-    int index = getFamilyDelimiterIndex(c);
+    final byte [][] result = new byte [2][];
+    final int index = getFamilyDelimiterIndex(c);
     if (index == -1) {
       throw new ColumnNameParseException("Impossible column name: " + c);
     }
     result[0] = new byte [index];
     System.arraycopy(c, 0, result[0], 0, index);
-    int len = c.length - (index + 1);
+    final int len = c.length - (index + 1);
     result[1] = new byte[len];
     System.arraycopy(c, index + 1 /*Skip delimiter*/, result[1], 0,
       len);
@@ -507,15 +434,6 @@
   }
 
   /**
-   * Returns row and column bytes out of an HStoreKey.
-   * @param hsk Store key.
-   * @return byte array encoding of HStoreKey
-   */
-  public static byte[] getBytes(final HStoreKey hsk) {
-    return Bytes.add(hsk.getRow(), hsk.getColumn());
-  }
-  
-  /**
    * Utility method to compare two row keys.
    * This is required because of the meta delimiters.
    * This is a hack.
@@ -524,16 +442,7 @@
    * @param rowB
    * @return value of the comparison
    */
-  public static int compareTwoRowKeys(HRegionInfo regionInfo, 
-      byte[] rowA, byte[] rowB) {
-    if (regionInfo != null && regionInfo.isMetaRegion()) {
-      byte[][] keysA = stripStartKeyMeta(rowA);
-      byte[][] KeysB = stripStartKeyMeta(rowB);
-      int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
-      if(rowCompare == 0)
-        rowCompare = Bytes.compareTo(keysA[1], KeysB[1]);
-      return rowCompare;
-    }
+  public static int compareTwoRowKeys(final byte[] rowA, final byte[] rowB) {
     return Bytes.compareTo(rowA, rowB);
   }
   
@@ -541,54 +450,48 @@
    * Utility method to check if two row keys are equal.
    * This is required because of the meta delimiters
    * This is a hack
-   * @param regionInfo
    * @param rowA
    * @param rowB
    * @return if it's equal
    */
-  public static boolean equalsTwoRowKeys(HRegionInfo regionInfo, 
-      byte[] rowA, byte[] rowB) {
+  public static boolean equalsTwoRowKeys(final byte[] rowA, final byte[] rowB) {
     return ((rowA == null) && (rowB == null)) ? true:
       (rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false:
-        compareTwoRowKeys(regionInfo,rowA,rowB) == 0;
+        compareTwoRowKeys(rowA,rowB) == 0;
   }
-  
-  private static byte[][] stripStartKeyMeta(byte[] rowKey) {
-    int offset = -1;
-    for (int i = rowKey.length - 1; i > 0; i--) {
-      if (rowKey[i] == HConstants.META_ROW_DELIMITER) {
-        offset = i;
-        break;
-      }
-    }
-    byte [] row = rowKey;
-    byte [] timestamp = HConstants.EMPTY_BYTE_ARRAY;
-    if (offset != -1) {
-      row = new byte[offset];
-      System.arraycopy(rowKey, 0, row, 0, offset);
-      timestamp = new byte[rowKey.length - offset - 1];
-      System.arraycopy(rowKey, offset+1, timestamp, 0,rowKey.length - offset - 1);
-    }
-    byte[][] elements = new byte[2][];
-    elements[0] = row;
-    elements[1] = timestamp;
-    return elements;
-  }
-  
+
   // Writable
 
-  public void write(DataOutput out) throws IOException {
+  public void write(final DataOutput out) throws IOException {
     Bytes.writeByteArray(out, this.row);
     Bytes.writeByteArray(out, this.column);
     out.writeLong(timestamp);
   }
 
-  public void readFields(DataInput in) throws IOException {
+  public void readFields(final DataInput in) throws IOException {
     this.row = Bytes.readByteArray(in);
     this.column = Bytes.readByteArray(in);
     this.timestamp = in.readLong();
   }
 
+  /**
+   * @param hsk
+   * @return Size of this key in serialized bytes.
+   */
+  public static int getSerializedSize(final HStoreKey hsk) {
+    return getSerializedSize(hsk.getRow()) +
+      getSerializedSize(hsk.getColumn()) +
+      Bytes.SIZEOF_LONG;
+  }
+
+  /**
+   * @param b
+   * @return Length of buffer when its been serialized.
+   */
+  private static int getSerializedSize(final byte [] b) {
+    return b == null? 1: b.length + WritableUtils.getVIntSize(b.length);
+  }
+
   public long heapSize() {
     return getRow().length + Bytes.ESTIMATED_HEAP_TAX +
       getColumn().length + Bytes.ESTIMATED_HEAP_TAX +
@@ -596,22 +499,47 @@
   }
 
   /**
-   * Passed as comparator for memcache and for store files.  See HBASE-868.
-   */
-  public static class HStoreKeyWritableComparator extends WritableComparator {
-    private final HRegionInfo hri;
-    
-    /** @param hri */
-    public HStoreKeyWritableComparator(final HRegionInfo hri) {
-      super(HStoreKey.class);
-      this.hri = hri;
-    }
-    
-    @SuppressWarnings("unchecked")
-    @Override
-    public int compare(final WritableComparable left,
-        final WritableComparable right) {
-      return compareTo(this.hri, (HStoreKey)left, (HStoreKey)right);
+   * @return The bytes of <code>hsk</code> gotten by running its 
+   * {@link Writable#write(java.io.DataOutput)} method.
+   * @throws IOException
+   */
+  public byte [] getBytes() throws IOException {
+    return getBytes(this);
+  }
+
+  /**
+   * Return serialized <code>hsk</code> bytes.
+   * Note, this method's implementation has changed.  Used to just return
+   * row and column.  This is a customized version of
+   * {@link Writables#getBytes(Writable)}
+   * @param hsk Instance
+   * @return The bytes of <code>hsk</code> gotten by running its 
+   * {@link Writable#write(java.io.DataOutput)} method.
+   * @throws IOException
+   */
+  public static byte [] getBytes(final HStoreKey hsk) throws IOException {
+    // TODO: Redo with system.arraycopy instead of DOS.
+    if (hsk == null) {
+      throw new IllegalArgumentException("Writable cannot be null");
+    }
+    final int serializedSize = getSerializedSize(hsk);
+    final ByteArrayOutputStream byteStream = new ByteArrayOutputStream(serializedSize);
+    DataOutputStream out = new DataOutputStream(byteStream);
+    try {
+      hsk.write(out);
+      out.close();
+      out = null;
+      final byte [] serializedKey = byteStream.toByteArray();
+      if (serializedKey.length != serializedSize) {
+        // REMOVE THIS AFTER CONFIDENCE THAT OUR SIZING IS BEING DONE PROPERLY
+        throw new AssertionError("Sizes do not agree " + serializedKey.length +
+          ", " + serializedSize);
+      }
+      return serializedKey;
+    } finally {
+      if (out != null) {
+        out.close();
+      }
     }
   }
   
@@ -624,6 +552,7 @@
    * returning us the deleted key (getClosest gets exact or nearest before when
    * you pass true argument).  TODO: Throw this class away when MapFile has
    * a real 'previous' method.  See HBASE-751.
+   * @deprecated
    */
   public static class BeforeThisStoreKey extends HStoreKey {
     private final HStoreKey beforeThisKey;
@@ -638,12 +567,12 @@
     
     @Override
     public int compareTo(final HStoreKey o) {
-      int result = this.beforeThisKey.compareTo(o);
+      final int result = this.beforeThisKey.compareTo(o);
       return result == 0? -1: result;
     }
     
     @Override
-    public boolean equals(Object obj) {
+    public boolean equals(final Object obj) {
       return false;
     }
 
@@ -673,42 +602,42 @@
     }
 
     @Override
-    public boolean matchesRowCol(HStoreKey other) {
+    public boolean matchesRowCol(final HStoreKey other) {
       return this.beforeThisKey.matchesRowCol(other);
     }
 
     @Override
-    public boolean matchesRowFamily(HStoreKey that) {
+    public boolean matchesRowFamily(final HStoreKey that) {
       return this.beforeThisKey.matchesRowFamily(that);
     }
 
     @Override
-    public boolean matchesWithoutColumn(HStoreKey other) {
+    public boolean matchesWithoutColumn(final HStoreKey other) {
       return this.beforeThisKey.matchesWithoutColumn(other);
     }
 
     @Override
-    public void readFields(DataInput in) throws IOException {
+    public void readFields(final DataInput in) throws IOException {
       this.beforeThisKey.readFields(in);
     }
 
     @Override
-    public void set(HStoreKey k) {
+    public void set(final HStoreKey k) {
       this.beforeThisKey.set(k);
     }
 
     @Override
-    public void setColumn(byte[] c) {
+    public void setColumn(final byte[] c) {
       this.beforeThisKey.setColumn(c);
     }
 
     @Override
-    public void setRow(byte[] newrow) {
+    public void setRow(final byte[] newrow) {
       this.beforeThisKey.setRow(newrow);
     }
 
     @Override
-    public void setVersion(long timestamp) {
+    public void setVersion(final long timestamp) {
       this.beforeThisKey.setVersion(timestamp);
     }
 
@@ -718,18 +647,266 @@
     }
 
     @Override
-    public void write(DataOutput out) throws IOException {
+    public void write(final DataOutput out) throws IOException {
       this.beforeThisKey.write(out);
     }
-    
-    @Override
-    public HRegionInfo getHRegionInfo() {
-      return this.beforeThisKey.getHRegionInfo();
+  }
+
+  /**
+   * Passed as comparator for memcache and for store files.  See HBASE-868.
+   */
+  public static class HStoreKeyWritableComparator extends WritableComparator {
+    public HStoreKeyWritableComparator() {
+      super(HStoreKey.class);
     }
     
-    @Override
-    public void setHRegionInfo(final HRegionInfo hri) {
-      this.beforeThisKey.setHRegionInfo(hri);
+    @SuppressWarnings("unchecked")
+    public int compare(final WritableComparable left,
+        final WritableComparable right) {
+      return compareTo((HStoreKey)left, (HStoreKey)right);
     }
   }
+
+  /**
+   * @param bb ByteBuffer that contains serialized HStoreKey
+   * @return Row
+   */
+  public static byte [] getRow(final ByteBuffer bb) {
+    byte firstByte = bb.get(0);
+    int vint = firstByte;
+    int vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
+    }
+    byte [] b = new byte [vint];
+    System.arraycopy(bb.array(), bb.arrayOffset() + vintWidth, b, 0, vint);
+    return b;
+  }
+
+  /**
+   * @param bb ByteBuffer that contains serialized HStoreKey
+   * @return Column
+   */
+  public static byte [] getColumn(final ByteBuffer bb) {
+    byte firstByte = bb.get(0);
+    int vint = firstByte;
+    int vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
+    }
+    // Skip over row.
+    int offset = vint + vintWidth;
+    firstByte = bb.get(offset);
+    vint = firstByte;
+    vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(),
+        bb.arrayOffset() + offset);
+    }
+    byte [] b = new byte [vint];
+    System.arraycopy(bb.array(), bb.arrayOffset() + offset + vintWidth, b, 0,
+      vint);
+    return b;
+  }
+
+  /**
+   * @param bb ByteBuffer that contains serialized HStoreKey
+   * @return Timestamp
+   */
+  public static long getTimestamp(final ByteBuffer bb) {
+    byte firstByte = bb.get(0);
+    int vint = firstByte;
+    int vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
+    }
+    // Skip over row.
+    int offset = vint + vintWidth;
+    firstByte = bb.get(offset);
+    vint = firstByte;
+    vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(),
+        bb.arrayOffset() + offset);
+    }
+    // Skip over column
+    offset += (vint + vintWidth);
+    return bb.getLong(offset);
+  }
+
+  /**
+   * RawComparator for plain HStoreKeys -- i.e. not the keys of catalog
+   * tables such as -ROOT- and .META.  Compares at the byte level.
+   */
+  public static class StoreKeyByteComparator implements RawComparator<byte []> {
+    public StoreKeyByteComparator() {
+      super();
+    }
+
+    public int compare(final byte[] b1, final byte[] b2) {
+      return compare(b1, 0, b1.length, b2, 0, b2.length);
+    }
+
+    public int compare(final byte [] b1, int o1, int l1,
+        final byte [] b2, int o2, int l2) {
+      // Below is a byte compare without creating new objects.  It's awkward but
+      // seems no way around getting vint width, value, and compare result any
+      // other way. The passed byte arrays, b1 and b2, have a vint, row, vint,
+      // column, timestamp in them.  The byte array was written by the
+      // #write(DataOutputStream) method above. See it to better understand the
+      // below.
+
+      // Calculate vint and vint width for rows in b1 and b2.
+      byte firstByte1 = b1[o1];
+      int vint1 = firstByte1;
+      int vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
+      if (vintWidth1 != 1) {
+        vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
+      }
+      byte firstByte2 = b2[o2];
+      int vint2 = firstByte2;
+      int vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
+      if (vintWidth2 != 1) {
+        vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
+      }
+      // Compare the rows.
+      int result = WritableComparator.compareBytes(b1, o1 + vintWidth1, vint1,
+          b2, o2 + vintWidth2, vint2);
+      if (result != 0) {
+        return result;
+      }
+
+      // Update offsets and lengths so we are aligned on columns.
+      int diff1 = vintWidth1 + vint1;
+      o1 += diff1;
+      l1 -= diff1;
+      int diff2 = vintWidth2 + vint2;
+      o2 += diff2;
+      l2 -= diff2;
+      // Calculate vint and vint width for columns in b1 and b2.
+      firstByte1 = b1[o1];
+      vint1 = firstByte1;
+      vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
+      if (vintWidth1 != 1) {
+        vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
+      }
+      firstByte2 = b2[o2];
+      vint2 = firstByte2;
+      vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
+      if (vintWidth2 != 1) {
+        vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
+      }
+      // Compare columns.
+      result = WritableComparator.compareBytes(b1, o1 + vintWidth1, vint1,
+          b2, o2 + vintWidth2, vint2);
+      if (result != 0) {
+        return result;
+      }
+
+      // Update offsets and lengths.
+      diff1 = vintWidth1 + vint1;
+      o1 += diff1;
+      l1 -= diff1;
+      diff2 = vintWidth2 + vint2;
+      o2 += diff2;
+      l2 -= diff2;
+      // The below older timestamps sorting ahead of newer timestamps looks
+      // wrong but it is intentional. This way, newer timestamps are first
+      // found when we iterate over a memcache and newer versions are the
+      // first we trip over when reading from a store file.
+      for (int i = 0; i < l1; i++) {
+        int leftb = b1[o1 + i] & 0xff;
+        int rightb = b2[o2 + i] & 0xff;
+        if (leftb < rightb) {
+          return 1;
+        } else if (leftb > rightb) {
+          return -1;
+        }
+      }
+      return 0;
+    }
+  }
+
+  /*
+   * Vint is wider than one byte.  Find out how much bigger it is.
+   * @param vintWidth
+   * @param firstByte
+   * @param buffer
+   * @param offset
+   * @return
+   */
+  static int getBigVint(final int vintWidth, final byte firstByte,
+      final byte [] buffer, final int offset) {
+    long i = 0;
+    for (int idx = 0; idx < vintWidth - 1; idx++) {
+      final byte b = buffer[offset + 1 + idx];
+      i = i << 8;
+      i = i | (b & 0xFF);
+    }
+    i = (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
+    if (i > Integer.MAX_VALUE) {
+      throw new IllegalArgumentException("Calculated vint too large");
+    }
+    return (int)i;
+  }
+
+  /**
+   * Create a store key.
+   * @param bb
+   * @return HStoreKey instance made of the passed <code>b</code>.
+   * @throws IOException
+   */
+  public static HStoreKey create(final ByteBuffer bb)
+  throws IOException {
+    byte firstByte = bb.get(0);
+    int vint = firstByte;
+    int vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
+    }
+    byte [] row = new byte [vint];
+    System.arraycopy(bb.array(), bb.arrayOffset() + vintWidth,
+      row, 0, row.length);
+    // Skip over row.
+    int offset = vint + vintWidth;
+    firstByte = bb.get(offset);
+    vint = firstByte;
+    vintWidth = WritableUtils.decodeVIntSize(firstByte);
+    if (vintWidth != 1) {
+      vint = getBigVint(vintWidth, firstByte, bb.array(),
+        bb.arrayOffset() + offset);
+    }
+    byte [] column = new byte [vint];
+    System.arraycopy(bb.array(), bb.arrayOffset() + offset + vintWidth,
+      column, 0, column.length);
+    // Skip over column
+    offset += (vint + vintWidth);
+    long ts = bb.getLong(offset);
+    return new HStoreKey(row, column, ts);
+  }
+
+  /**
+   * Create a store key.
+   * @param b Serialized HStoreKey; a byte array with a row only in it won't do.
+   * It must have all the vints denoting r/c/ts lengths.
+   * @return HStoreKey instance made of the passed <code>b</code>.
+   * @throws IOException
+   */
+  public static HStoreKey create(final byte [] b) throws IOException {
+    return create(b, 0, b.length);
+  }
+
+  /**
+   * Create a store key.
+   * @param b Serialized HStoreKey
+   * @param offset
+   * @param length
+   * @return HStoreKey instance made of the passed <code>b</code>.
+   * @throws IOException
+   */
+  public static HStoreKey create(final byte [] b, final int offset,
+    final int length)
+  throws IOException {
+    return (HStoreKey)Writables.getWritable(b, offset, length, new HStoreKey());
+  }
 }
\ No newline at end of file

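A minimal sketch of the raw key format and ordering added above -- vint-prefixed
row, vint-prefixed column, then an 8-byte timestamp -- using only methods from
this patch.  Row and column values are illustrative.

  import org.apache.hadoop.hbase.HStoreKey;
  import org.apache.hadoop.hbase.util.Bytes;

  public class KeyOrderSketch {
    public static void main(String [] args) throws Exception {
      // Same row and column, different timestamps.
      HStoreKey older = new HStoreKey(Bytes.toBytes("row"), Bytes.toBytes("info:a"), 1000L);
      HStoreKey newer = new HStoreKey(Bytes.toBytes("row"), Bytes.toBytes("info:a"), 2000L);
      byte [] o = older.getBytes();
      byte [] n = newer.getBytes();
      // Timestamp bytes compare inverted, so the newer key sorts first.
      System.out.println(HStoreKey.BYTECOMPARATOR.compare(n, o) < 0); // true
      // Round-trip a key through its serialized form.
      HStoreKey back = HStoreKey.create(o);
      System.out.println(back.getTimestamp()); // 1000
    }
  }
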
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Wed Feb 25 05:34:29 2009
@@ -385,8 +385,8 @@
             }
           }
           endKey = currentRegion.getEndKey();
-        } while (!(endKey == null || HStoreKey.equalsTwoRowKeys(currentRegion,
-            endKey, HConstants.EMPTY_BYTE_ARRAY)));
+        } while (!(endKey == null || HStoreKey.equalsTwoRowKeys(endKey,
+            HConstants.EMPTY_BYTE_ARRAY)));
       }
       finally {
         s.setClose();
@@ -645,10 +645,8 @@
           // this one. the exception case is when the endkey is EMPTY_START_ROW,
           // signifying that the region we're checking is actually the last
           // region in the table.
-          if (HStoreKey.equalsTwoRowKeys(possibleRegion.getRegionInfo(), 
-              endKey, HConstants.EMPTY_END_ROW) ||
-              HStoreKey.compareTwoRowKeys(possibleRegion.getRegionInfo(), 
-                  endKey, row) > 0) {
+          if (HStoreKey.equalsTwoRowKeys(endKey, HConstants.EMPTY_END_ROW) ||
+              HStoreKey.compareTwoRowKeys(endKey, row) > 0) {
             return possibleRegion;
           }
         }
@@ -685,8 +683,7 @@
 
           // by nature of the map, we know that the start key has to be < 
           // otherwise it wouldn't be in the headMap. 
-          if (HStoreKey.compareTwoRowKeys(possibleRegion.getRegionInfo(),
-              endKey, row) <= 0) {
+          if (HStoreKey.compareTwoRowKeys(endKey, row) <= 0) {
             // delete any matching entry
             HRegionLocation rl =
               tableLocations.remove(matchingRegions.lastKey());

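The cache checks above reduce to a simple containment test: a region holds a
row when the row is at or past the start key and either the end key is empty
(the last region in the table) or the row sorts before it.  A sketch with an
illustrative helper, not code from the patch:

  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionContainsSketch {
    static boolean contains(final byte [] startKey, final byte [] endKey,
        final byte [] row) {
      return Bytes.compareTo(row, startKey) >= 0 &&
        (endKey.length == 0 || Bytes.compareTo(row, endKey) < 0);
    }

    public static void main(String [] args) {
      System.out.println(contains(Bytes.toBytes("a"), Bytes.toBytes("m"),
        Bytes.toBytes("c")));                       // true
      System.out.println(contains(Bytes.toBytes("m"), new byte [0],
        Bytes.toBytes("z")));                       // true: empty end key
    }
  }
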
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Wed Feb 25 05:34:29 2009
@@ -226,9 +226,8 @@
    * @return Array of region starting row keys
    * @throws IOException
    */
-  public byte[][] getStartKeys() throws IOException {
+  public byte [][] getStartKeys() throws IOException {
     final List<byte[]> keyList = new ArrayList<byte[]>();
-
     MetaScannerVisitor visitor = new MetaScannerVisitor() {
       public boolean processRow(RowResult rowResult) throws IOException {
         HRegionInfo info = Writables.getHRegionInfo(
@@ -240,7 +239,6 @@
         }
         return true;
       }
-
     };
     MetaScanner.metaScan(configuration, visitor, this.tableName);
     return keyList.toArray(new byte[keyList.size()][]);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java Wed Feb 25 05:34:29 2009
@@ -57,7 +57,6 @@
         RowResult r = null;
         do {
           RowResult[] rrs = connection.getRegionServerWithRetries(callable);
-          
           if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) {
             break;
           }
@@ -70,7 +69,7 @@
         callable.setClose();
         connection.getRegionServerWithRetries(callable);
       }
-    } while (HStoreKey.compareTwoRowKeys(callable.getHRegionInfo(), startRow, LAST_ROW) != 0);
+    } while (HStoreKey.compareTwoRowKeys(startRow, LAST_ROW) != 0);
   }
 
   /**

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java Wed Feb 25 05:34:29 2009
@@ -22,6 +22,7 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map;
@@ -77,6 +78,16 @@
   }
 
   /**
+   * Create a new Cell with a given value and timestamp. Used by HStore.
+   * 
+   * @param bb
+   * @param timestamp
+   */
+  public Cell(final ByteBuffer bb, long timestamp) {
+    this.valueMap.put(timestamp, Bytes.toBytes(bb));
+  }
+
+  /**
    * @param vals
    *          array of values
    * @param ts

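A small sketch of the new ByteBuffer constructor; the value here is
illustrative.  The constructor copies the buffer contents into the cell's
value map under the given timestamp via Bytes.toBytes(ByteBuffer).

  import java.nio.ByteBuffer;
  import org.apache.hadoop.hbase.io.Cell;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CellFromBufferSketch {
    public static void main(String [] args) {
      ByteBuffer bb = ByteBuffer.wrap(Bytes.toBytes("a value"));
      Cell cell = new Cell(bb, System.currentTimeMillis());
      System.out.println(Bytes.toString(cell.getValue())); // a value
    }
  }
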
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java Wed Feb 25 05:34:29 2009
@@ -72,10 +72,10 @@
     public HBaseReader(FileSystem fs, String dirName, Configuration conf,
         boolean blockCacheEnabled, HRegionInfo hri)
     throws IOException {
-      super(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri), 
+      super(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(), 
           conf, false); // defer opening streams
       this.blockCacheEnabled = blockCacheEnabled;
-      open(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri), conf);
+      open(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(), conf);
       
       // Force reading of the mapfile index by calling midKey. Reading the
       // index will bring the index into memory over here on the client and
@@ -121,7 +121,7 @@
     public HBaseWriter(Configuration conf, FileSystem fs, String dirName,
         SequenceFile.CompressionType compression, final HRegionInfo hri)
     throws IOException {
-      super(conf, fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri),
+      super(conf, fs, dirName, new HStoreKey.HStoreKeyWritableComparator(),
          VALUE_CLASS, compression);
       // Default for mapfiles is 128.  Makes random reads faster if we
       // have more keys indexed and we're not 'next'-ing around in the

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfMapFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfMapFileReader.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfMapFileReader.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfMapFileReader.java Wed Feb 25 05:34:29 2009
@@ -90,7 +90,6 @@
     // have an actual midkey themselves. No midkey is how we indicate file is
     // not splittable.
     this.midkey = new HStoreKey((HStoreKey)mk);
-    this.midkey.setHRegionInfo(hri);
     // Is it top or bottom half?
     this.top = Reference.isTopFileRegion(r);
   }
@@ -212,4 +211,4 @@
     checkKey(key);
     return super.seek(key);
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java Wed Feb 25 05:34:29 2009
@@ -45,7 +45,7 @@
  * if passed a value type that it has not already been told about. Its  been
  * primed with hbase Writables and byte [].  Keys are always byte arrays.
  *
- * @param <byte []> key  TODO: Parameter K is never used, could be removed.
+ * @param <K> <byte []> key  TODO: Parameter K is never used, could be removed.
  * @param <V> value Expects a Writable or byte [].
  */
 public class HbaseMapWritable <K, V>
@@ -191,7 +191,7 @@
     // Then write out each key/value pair
     for (Map.Entry<byte [], V> e: instance.entrySet()) {
       Bytes.writeByteArray(out, e.getKey());
-      Byte id =getId(e.getValue().getClass());
+      Byte id = getId(e.getValue().getClass());
       out.writeByte(id);
       Object value = e.getValue();
       if (value instanceof byte []) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/MapFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/MapFile.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/MapFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/MapFile.java Wed Feb 25 05:34:29 2009
@@ -171,11 +171,13 @@
             CompressionType.BLOCK, progress);
     }
     
-    /** The number of entries that are added before an index entry is added.*/
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileWriter#getIndexInterval()
+     */
     public int getIndexInterval() { return indexInterval; }
 
-    /** Sets the index interval.
-     * @see #getIndexInterval()
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileWriter#setIndexInterval(int)
      */
     public void setIndexInterval(int interval) { indexInterval = interval; }
 
@@ -186,14 +188,17 @@
       conf.setInt(INDEX_INTERVAL, interval);
     }
 
-    /** Close the map. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileWriter#close()
+     */
     public synchronized void close() throws IOException {
       data.close();
       index.close();
     }
 
-    /** Append a key/value pair to the map.  The key must be greater or equal
-     * to the previous key added to the map. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileWriter#append(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
+     */
     public synchronized void append(WritableComparable key, Writable val)
       throws IOException {
 
@@ -250,10 +255,14 @@
     private WritableComparable[] keys;
     private long[] positions;
 
-    /** Returns the class of keys in this file. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#getKeyClass()
+     */
     public Class<?> getKeyClass() { return data.getKeyClass(); }
 
-    /** Returns the class of values in this file. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#getValueClass()
+     */
     public Class<?> getValueClass() { return data.getValueClass(); }
 
     /** Construct a map reader for the named map.*/
@@ -362,14 +371,15 @@
       }
     }
 
-    /** Re-positions the reader before its first key. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#reset()
+     */
     public synchronized void reset() throws IOException {
       data.seek(firstPosition);
     }
 
-    /** Get the key at approximately the middle of the file.
-     * 
-     * @throws IOException
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#midKey()
      */
     public synchronized WritableComparable midKey() throws IOException {
 
@@ -382,9 +392,8 @@
       return keys[pos];
     }
     
-    /** Reads the final key from the file.
-     *
-     * @param key key to read into
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#finalKey(org.apache.hadoop.io.WritableComparable)
      */
     public synchronized void finalKey(WritableComparable key)
       throws IOException {
@@ -404,9 +413,8 @@
       }
     }
 
-    /** Positions the reader at the named key, or if none such exists, at the
-     * first entry after the named key.  Returns true iff the named key exists
-     * in this map.
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#seek(org.apache.hadoop.io.WritableComparable)
      */
     public synchronized boolean seek(WritableComparable key) throws IOException {
       return seekInternal(key) == 0;
@@ -517,15 +525,17 @@
       return -(low + 1);                          // key not found.
     }
 
-    /** Read the next key/value pair in the map into <code>key</code> and
-     * <code>val</code>.  Returns true if such a pair exists and false when at
-     * the end of the map */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#next(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
+     */
     public synchronized boolean next(WritableComparable key, Writable val)
       throws IOException {
       return data.next(key, val);
     }
 
-    /** Return the value for the named key, or null if none exists. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#get(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
+     */
     public synchronized Writable get(WritableComparable key, Writable val)
       throws IOException {
       if (seek(key)) {
@@ -535,14 +545,8 @@
         return null;
     }
 
-    /** 
-     * Finds the record that is the closest match to the specified key.
-     * Returns <code>key</code> or if it does not exist, at the first entry
-     * after the named key.
-     * 
-     * @param key       - key that we're trying to find
-     * @param val       - data value if key is found
-     * @return          - the key that was the closest match or null if eof.
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#getClosest(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
      */
     public synchronized WritableComparable getClosest(WritableComparable key,
       Writable val)
@@ -550,15 +554,8 @@
       return getClosest(key, val, false);
     }
 
-    /** 
-     * Finds the record that is the closest match to the specified key.
-     * 
-     * @param key       - key that we're trying to find
-     * @param val       - data value if key is found
-     * @param before    - IF true, and <code>key</code> does not exist, return
-     * the first entry that falls just before the <code>key</code>.  Otherwise,
-     * return the record that sorts just after.
-     * @return          - the key that was the closest match or null if eof.
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#getClosest(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable, boolean)
      */
     public synchronized WritableComparable getClosest(WritableComparable key,
         Writable val, final boolean before)
@@ -578,7 +575,9 @@
       return nextKey;
     }
 
-    /** Close the map. */
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.io.StoreFileReader#close()
+     */
     public synchronized void close() throws IOException {
       if (!indexClosed) {
         index.close();

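A sketch of the getClosest 'before' flag described in the replaced javadoc:
with before == true, a miss returns the first entry falling just before the
probe key instead of the one just after.  This assumes the forked MapFile
keeps Hadoop MapFile's basic Reader constructor and a map file of HStoreKey
keys and ImmutableBytesWritable values at an illustrative path.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hbase.HStoreKey;
  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
  import org.apache.hadoop.hbase.io.MapFile;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.io.WritableComparable;

  public class ClosestKeySketch {
    public static void main(String [] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      MapFile.Reader reader = new MapFile.Reader(fs, "/hbase/table/region/mapfile", conf);
      HStoreKey probe = new HStoreKey(Bytes.toBytes("row5"), Bytes.toBytes("info:a"));
      ImmutableBytesWritable value = new ImmutableBytesWritable();
      // Nearest entry at or before the probe; null if nothing sorts before it.
      WritableComparable hit = reader.getClosest(probe, value, true);
      System.out.println(hit);
      reader.close();
    }
  }
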
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java Wed Feb 25 05:34:29 2009
@@ -7,41 +7,34 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
 
 /**
- * A reference to a part of a store file.  The file referenced usually lives
- * under a different region.  The part referenced is usually the top or bottom
- * half of the file.  References are made at region split time.  Being lazy
- * about copying data between the parent of the split and the split daughters
- * makes splitting faster.
+ * A reference to the top or bottom half of a store file.  The file referenced
+ * lives under a different region.  References are made at region split time.
  * 
- * <p>References work with {@link HalfMapFileReader}.  References know how to
- * write out the reference format in the file system and are whats juggled when
- * references are mixed in with direct store files.  The
- * {@link HalfMapFileReader} is used reading the referred to file.
+ * <p>References work with a special half store file type.  References know how
+ * to write out the reference format in the file system and are what's juggled
+ * when references are mixed in with direct store files.  The half store file
+ * type is used when reading the referred-to file.
  *
  * <p>References to store files located over in some other region look like
  * this in the file system
- * <code>1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184</code>:
- * i.e. an id followed by the name of the referenced region.  The data
- * ('mapfiles') of references are empty. The accompanying <code>info</code> file
- * contains the <code>midkey</code> that demarks top and bottom of the
- * referenced storefile, the id of the remote store we're referencing and
- * whether we're to serve the top or bottom region of the remote store file.
+ * <code>1278437856009925445.3323223323</code>:
+ * i.e. an id followed by the hash of the referenced region.
 * Note, a region is itself not splittable if it has instances of store file
 * references.  References are cleaned up by compactions.
  */
 public class Reference implements Writable {
-  // TODO: see if it makes sense making a ReferenceMapFile whose Writer is this
-  // class and whose Reader is the {@link HalfMapFileReader}.
-
-  private int encodedRegionName;
-  private long fileid;
+  private byte [] splitkey;
   private Range region;
-  private HStoreKey midkey;
-  
+
   /** 
    * For split HStoreFiles, it specifies if the file covers the lower half or
    * the upper half of the key range
@@ -52,66 +45,86 @@
     /** HStoreFile contains lower half of key range */
     bottom
   }
-  
- public Reference(final int ern, final long fid, final HStoreKey m,
-      final Range fr) {
-    this.encodedRegionName = ern;
-    this.fileid = fid;
+
+  /**
+   * Constructor
+   * @param s A serialized HStoreKey with the row we are to split on, an
+   * empty column and a timestamp of LATEST_TIMESTAMP; that is, the first
+   * possible entry in a row.  This is what we are splitting around.
+   * @param fr The file region: top or bottom half of the referenced file.
+   */
+  public Reference(final byte [] s, final Range fr) {
+    this.splitkey = s;
     this.region = fr;
-    this.midkey = m;
-  }
-  
- public Reference() {
-    this(-1, -1, null, Range.bottom);
   }
 
-  public long getFileId() {
-    return fileid;
+  /**
+   * Used when deserializing; {@link #readFields(DataInput)} fills in the fields.
+   */
+  public Reference() {
+    this(null, Range.bottom);
   }
 
   public Range getFileRegion() {
-    return region;
+    return this.region;
   }
-  
-  public HStoreKey getMidkey() {
-    return midkey;
-  }
-  
-  public int getEncodedRegionName() {
-    return this.encodedRegionName;
+
+  public byte [] getSplitKey() {
+    return splitkey;
   }
 
-  @Override
   public String toString() {
-    return encodedRegionName + "/" + fileid + "/" + region;
+    return "" + this.region;
   }
 
   // Make it serializable.
 
   public void write(DataOutput out) throws IOException {
-    // Write out the encoded region name as a String.  Doing it as a String
-    // keeps a Reference's serialization backword compatible with
-    // pre-HBASE-82 serializations.  ALternative is rewriting all
-    // info files in hbase (Serialized References are written into the
-    // 'info' file that accompanies HBase Store files).
-    out.writeUTF(Integer.toString(encodedRegionName));
-    out.writeLong(fileid);
     // Write true if we're doing top of the file.
-    out.writeBoolean(isTopFileRegion(region));
-    this.midkey.write(out);
+    out.writeBoolean(isTopFileRegion(this.region));
+    Bytes.writeByteArray(out, this.splitkey);
   }
 
   public void readFields(DataInput in) throws IOException {
-    this.encodedRegionName = Integer.parseInt(in.readUTF());
-    fileid = in.readLong();
     boolean tmp = in.readBoolean();
     // If true, set region to top.
-    region = tmp? Range.top: Range.bottom;
-    midkey = new HStoreKey();
-    midkey.readFields(in);
+    this.region = tmp? Range.top: Range.bottom;
+    this.splitkey = Bytes.readByteArray(in);
   }
-  
+
   public static boolean isTopFileRegion(final Range r) {
     return r.equals(Range.top);
   }
+
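+  /**
+   * Write this Reference to the FileSystem at <code>p</code>.
+   * @param fs
+   * @param p Path to write to.
+   * @return The passed <code>p</code>.
+   * @throws IOException
+   */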
+  public Path write(final FileSystem fs, final Path p)
+  throws IOException {
+    FSUtils.create(fs, p);
+    FSDataOutputStream out = fs.create(p);
+    try {
+      write(out);
+    } finally {
+      out.close();
+    }
+    return p;
+  }
+
+  /**
+   * Read a Reference from FileSystem.
+   * @param fs
+   * @param p
+   * @return New Reference made from passed <code>p</code>
+   * @throws IOException
+   */
+  public static Reference read(final FileSystem fs, final Path p)
+  throws IOException {
+    FSDataInputStream in = fs.open(p);
+    try {
+      Reference r = new Reference();
+      r.readFields(in);
+      return r;
+    } finally {
+      in.close();
+    }
+  }
 }
\ No newline at end of file

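Taken together, the reworked Reference round-trips like this (the filesystem,
path and split row below are illustrative; the sketch assumes the single-row
HStoreKey constructor defaults to an empty column and LATEST_TIMESTAMP, as the
constructor javadoc above describes):

  // Serialize the first possible key in the split row; this is what the
  // split pivots around.
  byte [] splitKey = Writables.getBytes(new HStoreKey(Bytes.toBytes("splitrow")));
  // The daughter serving keys above the split row references the top half
  // of the parent's store file.
  Reference ref = new Reference(splitKey, Reference.Range.top);
  // Persist next to the referring store file and read it back; the path is
  // made up for the example.
  FileSystem fs = FileSystem.get(new HBaseConfiguration());
  Path p = ref.write(fs, new Path("/hbase/TestTable/1278437856009925445/info/sample.ref"));
  Reference readBack = Reference.read(fs, p);
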
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Wed Feb 25 05:34:29 2009
@@ -20,36 +20,34 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.HashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.RowResult;
-
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStoreFile;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HLog;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.ipc.RemoteException;
 
 
 /**
@@ -292,19 +290,16 @@
     if (split == null) {
       return result;
     }
-    Path tabledir = HTableDescriptor.getTableDir(this.master.rootdir,
-        split.getTableDesc().getName());
+    Path tabledir = new Path(this.master.rootdir, split.getTableDesc().getNameAsString());
     for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
-      Path p = HStoreFile.getMapDir(tabledir, split.getEncodedName(),
+      Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
         family.getName());
-
       // Look for reference files.  Call listStatus with an anonymous
       // instance of PathFilter.
-
       FileStatus [] ps = this.master.fs.listStatus(p,
           new PathFilter () {
             public boolean accept(Path path) {
-              return HStore.isReference(path);
+              return StoreFile.isReference(path);
             }
           }
       );

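The reference check above is the split-safety test in miniature; as a
standalone helper it might read (hasReferences is a hypothetical name,
StoreFile.isReference is taken from the hunk above):

  // True if the store directory holds any reference files.  A region with
  // live references must not split again; compactions clean references up.
  static boolean hasReferences(final FileSystem fs, final Path storedir)
  throws IOException {
    FileStatus [] ps = fs.listStatus(storedir, new PathFilter() {
      public boolean accept(Path path) {
        return StoreFile.isReference(path);
      }
    });
    return ps != null && ps.length > 0;
  }
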
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java Wed Feb 25 05:34:29 2009
@@ -21,10 +21,10 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.Store;
 
 /** Instantiated to remove a column family from a table */
 class DeleteColumn extends ColumnOperation {
@@ -40,13 +40,14 @@
   @Override
   protected void postProcessMeta(MetaRegion m, HRegionInterface server)
   throws IOException {
-    Path tabledir = new Path(this.master.rootdir, tableName.toString());
     for (HRegionInfo i: unservedRegions) {
       i.getTableDesc().removeFamily(columnName);
       updateRegionInfo(server, m.getRegionName(), i);
       // Delete the directories used by the column
-      FSUtils.deleteColumnFamily(this.master.fs, tabledir, i.getEncodedName(),
-        this.columnName);
+      Path tabledir =
+        new Path(this.master.rootdir, i.getTableDesc().getNameAsString());
+      this.master.fs.delete(Store.getStoreHomedir(tabledir, i.getEncodedName(),
+        this.columnName), true);
     }
   }
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java Wed Feb 25 05:34:29 2009
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.master;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -88,8 +87,7 @@
   public int compareTo(MetaRegion other) {
     int result = Bytes.compareTo(this.regionName, other.getRegionName());
     if(result == 0) {
-      result = HStoreKey.compareTwoRowKeys(HRegionInfo.FIRST_META_REGIONINFO,
-        this.startKey, other.getStartKey());
+      result = HStoreKey.compareTwoRowKeys(this.startKey, other.getStartKey());
       if (result == 0) {
         // Might be on different host?
         result = this.server.compareTo(other.server);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Wed Feb 25 05:34:29 2009
@@ -655,7 +655,7 @@
       }
       synchronized (updateLock) {
         this.writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
-            new HLogEdit(HLog.METACOLUMN, HLogEdit.completeCacheFlush.get(),
+            new HLogEdit(HLog.METACOLUMN, HLogEdit.COMPLETE_CACHE_FLUSH,
                 System.currentTimeMillis()));
         this.numEntries++;
         Long seq = this.lastSeqWritten.get(regionName);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java Wed Feb 25 05:34:29 2009
@@ -20,11 +20,11 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.io.BatchOperation;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
+import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.HConstants;
 
@@ -38,19 +38,15 @@
 public class HLogEdit implements Writable, HConstants {
 
   /** Value stored for a deleted item */
-  public static ImmutableBytesWritable deleteBytes = null;
+  public static byte [] DELETED_BYTES = null;
 
   /** Value written to HLog on a complete cache flush */
-  public static ImmutableBytesWritable completeCacheFlush = null;
+  public static byte [] COMPLETE_CACHE_FLUSH = null;
 
   static {
     try {
-      deleteBytes =
-        new ImmutableBytesWritable("HBASE::DELETEVAL".getBytes(UTF8_ENCODING));
-    
-      completeCacheFlush =
-        new ImmutableBytesWritable("HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING));
-      
+      DELETED_BYTES = "HBASE::DELETEVAL".getBytes(UTF8_ENCODING);
+      COMPLETE_CACHE_FLUSH = "HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING);
     } catch (UnsupportedEncodingException e) {
       assert(false);
     }
@@ -58,12 +54,31 @@
   
   /**
    * @param value
-   * @return True if an entry and its content is {@link #deleteBytes}.
+   * @return True if the entry's content is {@link #DELETED_BYTES}.
    */
   public static boolean isDeleted(final byte [] value) {
-    return (value == null)? false: deleteBytes.compareTo(value) == 0;
+    // Guard null here; value.length below would NPE otherwise.
+    return (value == null)? false: isDeleted(value, 0, value.length);
   }
-  
+
+  /**
+   * @param value
+   * @return True if the entry's content is {@link #DELETED_BYTES}.
+   */
+  public static boolean isDeleted(final ByteBuffer value) {
+    return isDeleted(value.array(), value.arrayOffset(), value.limit());
+  }
+
+  /**
+   * @param value
+   * @param offset
+   * @param length
+   * @return True if the entry's content is {@link #DELETED_BYTES}.
+   */
+  public static boolean isDeleted(final byte [] value, final int offset,
+      final int length) {
+    return (value == null)? false:
+      Bytes.BYTES_RAWCOMPARATOR.compare(DELETED_BYTES, 0, DELETED_BYTES.length,
+        value, offset, length) == 0;
+  }
+
   /** If transactional log entry, these are the op codes */
   public enum TransactionalOperation {
     /** start transaction */