Posted to commits@hbase.apache.org by st...@apache.org on 2009/04/12 12:39:58 UTC

svn commit: r764289 [2/8] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io/hfile/ s...

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java Sun Apr 12 10:39:55 2009
@@ -25,11 +25,14 @@
 import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;
 import org.apache.hadoop.hbase.rest.serializer.ISerializable;
@@ -220,6 +223,50 @@
   }
 
   /**
+   * @param results List of KeyValues to convert.
+   * @return Map of Cells keyed by column name.
+   * TODO: This is the glue between the old way of doing things and the new.
+   * Here we convert our clean KeyValues to a Map of Cells.
+   */
+  public static HbaseMapWritable<byte [], Cell> createCells(final List<KeyValue> results) {
+    HbaseMapWritable<byte [], Cell> cells =
+      new HbaseMapWritable<byte [], Cell>();
+    // Walk backward through the list of results; the order has no effect
+    // because we're inserting into a sorted map.
+    for (ListIterator<KeyValue> i = results.listIterator(results.size());
+        i.hasPrevious();) {
+      KeyValue kv = i.previous();
+      byte [] column = kv.getColumn();
+      Cell c = cells.get(column);
+      if (c == null) {
+        c = new Cell(kv.getValue(), kv.getTimestamp());
+        cells.put(column, c);
+      } else {
+        c.add(kv.getValue(), kv.getTimestamp());
+      }
+    }
+    return cells;
+  }
+
+  /**
+   * @param results List of KeyValues to convert.
+   * @return Array of Cells, one per KeyValue, or null if
+   * <code>results</code> is null.
+   * TODO: This is the glue between the old way of doing things and the new.
+   * Here we convert our clean KeyValues to an array of Cells.
+   */
+  public static Cell [] createSingleCellArray(final List<KeyValue> results) {
+    if (results == null) return null;
+    int index = 0;
+    Cell [] cells = new Cell[results.size()];
+    for (KeyValue kv: results) {
+      cells[index++] = new Cell(kv.getValue(), kv.getTimestamp());
+    }
+    return cells;
+  }
+
+  /*
+   * (non-Javadoc)
+   * 
    * @see
    * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org
    * .apache.hadoop.hbase.rest.serializer.IRestSerializer)
@@ -228,4 +275,4 @@
       throws HBaseRestException {
     serializer.serializeCell(this);
   }
-}
\ No newline at end of file
+}
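
For context, a minimal (untested) sketch of how these new glue helpers
might be called. The row and column names are made up; the four-argument
KeyValue constructor is the one used elsewhere in this commit:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.HbaseMapWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellGlueExample {
      public static void main(String [] args) {
        // Two versions of the same column; createCells collapses them
        // into one multi-versioned Cell keyed by column name.
        List<KeyValue> results = Arrays.asList(
          new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("info:a"),
            2L, Bytes.toBytes("v2")),
          new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("info:a"),
            1L, Bytes.toBytes("v1")));
        HbaseMapWritable<byte [], Cell> cells = Cell.createCells(results);
        // createSingleCellArray keeps one Cell per KeyValue instead.
        Cell [] flat = Cell.createSingleCellArray(results);
        System.out.println(cells.size() + " column(s), " + flat.length
          + " cell(s)");
      }
    }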

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java Sun Apr 12 10:39:55 2009
@@ -26,6 +26,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -46,11 +47,11 @@
  * <p>This file is not splittable.  Calls to {@link #midkey()} return null.
  */
 public class HalfHFileReader extends HFile.Reader {
-  static final Log LOG = LogFactory.getLog(HalfHFileReader.class);
-  protected final boolean top;
+  final Log LOG = LogFactory.getLog(HalfHFileReader.class);
+  final boolean top;
   // This is the key we split around.  It's the first possible entry on a row:
   // i.e. empty column and a timestamp of LATEST_TIMESTAMP.
-  protected final byte [] splitkey;
+  final byte [] splitkey;
 
   /**
    * @param fs
@@ -99,6 +100,10 @@
         return delegate.getValueString();
       }
 
+      public KeyValue getKeyValue() {
+        return delegate.getKeyValue();
+      }
+
       public boolean next() throws IOException {
         boolean b = delegate.next();
         if (!b) {
@@ -115,16 +120,23 @@
       }
 
       public boolean seekBefore(byte[] key) throws IOException {
+        return seekBefore(key, 0, key.length);
+      }
+
+      public boolean seekBefore(byte [] key, int offset, int length)
+      throws IOException {
         if (top) {
-          if (getComparator().compare(key, splitkey) < 0) {
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) < 0) {
             return false;
           }
         } else {
-          if (getComparator().compare(key, splitkey) >= 0) {
-            return seekBefore(splitkey);
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) >= 0) {
+            return seekBefore(splitkey, 0, splitkey.length);
           }
         }
-        return this.delegate.seekBefore(key);
+        return this.delegate.seekBefore(key, offset, length);
       }
 
       public boolean seekTo() throws IOException {
@@ -152,22 +164,28 @@
       }
 
       public int seekTo(byte[] key) throws IOException {
+        return seekTo(key, 0, key.length);
+      }
+
+      public int seekTo(byte[] key, int offset, int length) throws IOException {
         if (top) {
-          if (getComparator().compare(key, splitkey) < 0) {
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) < 0) {
             return -1;
           }
         } else {
-          if (getComparator().compare(key, splitkey) >= 0) {
+          if (getComparator().compare(key, offset, length, splitkey, 0,
+              splitkey.length) >= 0) {
             // we would place the scanner in the second half.
            // it might be an error to ever return false here...
-            boolean res = delegate.seekBefore(splitkey);
+            boolean res = delegate.seekBefore(splitkey, 0, splitkey.length);
             if (!res) {
               throw new IOException("Seeking for a key in bottom of file, but key exists in top of file, failed on seekBefore(midkey)");
             }
             return 1;
           }
         }
-        return delegate.seekTo(key);
+        return delegate.seekTo(key, offset, length);
       }
 
       public Reader getReader() {
@@ -201,4 +219,4 @@
     // Returns null to indicate file is not splittable.
     return null;
   }
-}
\ No newline at end of file
+}
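
All the seek methods above reduce to the same clamping rule around the
split key. A standalone sketch of that rule (plain Java, not HBase code;
the return values mirror seekTo's conventions):

    import java.util.Comparator;

    public class HalfSeekRule {
      // top == true means this half-file serves keys >= splitkey.
      static int clamp(boolean top, byte [] key, byte [] splitkey,
          Comparator<byte []> cmp) {
        if (top) {
          if (cmp.compare(key, splitkey) < 0) {
            return -1;  // key belongs to the bottom half; refuse the seek
          }
          return 0;     // the real code delegates: delegate.seekTo(key)
        }
        if (cmp.compare(key, splitkey) >= 0) {
          return 1;     // pin the scanner just before splitkey instead
        }
        return 0;       // delegate.seekTo(key)
      }
    }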

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java Sun Apr 12 10:39:55 2009
@@ -179,7 +179,6 @@
   public void write(DataOutput out) throws IOException {
     // Write out the number of entries in the map
     out.writeInt(this.instance.size());
-
     // Then write out each key/value pair
     for (Map.Entry<byte [], V> e: instance.entrySet()) {
       Bytes.writeByteArray(out, e.getKey());
@@ -199,14 +198,13 @@
     // First clear the map.  Otherwise we will just accumulate
     // entries every time this method is called.
     this.instance.clear();
-    
     // Read the number of entries in the map
     int entries = in.readInt();
-    
     // Then read each key/value pair
     for (int i = 0; i < entries; i++) {
       byte [] key = Bytes.readByteArray(in);
-      Class clazz = getClass(in.readByte());
+      byte id = in.readByte();
+      Class clazz = getClass(id);
       V value = null;
       if (clazz.equals(byte [].class)) {
         byte [] bytes = Bytes.readByteArray(in);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java Sun Apr 12 10:39:55 2009
@@ -11,6 +11,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
@@ -48,13 +49,12 @@
 
   /**
    * Constructor
-   * @param s This is a serialized storekey with the row we are to split on,
-   * an empty column and a timestamp of the LATEST_TIMESTAMP.  This is the first
-   * possible entry in a row.  This is what we are splitting around.
+   * @param splitRow This is the row we are splitting around.
    * @param fr
    */
-  public Reference(final byte [] s, final Range fr) {
-    this.splitkey = s;
+  public Reference(final byte [] splitRow, final Range fr) {
+    this.splitkey = splitRow == null?
+      null: KeyValue.createFirstOnRow(splitRow).getKey();
     this.region = fr;
   }
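
A hedged sketch of the new calling convention: the caller passes the bare
split row and Reference derives the binary key itself (Range.top is
assumed here to be one of the Range values):

    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReferenceExample {
      public static void main(String [] args) {
        // Hypothetical split row; internally this becomes
        // KeyValue.createFirstOnRow(splitRow).getKey().
        byte [] splitRow = Bytes.toBytes("row-5000");
        Reference top = new Reference(splitRow, Reference.Range.top);
        System.out.println(top);
      }
    }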
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java Sun Apr 12 10:39:55 2009
@@ -26,12 +26,14 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.rest.descriptors.RestCell;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;
@@ -78,8 +80,8 @@
   // 
   // Map interface
   // 
-  
-  public Cell put(byte [] key, Cell value) {
+  public Cell put(byte [] key,
+    Cell value) {
     throw new UnsupportedOperationException("RowResult is read-only!");
   }
 
@@ -264,7 +266,37 @@
   public void restSerialize(IRestSerializer serializer) throws HBaseRestException {
     serializer.serializeRowResult(this);
   }  
-  
+
+  /**
+   * @param l List of rows, each a List of KeyValues.
+   * @return Array of RowResults, one per row.
+   * TODO: This is the glue between the old way of doing things and the new.
+   * Here we convert our clean KeyValues to the old RowResult.
+   */
+  public static RowResult [] createRowResultArray(final List<List<KeyValue>> l) {
+    RowResult [] results = new RowResult[l.size()];
+    int i = 0;
+    for (List<KeyValue> kvl: l) {
+      results[i++] = createRowResult(kvl);
+    }
+    return results;
+  }
+
+  /**
+   * @param results KeyValues for a single row.
+   * @return A RowResult, or null if <code>results</code> is empty.
+   * TODO: This is the glue between the old way of doing things and the new.
+   * Here we convert our clean KeyValues to the old RowResult.
+   */
+  public static RowResult createRowResult(final List<KeyValue> results) {
+    if (results.isEmpty()) {
+      return null;
+    }
+    HbaseMapWritable<byte [], Cell> cells = Cell.createCells(results);
+    byte [] row = results.get(0).getRow();
+    return new RowResult(row, cells);
+  }
+
   //
   // Writable
   //
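
A minimal, untested sketch of the new RowResult glue (names made up; all
KeyValues are assumed to share one row since the row is taken from the
first element):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowResultGlueExample {
      public static void main(String [] args) {
        List<KeyValue> kvs = Arrays.asList(
          new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("info:a"),
            1L, Bytes.toBytes("v")));
        // Returns null for an empty list.
        RowResult rr = RowResult.createRowResult(kvs);
        System.out.println(Bytes.toString(rr.getRow()));
      }
    }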

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sun Apr 12 10:39:55 2009
@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
@@ -187,7 +188,9 @@
     private byte [] firstKey = null;
 
     // Key previously appended.  Becomes the last key in the file.
-    private byte [] lastKey = null;
+    private byte [] lastKeyBuffer = null;
+    private int lastKeyOffset = -1;
+    private int lastKeyLength = -1;
 
     // See {@link BlockIndex}. Below four fields are used to write the block
     // index.
@@ -267,6 +270,7 @@
      * @param ostream Stream to use.
      * @param blocksize
      * @param compress
+     * @param c RawComparator to use.
-     * @param c
      * @throws IOException
      */
@@ -319,7 +323,6 @@
       if (this.out == null) return;
       long size = releaseCompressingStream(this.out);
       this.out = null;
-      
       blockKeys.add(firstKey);
       int written = longToInt(size);
       blockOffsets.add(Long.valueOf(blockBegin));
@@ -437,27 +440,58 @@
      * Add key/value to file.
      * Keys must be added in an order that agrees with the Comparator passed
      * on construction.
+     * @param kv KeyValue to add.  Cannot be empty nor null.
+     * @throws IOException
+     */
+    public void append(final KeyValue kv)
+    throws IOException {
+      append(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(),
+        kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+    }
+
+    /**
+     * Add key/value to file.
+     * Keys must be added in an order that agrees with the Comparator passed
+     * on construction.
      * @param key Key to add.  Cannot be empty nor null.
      * @param value Value to add.  Cannot be empty nor null.
      * @throws IOException
      */
     public void append(final byte [] key, final byte [] value)
     throws IOException {
-      checkKey(key);
-      checkValue(value);
+      append(key, 0, key.length, value, 0, value.length);
+    }
+
+    /**
+     * Add key/value to file.
+     * Keys must be added in an order that agrees with the Comparator passed
+     * on construction.
+     * @param key Key to add.  Cannot be empty nor null.
+     * @param value Value to add.  Cannot be empty nor null.
+     * @throws IOException
+     */
+    public void append(final byte [] key, final int koffset, final int klength,
+        final byte [] value, final int voffset, final int vlength)
+    throws IOException {
+      checkKey(key, koffset, klength);
+      checkValue(value, voffset, vlength);
       checkBlockBoundary();
       // Write length of key and value and then actual key and value bytes.
-      this.out.writeInt(key.length);
-      this.keylength += key.length;
-      this.out.writeInt(value.length);
-      this.valuelength += valuelength;
-      this.out.write(key);
-      if (value.length > 0) {
-        this.out.write(value);
-      }
+      this.out.writeInt(klength);
+      this.keylength += klength;
+      this.out.writeInt(vlength);
+      this.valuelength += vlength;
+      this.out.write(key, koffset, klength);
+      this.out.write(value, voffset, vlength);
       // Are we the first key in this block?
-      if (this.firstKey == null) this.firstKey = key;
-      this.lastKey = key;
+      if (this.firstKey == null) {
+        // Copy the key.
+        this.firstKey = new byte [klength];
+        System.arraycopy(key, koffset, this.firstKey, 0, klength);
+      }
+      this.lastKeyBuffer = key;
+      this.lastKeyOffset = koffset;
+      this.lastKeyLength = klength;
       this.entryCount ++;
     }
 
@@ -465,24 +499,29 @@
      * @param key Key to check.
      * @throws IOException
      */
-    private void checkKey(final byte [] key) throws IOException {
-      if (key == null || key.length <= 0) {
+    private void checkKey(final byte [] key, final int offset, final int length)
+    throws IOException {
+      if (key == null || length <= 0) {
         throw new IOException("Key cannot be null or empty");
       }
-      if (key.length > MAXIMUM_KEY_LENGTH) {
-        throw new IOException("Key length " + key.length + " > " +
+      if (length > MAXIMUM_KEY_LENGTH) {
+        throw new IOException("Key length " + length + " > " +
           MAXIMUM_KEY_LENGTH);
       }
-      if (this.lastKey != null) {
-        if (this.comparator.compare(this.lastKey, key) > 0) {
+      if (this.lastKeyBuffer != null) {
+        if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
+            this.lastKeyLength, key, offset, length) > 0) {
           throw new IOException("Added a key not lexically larger than" +
-            " previous key=" + Bytes.toString(key) + ", lastkey=" +
-            Bytes.toString(lastKey));
+            " previous key=" + Bytes.toString(key, offset, length) +
+            ", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
+                this.lastKeyLength));
         }
       }
     }
 
-    private void checkValue(final byte [] value) throws IOException {
+    private void checkValue(final byte [] value,
+        @SuppressWarnings("unused") final int offset,
+        final int length) throws IOException {
       if (value == null) {
         throw new IOException("Value cannot be null");
       }
@@ -562,8 +601,13 @@
      * @throws IOException
      */
     private long writeFileInfo(FSDataOutputStream o) throws IOException {
-      if (this.lastKey != null) {
-        appendFileInfo(this.fileinfo, FileInfo.LASTKEY, this.lastKey, false);
+      if (this.lastKeyBuffer != null) {
+        // Make a copy.  The copy is stuffed into an HbaseMapWritable, which
+        // needs a clean byte array; it won't take a (buffer, offset, length)
+        // tuple.
+        byte [] b = new byte[this.lastKeyLength];
+        System.arraycopy(this.lastKeyBuffer, this.lastKeyOffset, b, 0,
+          this.lastKeyLength);
+        appendFileInfo(this.fileinfo, FileInfo.LASTKEY, b, false);
       }
       int avgKeyLen = this.entryCount == 0? 0:
         (int)(this.keylength/this.entryCount);
@@ -734,7 +778,7 @@
         return null;
       }
       try {
-        return (RawComparator<byte[]>) Class.forName(clazzName).newInstance();
+        return (RawComparator<byte []>)Class.forName(clazzName).newInstance();
       } catch (InstantiationException e) {
         throw new IOException(e);
       } catch (IllegalAccessException e) {
@@ -775,11 +819,11 @@
      * @return Block number of the block containing the key or -1 if not in this
      * file.
      */
-    protected int blockContainingKey(final byte [] key) {
+    protected int blockContainingKey(final byte [] key, int offset, int length) {
       if (blockIndex == null) {
         throw new RuntimeException("Block index not loaded");
       }
-      return blockIndex.blockContainingKey(key);
+      return blockIndex.blockContainingKey(key, offset, length);
     }
     /**
      * @param metaBlockName
@@ -793,7 +837,8 @@
       if (metaIndex == null) {
         throw new IOException("Meta index not loaded");
       }
-      int block = metaIndex.blockContainingKey(Bytes.toBytes(metaBlockName));
+      byte [] mbname = Bytes.toBytes(metaBlockName);
+      int block = metaIndex.blockContainingKey(mbname, 0, mbname.length);
       if (block == -1)
         return null;
       long blockSize;
@@ -842,7 +887,6 @@
         if (cache != null) {
           ByteBuffer cachedBuf = cache.getBlock(name + block);
           if (cachedBuf != null) {
-            // LOG.debug("Reusing block for: " + block);
            // Return a distinct 'copy' of the block, so pos doesn't get
            // messed up by the scanner
             cacheHits++;
@@ -868,16 +912,13 @@
 
         byte [] magic = new byte[DATABLOCKMAGIC.length];
         buf.get(magic, 0, magic.length);
-        // LOG.debug("read block:"+buf.position() + " lim:" + buf.limit());
         if (!Arrays.equals(magic, DATABLOCKMAGIC)) {
           throw new IOException("Data magic is bad in block " + block);
         }
         // Toss the header. May have to remove later due to performance.
         buf.compact();
         buf.limit(buf.limit() - DATABLOCKMAGIC.length);
-        // LOG.debug("read block:"+buf.position() + " lim:" + buf.limit());
         buf.rewind();
-        // LOG.debug("read block:"+buf.position() + " lim:" + buf.limit());
 
        // Cache a copy, not the one we are sending back, so the position
        // doesn't get messed up.
@@ -993,6 +1034,11 @@
       public Scanner(Reader r) {
         this.reader = r;
       }
+      
+      public KeyValue getKeyValue() {
+        return new KeyValue(this.block.array(),
+            this.block.arrayOffset() + this.block.position() - 8);
+      }
 
       public ByteBuffer getKey() {
         if (this.block == null || this.currKeyLen == 0) {
@@ -1047,14 +1093,19 @@
         currValueLen = block.getInt();
         return true;
       }
+      
+      public int seekTo(byte [] key) throws IOException {
+        return seekTo(key, 0, key.length);
+      }
+      
 
-      public int seekTo(byte[] key) throws IOException {
-        int b = reader.blockContainingKey(key);
+      public int seekTo(byte[] key, int offset, int length) throws IOException {
+        int b = reader.blockContainingKey(key, offset, length);
         if (b < 0) return -1; // falls before the beginning of the file! :-(
         // Avoid re-reading the same block (that'd be dumb).
         loadBlock(b);
         
-        return blockSeek(key, false);
+        return blockSeek(key, offset, length, false);
       }
 
       /**
@@ -1067,13 +1118,13 @@
        * @param seekBefore find the key before the exact match.
        * @return
        */
-      private int blockSeek(byte[] key, boolean seekBefore) {
+      private int blockSeek(byte[] key, int offset, int length, boolean seekBefore) {
         int klen, vlen;
         int lastLen = 0;
         do {
           klen = block.getInt();
           vlen = block.getInt();
-          int comp = this.reader.comparator.compare(key, 0, key.length,
+          int comp = this.reader.comparator.compare(key, offset, length,
             block.array(), block.arrayOffset() + block.position(), klen);
           if (comp == 0) {
             if (seekBefore) {
@@ -1105,8 +1156,13 @@
         return 1; // didn't exactly find it.
       }
 
-      public boolean seekBefore(byte[] key) throws IOException {
-        int b = reader.blockContainingKey(key);
+      public boolean seekBefore(byte [] key) throws IOException {
+        return seekBefore(key, 0, key.length);
+      }
+      
+      public boolean seekBefore(byte[] key, int offset, int length)
+      throws IOException {
+        int b = reader.blockContainingKey(key, offset, length);
         if (b < 0)
           return false; // key is before the start of the file.
 
@@ -1121,7 +1177,7 @@
           // TODO shortcut: seek forward in this block to the last key of the block.
         }
         loadBlock(b);
-        blockSeek(key, true);
+        blockSeek(key, offset, length, true);
         return true;
       }
 
@@ -1323,8 +1379,8 @@
      * @return Offset of block containing <code>key</code> or -1 if this file
      * does not contain the request.
      */
-    int blockContainingKey(final byte[] key) {
-      int pos = Arrays.binarySearch(blockKeys, key, this.comparator);
+    int blockContainingKey(final byte[] key, int offset, int length) {
+      int pos = Bytes.binarySearch(blockKeys, key, offset, length, this.comparator);
       if (pos < 0) {
         pos ++;
         pos *= -1;
@@ -1484,4 +1540,4 @@
     // size() will wrap to negative integer if it exceeds 2GB (From tfile).
     return (int)(l & 0x00000000ffffffffL);
   }
-}
\ No newline at end of file
+}
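
The new append(KeyValue) overload writes straight from the KeyValue's
backing array through the (buffer, offset, length) variant, so nothing is
copied on the append path except the remembered first key of each block.
A minimal sketch of its use (writer construction and close are elided):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class HFileAppendExample {
      // KeyValues must already be in comparator order; checkKey() throws
      // if a key is not lexically larger than the previous one.
      static void writeAll(HFile.Writer w, List<KeyValue> sorted)
      throws IOException {
        for (KeyValue kv: sorted) {
          w.append(kv);  // append(buf, koff, klen, buf, voff, vlen) inside
        }
      }
    }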

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java Sun Apr 12 10:39:55 2009
@@ -22,6 +22,8 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.hbase.KeyValue;
+
 /**
  * A scanner allows you to position yourself within a HFile and
  * scan through it.  It allows you to reposition yourself as well.
@@ -49,6 +51,7 @@
    * @throws IOException
    */
   public int seekTo(byte[] key) throws IOException;
+  public int seekTo(byte[] key, int offset, int length) throws IOException;
   /**
    * Consider the key stream of all the keys in the file, 
    * <code>k[0] .. k[n]</code>, where there are n keys in the file.
@@ -60,6 +63,7 @@
    * @throws IOException
    */
   public boolean seekBefore(byte [] key) throws IOException;
+  public boolean seekBefore(byte []key, int offset, int length) throws IOException;
   /**
    * Positions this scanner at the start of the file.
    * @return False if empty file; i.e. a call to next would return false and
@@ -89,6 +93,10 @@
    */
   public ByteBuffer getValue();
   /**
+   * @return Instance of {@link KeyValue}.
+   */
+  public KeyValue getKeyValue();
+  /**
    * Convenience method to get a copy of the key as a string - interpreting the
    * bytes as UTF8. You must call {@link #seekTo(byte[])} before this method.
    * @return key as a string
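
A small sketch of the new offset/length seek, which lets a caller pass a
slice of a shared buffer instead of copying the key out first:

    import java.io.IOException;

    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class SeekSliceExample {
      static boolean positionAt(HFileScanner s, byte [] buf, int off, int len)
      throws IOException {
        int res = s.seekTo(buf, off, len);
        if (res == -1) {
          return s.seekTo();  // key sorts before the file; go to first key
        }
        return true;          // 0 = exact match, 1 = at the key just before
      }
    }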

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Sun Apr 12 10:39:55 2009
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HLog;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java Sun Apr 12 10:39:55 2009
@@ -20,16 +20,15 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.Vector;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.NavigableSet;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HStoreKey;
-import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.ColumnNameParseException;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -39,9 +38,9 @@
   final Log LOG = LogFactory.getLog(this.getClass().getName());
 
   // Pattern to determine if a column key is a regex
-  static Pattern isRegexPattern =
+  static final Pattern isRegexPattern =
     Pattern.compile("^.*[\\\\+|^&*$\\[\\]\\}{)(]+.*$");
-  
+
   /** The kind of match we are doing on a column: */
   private static enum MATCH_TYPE {
     /** Just check the column family name */
@@ -52,6 +51,66 @@
     SIMPLE
   }
 
+  private final List<ColumnMatcher> matchers = new ArrayList<ColumnMatcher>();
+
+  // True when scanning is done
+  protected volatile boolean scannerClosed = false;
+
+  // The timestamp to match entries against
+  protected final long timestamp;
+
+  private boolean wildcardMatch = false;
+  private boolean multipleMatchers = false;
+
+  /** Constructor for abstract base class */
+  protected HAbstractScanner(final long timestamp,
+    final NavigableSet<byte []> columns)
+  throws IOException {
+    this.timestamp = timestamp;
+    for (byte [] column: columns) {
+      ColumnMatcher matcher = new ColumnMatcher(column);
+      if (matcher.isWildCardMatch()) {
+        this.wildcardMatch = true;
+      }
+      matchers.add(matcher);
+      this.multipleMatchers = matchers.size() > 1;
+    }
+  }
+
+  /**
+   * For a particular column, find all the matchers defined for the column.
+   * Compare the column family and column key using the matchers. The first one
+   * that matches returns true. If no matchers are successful, return false.
+   * 
+   * @param kv KeyValue to test against the matchers.
+   * @return true if any of the matchers for the column match the column family
+   * and the column key.
+   *                 
+   * @throws IOException
+   */
+  protected boolean columnMatch(final KeyValue kv)
+  throws IOException {
+    if (this.matchers.isEmpty()) {
+      return false;
+    }
+    for(int m = 0; m < this.matchers.size(); m++) {
+      if (this.matchers.get(m).matches(kv)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public boolean isWildcardScanner() {
+    return this.wildcardMatch;
+  }
+  
+  public boolean isMultipleMatchScanner() {
+    return this.multipleMatchers;
+  }
+
+  public abstract boolean next(List<KeyValue> results)
+  throws IOException;
+
   /**
    * This class provides column matching functions that are more sophisticated
    * than a simple string compare. There are three types of matching:
@@ -66,10 +125,17 @@
     private MATCH_TYPE matchType;
     private byte [] family;
     private Pattern columnMatcher;
+    // Column without delimiter, for easy compare against KeyValue columns
     private byte [] col;
   
     ColumnMatcher(final byte [] col) throws IOException {
-      byte [][] parse = HStoreKey.parseColumn(col);
+      byte [][] parse = parseColumn(col);
+      // Make up column without delimiter
+      byte [] columnWithoutDelimiter =
+        new byte [parse[0].length + parse[1].length];
+      System.arraycopy(parse[0], 0, columnWithoutDelimiter, 0, parse[0].length);
+      System.arraycopy(parse[1], 0, columnWithoutDelimiter, parse[0].length,
+        parse[1].length);
       // First position has family.  Second has qualifier.
       byte [] qualifier = parse[1];
       try {
@@ -79,11 +145,11 @@
           this.wildCardmatch = true;
         } else if (isRegexPattern.matcher(Bytes.toString(qualifier)).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
-          this.columnMatcher = Pattern.compile(Bytes.toString(col));
+          this.columnMatcher = Pattern.compile(Bytes.toString(columnWithoutDelimiter));
           this.wildCardmatch = true;
         } else {
           this.matchType = MATCH_TYPE.SIMPLE;
-          this.col = col;
+          this.col = columnWithoutDelimiter;
           this.wildCardmatch = false;
         }
       } catch(Exception e) {
@@ -92,96 +158,55 @@
       }
     }
     
-    /** Matching method */
-    boolean matches(final byte [] c) throws IOException {
-      if(this.matchType == MATCH_TYPE.SIMPLE) {
-        return Bytes.equals(c, this.col);
+    /**
+     * @param kv KeyValue to test.
+     * @return True if <code>kv</code> matches this matcher.
+     * @throws IOException
+     */
+    boolean matches(final KeyValue kv) throws IOException {
+      if (this.matchType == MATCH_TYPE.SIMPLE) {
+        return kv.matchingColumnNoDelimiter(this.col);
       } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
-        return HStoreKey.matchingFamily(this.family, c);
+        return kv.matchingFamily(this.family);
       } else if (this.matchType == MATCH_TYPE.REGEX) {
-        return this.columnMatcher.matcher(Bytes.toString(c)).matches();
+        // Pass a column without the delimiter since that's what we're
+        // expected to match.
+        int o = kv.getColumnOffset();
+        int l = kv.getColumnLength(o);
+        String columnMinusQualifier = Bytes.toString(kv.getBuffer(), o, l);
+        return this.columnMatcher.matcher(columnMinusQualifier).matches();
       } else {
         throw new IOException("Invalid match type: " + this.matchType);
       }
     }
-    
+
     boolean isWildCardMatch() {
       return this.wildCardmatch;
     }
-  }
-
-  // Holds matchers for each column family.  Its keyed by the byte [] hashcode
-  // which you can get by calling Bytes.mapKey.
-  private Map<Integer, Vector<ColumnMatcher>> okCols =
-    new HashMap<Integer, Vector<ColumnMatcher>>();
-  
-  // True when scanning is done
-  protected volatile boolean scannerClosed = false;
-  
-  // The timestamp to match entries against
-  protected long timestamp;
-  
-  private boolean wildcardMatch;
-  private boolean multipleMatchers;
 
-  /** Constructor for abstract base class */
-  protected HAbstractScanner(long timestamp, byte [][] targetCols)
-  throws IOException {
-    this.timestamp = timestamp;
-    this.wildcardMatch = false;
-    this.multipleMatchers = false;
-    for(int i = 0; i < targetCols.length; i++) {
-      Integer key = HStoreKey.getFamilyMapKey(targetCols[i]);
-      Vector<ColumnMatcher> matchers = okCols.get(key);
-      if (matchers == null) {
-        matchers = new Vector<ColumnMatcher>();
-      }
-      ColumnMatcher matcher = new ColumnMatcher(targetCols[i]);
-      if (matcher.isWildCardMatch()) {
-        this.wildcardMatch = true;
-      }
-      matchers.add(matcher);
-      if (matchers.size() > 1) {
-        this.multipleMatchers = true;
-      }
-      okCols.put(key, matchers);
-    }
-  }
-
-  /**
-   * For a particular column, find all the matchers defined for the column.
-   * Compare the column family and column key using the matchers. The first one
-   * that matches returns true. If no matchers are successful, return false.
-   * 
-   * @param column Column to test
-   * @return true if any of the matchers for the column match the column family
-   * and the column key.
-   *                 
-   * @throws IOException
-   */
-  protected boolean columnMatch(final byte [] column) throws IOException {
-    Vector<ColumnMatcher> matchers =
-      this.okCols.get(HStoreKey.getFamilyMapKey(column));
-    if (matchers == null) {
-      return false;
-    }
-    for(int m = 0; m < matchers.size(); m++) {
-      if (matchers.get(m).matches(column)) {
-        return true;
-      }
+    /**
+     * @param c Column name
+     * @return Return array of size two whose first element has the family
+     * prefix of passed column <code>c</code> and whose second element is the
+     * column qualifier.
+     * @throws ColumnNameParseException 
+     */
+    public static byte [][] parseColumn(final byte [] c)
+    throws ColumnNameParseException {
+      final byte [][] result = new byte [2][];
+      // TODO: Change this so we don't parse here but instead use the
+      // comparator inside KeyValue that just looks at column family.
+      final int index = KeyValue.getFamilyDelimiterIndex(c, 0, c.length);
+      if (index == -1) {
+        throw new ColumnNameParseException("Impossible column name: " +
+          Bytes.toString(c));
+      }
+      result[0] = new byte [index];
+      System.arraycopy(c, 0, result[0], 0, index);
+      final int len = c.length - (index + 1);
+      result[1] = new byte[len];
+      System.arraycopy(c, index + 1 /*Skip delimiter*/, result[1], 0,
+        len);
+      return result;
     }
-    return false;
   }
-
-  public boolean isWildcardScanner() {
-    return this.wildcardMatch;
-  }
-  
-  public boolean isMultipleMatchScanner() {
-    return this.multipleMatchers;
-  }
-
-  public abstract boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
-  throws IOException;
-  
-}
+}
\ No newline at end of file
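
For reference, a standalone sketch of the family/qualifier split that
parseColumn performs. The ':' delimiter is an assumption standing in for
KeyValue.getFamilyDelimiterIndex():

    import java.util.Arrays;

    public class ParseColumnSketch {
      static byte [][] parseColumn(final byte [] c) {
        int index = 0;
        while (index < c.length && c[index] != ':') {
          index++;
        }
        if (index == c.length) {
          throw new IllegalArgumentException("Impossible column name");
        }
        byte [][] result = new byte [2][];
        result[0] = Arrays.copyOfRange(c, 0, index);            // family
        result[1] = Arrays.copyOfRange(c, index + 1, c.length); // qualifier
        return result;
      }
    }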

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Sun Apr 12 10:39:55 2009
@@ -23,6 +23,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -41,11 +42,11 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Metadata;
@@ -457,8 +458,8 @@
    * @param sync
    * @throws IOException
    */
-  void append(byte [] regionName, byte [] tableName,
-      TreeMap<HStoreKey, byte[]> edits, boolean sync)
+  void append(byte [] regionName, byte [] tableName, List<KeyValue> edits,
+    boolean sync)
   throws IOException {
     if (closed) {
       throw new IOException("Cannot append; log is closed");
@@ -473,13 +474,10 @@
         this.lastSeqWritten.put(regionName, Long.valueOf(seqNum[0]));
       }
       int counter = 0;
-      for (Map.Entry<HStoreKey, byte[]> es : edits.entrySet()) {
-        HStoreKey key = es.getKey();
+      for (KeyValue kv: edits) {
         HLogKey logKey =
-          new HLogKey(regionName, tableName, key.getRow(), seqNum[counter++]);
-        HLogEdit logEdit =
-          new HLogEdit(key.getColumn(), es.getValue(), key.getTimestamp());
-       doWrite(logKey, logEdit, sync);
+          new HLogKey(regionName, tableName, seqNum[counter++]);
+       doWrite(logKey, new HLogEdit(kv), sync);
 
         this.numEntries++;
       }
@@ -555,7 +553,6 @@
     }
     byte [] regionName = regionInfo.getRegionName();
     byte [] tableName = regionInfo.getTableDesc().getName();
-    
     synchronized (updateLock) {
       long seqNum = obtainSeqNum();
       // The 'lastSeqWritten' map holds the sequence number of the oldest
@@ -566,7 +563,7 @@
         this.lastSeqWritten.put(regionName, Long.valueOf(seqNum));
       }
 
-      HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum);
+      HLogKey logKey = new HLogKey(regionName, tableName, seqNum);
       boolean sync = regionInfo.isMetaRegion() || regionInfo.isRootRegion();
       doWrite(logKey, logEdit, sync);
       this.numEntries++;
@@ -645,16 +642,15 @@
    * @throws IOException
    */
   void completeCacheFlush(final byte [] regionName, final byte [] tableName,
-      final long logSeqId) throws IOException {
-
+    final long logSeqId)
+  throws IOException {
     try {
       if (this.closed) {
         return;
       }
       synchronized (updateLock) {
-        this.writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
-            new HLogEdit(HLog.METACOLUMN, HLogEdit.COMPLETE_CACHE_FLUSH,
-                System.currentTimeMillis()));
+        this.writer.append(new HLogKey(regionName, tableName, logSeqId),
+          completeCacheFlushLogEdit());
         this.numEntries++;
         Long seq = this.lastSeqWritten.get(regionName);
         if (seq != null && logSeqId >= seq.longValue()) {
@@ -667,6 +663,12 @@
     }
   }
 
+  private HLogEdit completeCacheFlushLogEdit() {
+    // TODO Profligacy!!! Fix all this creation.
+    return new HLogEdit(new KeyValue(METAROW, METACOLUMN,
+      System.currentTimeMillis(), HLogEdit.COMPLETE_CACHE_FLUSH));
+  }
+
   /**
    * Abort a cache flush.
    * Call if the flush fails. Note that the only recovery for an aborted flush
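
A hedged sketch of the reworked append(): it is package-private, so a real
caller lives in org.apache.hadoop.hbase.regionserver, and the names below
are made up. Each KeyValue becomes one (HLogKey, HLogEdit) pair; the row
now travels inside the KeyValue rather than in the HLogKey:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    class HLogAppendExample {
      static void logEdit(HLog log, byte [] regionName, byte [] tableName)
      throws IOException {
        List<KeyValue> edits = new ArrayList<KeyValue>();
        edits.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("info:a"),
          System.currentTimeMillis(), Bytes.toBytes("v")));
        log.append(regionName, tableName, edits, false);
      }
    }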

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java Sun Apr 12 10:39:55 2009
@@ -19,57 +19,36 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.io.BatchOperation;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.*;
-
-import java.io.*;
-import java.nio.ByteBuffer;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.BatchOperation;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
 
 /**
  * A log value.
  *
  * These aren't sortable; you need to sort by the matching HLogKey.
- * The table and row are already identified in HLogKey.
- * This just indicates the column and value.
+ * TODO: Remove.  Just output KVs.
  */
 public class HLogEdit implements Writable, HConstants {
-
   /** Value stored for a deleted item */
-  public static final byte [] DELETED_BYTES = Bytes.toBytes("HBASE::DELETEVAL");
-
+  public static byte [] DELETED_BYTES;
   /** Value written to HLog on a complete cache flush */
-  public static final byte [] COMPLETE_CACHE_FLUSH = Bytes.toBytes("HBASE::CACHEFLUSH");
-
-  /**
-   * @param value
-   * @return True if an entry and its content is {@link #DELETED_BYTES}.
-   */
-  public static boolean isDeleted(final byte [] value) {
-    return isDeleted(value, 0, value.length);
-  }
+  public static byte [] COMPLETE_CACHE_FLUSH;
 
-  /**
-   * @param value
-   * @return True if an entry and its content is {@link #DELETED_BYTES}.
-   */
-  public static boolean isDeleted(final ByteBuffer value) {
-    return isDeleted(value.array(), value.arrayOffset(), value.limit());
-  }
-
-  /**
-   * @param value
-   * @param offset 
-   * @param length 
-   * @return True if an entry and its content is {@link #DELETED_BYTES}.
-   */
-  public static boolean isDeleted(final byte [] value, final int offset,
-      final int length) {
-    return (value == null)? false:
-      Bytes.BYTES_RAWCOMPARATOR.compare(DELETED_BYTES, 0, DELETED_BYTES.length,
-        value, offset, length) == 0;
+  static {
+    try {
+      DELETED_BYTES = "HBASE::DELETEVAL".getBytes(UTF8_ENCODING);
+      COMPLETE_CACHE_FLUSH = "HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      assert(false);
+    }
   }
 
   /** If transactional log entry, these are the op codes */
@@ -84,9 +63,7 @@
     ABORT
   }
 
-  private byte [] column;
-  private byte [] val;
-  private long timestamp;
+  private KeyValue kv;
   private static final int MAX_VALUE_LEN = 128;
   
   private boolean isTransactionEntry;
@@ -98,30 +75,28 @@
    * Default constructor used by Writable
    */
   public HLogEdit() {
-    super();
+    this(null);
   }
 
   /**
    * Construct a fully initialized HLogEdit
-   * @param c column name
-   * @param bval value
-   * @param timestamp timestamp for modification
-   */
-  public HLogEdit(byte [] c, byte [] bval, long timestamp) {
-    this.column = c;
-    this.val = bval;
-    this.timestamp = timestamp;
+   * @param kv
+   */
+  public HLogEdit(final KeyValue kv) {
+    this.kv = kv;
     this.isTransactionEntry = false;
   }
-  
-  /** Construct a WRITE transaction. 
-   * 
+
+  /** 
+   * Construct a WRITE transaction. 
    * @param transactionId
+   * @param row
    * @param op
    * @param timestamp
    */
-  public HLogEdit(long transactionId, BatchOperation op, long timestamp) {
-    this(op.getColumn(), op.getValue(), timestamp);
+  public HLogEdit(long transactionId, final byte [] row, BatchOperation op,
+      long timestamp) {
+    this(new KeyValue(row, op.getColumn(), timestamp,
+      op.isPut()? KeyValue.Type.Put: KeyValue.Type.Delete, op.getValue()));
     // This covers delete ops too...
     this.transactionId = transactionId;
     this.operation = TransactionalOperation.WRITE;
@@ -134,26 +109,15 @@
    * @param op
    */
   public HLogEdit(long transactionId, TransactionalOperation op) {
-    this.column = new byte[0];
-    this.val = new byte[0];
+    this.kv = KeyValue.LOWESTKEY;
     this.transactionId = transactionId;
     this.operation = op;
     this.isTransactionEntry = true;
   }
 
-  /** @return the column */
-  public byte [] getColumn() {
-    return this.column;
-  }
-
-  /** @return the value */
-  public byte [] getVal() {
-    return this.val;
-  }
-
-  /** @return the timestamp */
-  public long getTimestamp() {
-    return this.timestamp;
+  /** @return the KeyValue */
+  public KeyValue getKeyValue() {
+    return this.kv;
   }
 
   /** @return true if entry is a transactional entry */
@@ -187,33 +151,22 @@
   public String toString() {
     String value = "";
     try {
-      value = (this.val.length > MAX_VALUE_LEN)?
-        new String(this.val, 0, MAX_VALUE_LEN, HConstants.UTF8_ENCODING) +
-          "...":
-        new String(getVal(), HConstants.UTF8_ENCODING);
+      value = (this.kv.getValueLength() > MAX_VALUE_LEN)?
+        new String(this.kv.getValue(), 0, MAX_VALUE_LEN,
+          HConstants.UTF8_ENCODING) + "...":
+        new String(this.kv.getValue(), HConstants.UTF8_ENCODING);
     } catch (UnsupportedEncodingException e) {
       throw new RuntimeException("UTF8 encoding not present?", e);
     }
-    return "("
-        + Bytes.toString(getColumn())
-        + "/"
-        + getTimestamp()
-        + "/"
-        + (isTransactionEntry ? "tran: " + transactionId + " op "
-            + operation.toString() +"/": "") + value + ")";
+    return this.kv.toString() +
+      (isTransactionEntry ? "/tran=" + transactionId + "/op=" +
+        operation.toString(): "") + "/value=" + value;
   }
   
   // Writable
 
   public void write(DataOutput out) throws IOException {
-    Bytes.writeByteArray(out, this.column);
-    if (this.val == null) {
-      out.writeInt(0);
-    } else {
-      out.writeInt(this.val.length);
-      out.write(this.val);
-    }
-    out.writeLong(timestamp);
+    Bytes.writeByteArray(out, kv.getBuffer(), kv.getOffset(), kv.getLength());
     out.writeBoolean(isTransactionEntry);
     if (isTransactionEntry) {
       out.writeLong(transactionId);
@@ -222,14 +175,31 @@
   }
   
   public void readFields(DataInput in) throws IOException {
-    this.column = Bytes.readByteArray(in);
-    this.val = new byte[in.readInt()];
-    in.readFully(this.val);
-    this.timestamp = in.readLong();
+    byte [] kvbytes = Bytes.readByteArray(in);
+    this.kv = new KeyValue(kvbytes, 0, kvbytes.length);
     isTransactionEntry = in.readBoolean();
     if (isTransactionEntry) {
       transactionId = in.readLong();
       operation = TransactionalOperation.valueOf(in.readUTF());
     }
   }
-}
+
+  /**
+   * @param value
+   * @return True if an entry and its content is {@link #DELETED_BYTES}.
+   */
+  public static boolean isDeleted(final byte [] value) {
+    return isDeleted(value, 0, value.length);
+  }
+
+  /**
+   * @param value
+   * @return True if an entry and its content is {@link #DELETED_BYTES}.
+   */
+  public static boolean isDeleted(final byte [] value, final int offset,
+      final int length) {
+    return (value == null)? false:
+      Bytes.BYTES_RAWCOMPARATOR.compare(DELETED_BYTES, 0, DELETED_BYTES.length,
+        value, offset, length) == 0;
+  }
+}
\ No newline at end of file
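
A minimal round-trip sketch of the KeyValue-backed serialization
(DataOutputBuffer and DataInputBuffer are the stock Hadoop io helpers):

    import java.io.IOException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.HLogEdit;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class HLogEditRoundTrip {
      public static void main(String [] args) throws IOException {
        HLogEdit edit = new HLogEdit(new KeyValue(Bytes.toBytes("row1"),
          Bytes.toBytes("info:a"), 1L, Bytes.toBytes("v")));
        DataOutputBuffer out = new DataOutputBuffer();
        edit.write(out);  // one length-prefixed KeyValue blob + txn flag
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        HLogEdit back = new HLogEdit();
        back.readFields(in);
        System.out.println(back.getKeyValue());
      }
    }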

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java Sun Apr 12 10:39:55 2009
@@ -23,7 +23,6 @@
 import org.apache.hadoop.io.*;
 
 import java.io.*;
-import java.util.Arrays;
 
 /**
  * A Key for an entry in the change log.
@@ -32,17 +31,17 @@
  * identifies the appropriate table and row.  Within a table and row, they're 
  * also sorted.
  * 
- * Some Transactional edits (START, COMMIT, ABORT) will not have an associated row.
+ * <p>Some Transactional edits (START, COMMIT, ABORT) will not have an
+ * associated row.
  */
 public class HLogKey implements WritableComparable<HLogKey> {
   private byte [] regionName;
   private byte [] tablename;
-  private byte [] row;
   private long logSeqNum;
 
   /** Create an empty key useful when deserializing */
   public HLogKey() {
-    this(null, null, null, 0L);
+    this(null, null, 0L);
   }
   
   /**
@@ -52,14 +51,12 @@
    *
    * @param regionName  - name of region
    * @param tablename   - name of table
-   * @param row         - row key
    * @param logSeqNum   - log sequence number
    */
   public HLogKey(final byte [] regionName, final byte [] tablename,
-      final byte [] row, long logSeqNum) {
+      long logSeqNum) {
     this.regionName = regionName;
     this.tablename = tablename;
-    this.row = row;
     this.logSeqNum = logSeqNum;
   }
 
@@ -76,12 +73,7 @@
   public byte [] getTablename() {
     return tablename;
   }
-  
-  /** @return row key */
-  public byte [] getRow() {
-    return row;
-  }
-  
+
   /** @return log sequence number */
   public long getLogSeqNum() {
     return logSeqNum;
@@ -90,7 +82,7 @@
   @Override
   public String toString() {
     return Bytes.toString(tablename) + "/" + Bytes.toString(regionName) + "/" +
-      Bytes.toString(row) + "/" + logSeqNum;
+      logSeqNum;
   }
   
   @Override
@@ -106,8 +98,7 @@
   
   @Override
   public int hashCode() {
-    int result = Arrays.hashCode(this.regionName);
-    result ^= Arrays.hashCode(this.row);
+    int result = this.regionName.hashCode();
     result ^= this.logSeqNum;
     return result;
   }
@@ -118,18 +109,11 @@
 
   public int compareTo(HLogKey o) {
     int result = Bytes.compareTo(this.regionName, o.regionName);
-    
     if(result == 0) {
-      result = Bytes.compareTo(this.row, o.row);
-      
-      if(result == 0) {
-        
-        if (this.logSeqNum < o.logSeqNum) {
-          result = -1;
-          
-        } else if (this.logSeqNum > o.logSeqNum) {
-          result = 1;
-        }
+      if (this.logSeqNum < o.logSeqNum) {
+        result = -1;
+      } else if (this.logSeqNum > o.logSeqNum) {
+        result = 1;
       }
     }
     return result;
@@ -142,14 +126,12 @@
   public void write(DataOutput out) throws IOException {
     Bytes.writeByteArray(out, this.regionName);
     Bytes.writeByteArray(out, this.tablename);
-    Bytes.writeByteArray(out, this.row);
     out.writeLong(logSeqNum);
   }
   
   public void readFields(DataInput in) throws IOException {
     this.regionName = Bytes.readByteArray(in);
     this.tablename = Bytes.readByteArray(in);
-    this.row = Bytes.readByteArray(in);
     this.logSeqNum = in.readLong();
   }
 }
\ No newline at end of file
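
With the row gone, HLogKeys for the same region order purely by sequence
number. An untested sketch:

    import org.apache.hadoop.hbase.regionserver.HLogKey;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HLogKeyOrderExample {
      public static void main(String [] args) {
        byte [] region = Bytes.toBytes("region-a");
        byte [] table = Bytes.toBytes("t");
        HLogKey first = new HLogKey(region, table, 1L);
        HLogKey second = new HLogKey(region, table, 2L);
        // Ordering is now (regionName, logSeqNum); prints "true".
        System.out.println(first.compareTo(second) < 0);
      }
    }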