Posted to commits@hbase.apache.org by st...@apache.org on 2008/11/17 21:37:35 UTC

svn commit: r718370 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/hadoop/hbase/util/ src/java/org/onelab/filter/ src/test/org/apache/hadoop/hbase/regionserver/

Author: stack
Date: Mon Nov 17 12:37:34 2008
New Revision: 718370

URL: http://svn.apache.org/viewvc?rev=718370&view=rev
Log:
HBASE-1003 If cell exceeds TTL but not VERSIONs, will not be removed during major compaction

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Hash.java
    hadoop/hbase/trunk/src/java/org/onelab/filter/HashFunction.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=718370&r1=718369&r2=718370&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Nov 17 12:37:34 2008
@@ -74,6 +74,8 @@
    HBASE-996   Migration script to up the versions in catalog tables
    HBASE-991   Update the mapred package document examples so they work with
                TRUNK/0.19.0.
+   HBASE-1003  If cell exceeds TTL but not VERSIONs, will not be removed during
+               major compaction
       
   IMPROVEMENTS
    HBASE-901   Add a limit to key length, check key and value length on client side

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=718370&r1=718369&r2=718370&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Mon Nov 17 12:37:34 2008
@@ -1109,21 +1109,19 @@
           timesSeen = 1;
         }
 
-        // Added majorCompaction here to make sure all versions make it to 
-        // the major compaction so we do not remove the wrong last versions
-        // this effected HBASE-826
-        if (timesSeen <= family.getMaxVersions() || !majorCompaction) {
-          // Keep old versions until we have maxVersions worth.
-          // Then just skip them.
-          if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
-            // Only write out objects with non-zero length key and value
-            if (!isExpired(sk, ttl, now)) {
+        // Don't write empty rows or columns.  Only remove cells on major
+        // compaction.  Remove if expired of > VERSIONS
+        if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
+          boolean expired = false;
+          if (!majorCompaction ||
+              (timesSeen <= family.getMaxVersions() &&
+                !(expired = isExpired(sk, ttl, now)))) {
               compactedOut.append(sk, vals[smallestKey]);
-            } else {
-              // HBASE-855 remove one from timesSeen because it did not make it
-              // past expired check -- don't count against max versions.
-              timesSeen--;
-            }
+          }
+          if (expired) {
+            // HBASE-855 remove one from timesSeen because it did not make it
+            // past expired check -- don't count against max versions.
+            timesSeen--;
           }
         }
 
@@ -1144,7 +1142,7 @@
       closeCompactionReaders(Arrays.asList(rdrs));
     }
   }
-  
+
   private void closeCompactionReaders(final List<MapFile.Reader> rdrs) {
     for (MapFile.Reader r: rdrs) {
       try {
@@ -1712,12 +1710,7 @@
   
   static boolean isExpired(final HStoreKey hsk, final long ttl,
       final long now) {
-    boolean result = ttl != HConstants.FOREVER && now > hsk.getTimestamp() + ttl;
-    if (result && LOG.isDebugEnabled()) {
-      LOG.debug("rowAtOrBeforeCandidate 1:" + hsk +
-        ": expired, skipped");
-    }
-    return result;
+    return ttl != HConstants.FOREVER && now > hsk.getTimestamp() + ttl;
   }
 
   /* Find a candidate for row that is at or before passed key, sk, in mapfile.
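For readers following the diff, the per-cell decision the patched HStore.compact() loop now makes boils down to: a minor compaction carries every non-empty cell forward, while a major compaction writes a cell out only if it is still within the family's VERSIONS limit and its TTL has not elapsed. The following is a standalone sketch of that decision in plain Java; the class, the Decision enum, the FOREVER sentinel and all parameter names are illustrative stand-ins, not HBase API.

  // Standalone sketch of the per-cell retention decision made during
  // compaction after this patch.  Illustrative only: the class, the
  // Decision enum and the FOREVER sentinel are stand-ins, not HBase API.
  public class CompactionRetentionSketch {

    /** Stand-in for HConstants.FOREVER, i.e. a TTL that never expires. */
    static final long FOREVER = Long.MAX_VALUE;

    enum Decision { KEEP, DROP_EXCESS_VERSION, DROP_EXPIRED }

    static Decision decide(final boolean majorCompaction, final int timesSeen,
        final int maxVersions, final long cellTimestamp, final long ttlMillis,
        final long now) {
      // Minor compactions rewrite every non-empty cell; nothing is dropped.
      if (!majorCompaction) {
        return Decision.KEEP;
      }
      // Major compaction: drop copies beyond the family's VERSIONS limit.
      if (timesSeen > maxVersions) {
        return Decision.DROP_EXCESS_VERSION;
      }
      // Within the version limit: drop only if the cell's TTL has elapsed
      // (same test as HStore.isExpired()).
      boolean expired = ttlMillis != FOREVER && now > cellTimestamp + ttlMillis;
      return expired ? Decision.DROP_EXPIRED : Decision.KEEP;
    }

    public static void main(final String[] args) {
      final long now = System.currentTimeMillis();
      final long hour = 60 * 60 * 1000L;
      // Cell written two hours ago, one-hour TTL, within the VERSIONS limit:
      // kept by a minor compaction, dropped as expired by a major one.
      System.out.println(decide(false, 1, 3, now - 2 * hour, hour, now));
      System.out.println(decide(true, 1, 3, now - 2 * hour, hour, now));
    }
  }

Note that in the patched loop a cell dropped as expired also decrements timesSeen so it does not count against the VERSIONS quota (the HBASE-855 comment carried over in the diff); the sketch leaves that bookkeeping out.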

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Hash.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Hash.java?rev=718370&r1=718369&r2=718370&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Hash.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Hash.java Mon Nov 17 12:37:34 2008
@@ -24,6 +24,9 @@
  * This class represents a common API for hashing functions.
  */
 public abstract class Hash {
+  // TODO: Fix the design tangle that has classes over in org.onelab.filter
+  // referring to this class.  Would need to also move the Jenkins and Murmur
+  // hashing function too.
   /** Constant to denote invalid hash type. */
   public static final int INVALID_HASH = -1;
   /** Constant to denote {@link JenkinsHash}. */

Modified: hadoop/hbase/trunk/src/java/org/onelab/filter/HashFunction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/onelab/filter/HashFunction.java?rev=718370&r1=718369&r2=718370&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/onelab/filter/HashFunction.java (original)
+++ hadoop/hbase/trunk/src/java/org/onelab/filter/HashFunction.java Mon Nov 17 12:37:34 2008
@@ -50,8 +50,6 @@
 package org.onelab.filter;
 
 import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.hbase.util.MurmurHash;
 
 /**
  * Implements a hash object that returns a certain number of hashed values.
@@ -102,15 +100,14 @@
   }//end constructor
 
   /** Clears <i>this</i> hash function. A NOOP */
-  public void clear(){
-  }//end clear()
+  public void clear() {
+  }
 
   /**
    * Hashes a specified key into several integers.
    * @param k The specified key.
    * @return The array of hashed values.
    */
-  @SuppressWarnings("unchecked")
   public int[] hash(Key k){
       byte[] b = k.getBytes();
       if(b == null) {

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=718370&r1=718369&r2=718370&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Mon Nov 17 12:37:34 2008
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.io.MapFile;
 
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -147,6 +148,24 @@
       }
     }
     assertTrue(containsStartRow);
+    // Do a simple TTL test.
+    final int ttlInSeconds = 1;
+    for (HStore store: this.r.stores.values()) {
+      store.ttl = ttlInSeconds * 1000;
+    }
+    Thread.sleep(ttlInSeconds * 1000);
+    r.compactStores(true);
+    int count = 0;
+    for (MapFile.Reader reader: this.r.stores.
+        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getReaders()) {
+      reader.reset();
+      HStoreKey key = new HStoreKey();
+      ImmutableBytesWritable val = new ImmutableBytesWritable();
+      while(reader.next(key, val)) {
+        count++;
+      }
+    }
+    assertTrue(count == 0);
   }
 
   private void createStoreFile(final HRegion region) throws IOException {
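The timing the new TestCompaction code depends on is simple: the store TTL is forced to one second and the test sleeps at least that long before calling compactStores(true), so every previously flushed cell satisfies now > timestamp + ttl and the major compaction should write out no cells at all. A small sketch of that arithmetic, using the same expiry rule as isExpired above; the class and helper names here are made up, not HBase API.

  // Sketch of the timing assumption behind the new TTL assertions in
  // TestCompaction.  Names are illustrative stand-ins.
  public class TtlTimingSketch {

    // Same expiry rule as HStore.isExpired() for a finite TTL (ms).
    static boolean expired(final long cellTimestamp, final long ttlMillis,
        final long now) {
      return now > cellTimestamp + ttlMillis;
    }

    public static void main(final String[] args) {
      // The test writes its cells, sets store.ttl to 1000 ms, then sleeps
      // 1000 ms, so by the time the major compaction runs, 'now' is strictly
      // past every cell's timestamp plus the TTL and nothing is kept.
      final long ttlMillis = 1000L;
      final long writtenAt = System.currentTimeMillis();
      final long atCompaction = writtenAt + ttlMillis + 1;
      System.out.println(expired(writtenAt, ttlMillis, atCompaction)); // true
    }
  }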