Posted to commits@hbase.apache.org by ns...@apache.org on 2012/01/18 04:07:25 UTC
svn commit: r1232732 - in /hbase/branches/0.89-fb/src:
main/java/org/apache/hadoop/hbase/io/hfile/
main/java/org/apache/hadoop/hbase/regionserver/
main/java/org/apache/hadoop/hbase/regionserver/metrics/
main/java/org/apache/hadoop/hbase/util/ test/java...
Author: nspiegelberg
Date: Wed Jan 18 03:07:24 2012
New Revision: 1232732
URL: http://svn.apache.org/viewvc?rev=1232732&view=rev
Log:
[jira] [HBASE-5010] [89-fb] Filter HFiles based on TTL
Summary:
Modifying scanner selection in StoreScanner to take TTL into account, so that we
don't scan StoreFiles that only contain expired keys.
This diff is for 89-fb. The HBase trunk diff is at
https://reviews.facebook.net/D1017.
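The idea, roughly: compute the oldest timestamp that can still be unexpired (current time minus the
family's TTL) and drop any scanner whose underlying file or memstore cannot contain anything newer.
Below is a simplified, stand-alone sketch of that selection step, using the shouldUseScanner() hook
this patch introduces; the class and method names here are made up for illustration and are not the
literal patch code (in the patch this lives in StoreScanner.selectScannersFrom()).

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class ScannerSelectionSketch {
  /**
   * Keep only the scanners that may contain unexpired, matching data for this
   * scan. Hypothetical helper mirroring the patch's selection logic.
   */
  static List<KeyValueScanner> selectScanners(Scan scan,
      SortedSet<byte[]> columns, long ttlMillis,
      List<? extends KeyValueScanner> allScanners) {
    // Anything older than this is already expired under the family TTL.
    long oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttlMillis;
    List<KeyValueScanner> selected =
        new ArrayList<KeyValueScanner>(allScanners.size());
    for (KeyValueScanner scanner : allScanners) {
      // A StoreFile whose maximum timestamp is below oldestUnexpiredTS holds
      // only expired keys, so its scanner is skipped entirely; Bloom filter
      // and time-range checks are applied through the same hook.
      if (scanner.shouldUseScanner(scan, columns, oldestUnexpiredTS)) {
        selected.add(scanner);
      }
    }
    return selected;
  }
}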
Test Plan:
Unit tests (existing ones and a new one).
Reviewers: kannan, liyintang
Reviewed By: kannan
CC: kannan, mbautin
Differential Revision: https://reviews.facebook.net/D909
Added:
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
Modified:
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Threads.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Wed Jan 18 03:07:24 2012
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.io.hfile
import java.lang.ref.WeakReference;
import java.util.PriorityQueue;
+import java.util.SortedSet;
+import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.ConcurrentHashMap;
@@ -817,4 +819,25 @@ public class LruBlockCache implements Bl
public void shutdown() {
this.scheduleThreadPool.shutdown();
}
+
+ /** Clears the cache. Used in tests. */
+ public void clearCache() {
+ map.clear();
+ }
+
+ /**
+ * Used in testing. May be very inefficient.
+ * @return the set of cached file names
+ */
+ SortedSet<String> getCachedFileNamesForTest() {
+ SortedSet<String> fileNames = new TreeSet<String>();
+ for (String cacheKey : map.keySet()) {
+ int sepIndex = cacheKey.indexOf(HFile.CACHE_KEY_SEPARATOR);
+ if (sepIndex != -1) {
+ fileNames.add(cacheKey.substring(0, sepIndex));
+ }
+ }
+ return fileNames;
+ }
+
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java Wed Jan 18 03:07:24 2012
@@ -20,8 +20,10 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
+import java.util.SortedSet;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
/**
* Scanner that returns the next KeyValue.
@@ -69,6 +71,19 @@ public interface KeyValueScanner {
*/
public void close();
+ /**
+ * Allows to filter out scanners (both StoreFile and memstore) that we don't
+ * want to use based on criteria such as Bloom filters and timestamp ranges.
+ * @param scan the scan that we are selecting scanners for
+ * @param columns the set of columns in the current column family, or null if
+ * not specified by the scan
+ * @param oldestUnexpiredTS the oldest timestamp we are interested in for
+ * this query, based on TTL
+ * @return true if the scanner should be included in the query
+ */
+ public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
+ long oldestUnexpiredTS);
+
// "Lazy scanner" optimizations
/**
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java Wed Jan 18 03:07:24 2012
@@ -480,9 +480,12 @@ public class MemStore implements HeapSiz
* @param scan
* @return False if the key definitely does not exist in this Memstore
*/
- public boolean shouldSeek(Scan scan) {
- return timeRangeTracker.includesTimeRange(scan.getTimeRange()) ||
- snapshotTimeRangeTracker.includesTimeRange(scan.getTimeRange());
+ public boolean shouldSeek(Scan scan, long oldestUnexpiredTS) {
+ return (timeRangeTracker.includesTimeRange(scan.getTimeRange()) ||
+ snapshotTimeRangeTracker.includesTimeRange(scan.getTimeRange()))
+ && (Math.max(timeRangeTracker.getMaximumTimestamp(),
+ snapshotTimeRangeTracker.getMaximumTimestamp()) >=
+ oldestUnexpiredTS);
}
public TimeRangeTracker getSnapshotTimeRangeTracker() {
@@ -658,6 +661,12 @@ public class MemStore implements HeapSiz
public long getSequenceID() {
return Long.MAX_VALUE;
}
+
+ @Override
+ public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
+ long oldestUnexpiredTS) {
+ return shouldSeek(scan, oldestUnexpiredTS);
+ }
}
public final static long FIXED_OVERHEAD = ClassSize.align(
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java Wed Jan 18 03:07:24 2012
@@ -20,9 +20,11 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
+import java.util.SortedSet;
import org.apache.commons.lang.NotImplementedException;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
/**
* A "non-lazy" scanner which always does a real seek operation. Most scanners
@@ -52,4 +54,11 @@ public abstract class NonLazyKeyValueSca
return forward ? scanner.reseek(kv) : scanner.seek(kv);
}
+ @Override
+ public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
+ long oldestUnexpiredTS) {
+ // No optimizations implemented by default.
+ return true;
+ }
+
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java Wed Jan 18 03:07:24 2012
@@ -80,16 +80,15 @@ public class ScanQueryMatcher {
* @param scan
* @param family
* @param columnSet
- * @param ttl
* @param rowComparator
*/
public ScanQueryMatcher(Scan scan, byte [] family,
- NavigableSet<byte[]> columnSet, long ttl,
- KeyValue.KeyComparator rowComparator, int maxVersions,
- long readPointToUse,
- boolean retainDeletesInOutput) {
+ NavigableSet<byte[]> columnSet, KeyValue.KeyComparator rowComparator,
+ int maxVersions, long readPointToUse,
+ boolean retainDeletesInOutput,
+ long oldestUnexpiredTS) {
this.tr = scan.getTimeRange();
- this.oldestStamp = System.currentTimeMillis() - ttl;
+ this.oldestStamp = oldestUnexpiredTS;
this.rowComparator = rowComparator;
this.deletes = new ScanDeleteTracker();
this.stopRow = scan.getStopRow();
@@ -125,13 +124,13 @@ public class ScanQueryMatcher {
}
public ScanQueryMatcher(Scan scan, byte [] family,
- NavigableSet<byte[]> columns, long ttl,
- KeyValue.KeyComparator rowComparator, int maxVersions) {
- /* By default we will not include deletes */
- /* deletes are included explicitly (for minor compaction) */
- this(scan, family, columns, ttl, rowComparator, maxVersions,
- Long.MAX_VALUE /* max Readpoint to Track versions */,
- false);
+ NavigableSet<byte[]> columns, KeyValue.KeyComparator rowComparator,
+ int maxVersions, long oldestUnexpiredTS) {
+ // By default we will not include deletes.
+ // Deletes are included explicitly (for minor compaction).
+ this(scan, family, columns, rowComparator, maxVersions,
+ Long.MAX_VALUE, // max Readpoint to Track versions
+ false, oldestUnexpiredTS);
}
/**
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Jan 18 03:07:24 2012
@@ -703,6 +703,11 @@ public class Store extends SchemaConfigu
}
}
+ /**
+ * Get all scanners with no filtering based on TTL (that happens further down
+ * the line).
+ * @return all scanners for this store
+ */
protected List<KeyValueScanner> getScanners(boolean cacheBlocks,
boolean isGet,
boolean isCompaction,
@@ -722,8 +727,9 @@ public class Store extends SchemaConfigu
// TODO this used to get the store files in descending order,
// but now we get them in ascending order, which I think is
// actually more correct, since memstore get put at the end.
- List<StoreFileScanner> sfScanners = StoreFileScanner
- .getScannersForStoreFiles(storeFiles, cacheBlocks, isGet, isCompaction, matcher);
+ List<StoreFileScanner> sfScanners =
+ StoreFileScanner.getScannersForStoreFiles(storeFiles, cacheBlocks,
+ isGet, isCompaction, matcher);
List<KeyValueScanner> scanners =
new ArrayList<KeyValueScanner>(sfScanners.size()+1);
scanners.addAll(sfScanners);
@@ -824,10 +830,10 @@ public class Store extends SchemaConfigu
}
}
- /*
- * Compact the most recent N files. Essentially a hook for testing.
+ /**
+ * Compact the most recent N files.
*/
- protected void compactRecent(int N) throws IOException {
+ public void compactRecentForTesting(int N) throws IOException {
List<StoreFile> filesToCompact;
long maxId;
boolean isMajor;
@@ -1263,7 +1269,8 @@ public class Store extends SchemaConfigu
Scan scan = new Scan();
scan.setMaxVersions(family.getMaxVersions());
/* include deletes, unless we are doing a major compaction */
- scanner = new StoreScanner(this, scan, scanners, smallestReadPoint, !majorCompaction);
+ scanner = new StoreScanner(this, scan, scanners, smallestReadPoint,
+ !majorCompaction);
int bytesWritten = 0;
// since scanner.next() can return 'false' but still be delivering data,
// we have to use a do/while loop.
@@ -1713,7 +1720,8 @@ public class Store extends SchemaConfigu
//////////////////////////////////////////////////////////////////////////////
/**
- * Return a scanner for both the memstore and the HStore files
+ * Return a scanner for both the memstore and the HStore files. Assumes we
+ * are not in a compaction.
* @throws IOException
*/
public StoreScanner getScanner(Scan scan,
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Wed Jan 18 03:07:24 2012
@@ -1190,20 +1190,19 @@ public class StoreFile {
reader.close();
}
- public boolean shouldSeek(Scan scan, final SortedSet<byte[]> columns) {
- return (passesTimerangeFilter(scan) && passesBloomFilter(scan, columns));
- }
-
/**
- * Check if this storeFile may contain keys within the TimeRange
- * @param scan
- * @return False if it definitely does not exist in this StoreFile
+ * Check if this storeFile may contain keys within the TimeRange.
+ * @param scan the current scan
+ * @param oldestUnexpiredTS the oldest timestamp that is not expired, as
+ * determined by the column family's TTL
+ * @return false if queried keys definitely don't exist in this StoreFile
*/
- private boolean passesTimerangeFilter(Scan scan) {
+ boolean passesTimerangeFilter(Scan scan, long oldestUnexpiredTS) {
if (timeRangeTracker == null) {
return true;
} else {
- return timeRangeTracker.includesTimeRange(scan.getTimeRange());
+ return timeRangeTracker.includesTimeRange(scan.getTimeRange()) &&
+ timeRangeTracker.getMaximumTimestamp() >= oldestUnexpiredTS;
}
}
@@ -1223,8 +1222,7 @@ public class StoreFile {
* filter, or if the Bloom filter is not applicable for the scan.
* False if the Bloom filter is applicable and the scan fails it.
*/
- private boolean passesBloomFilter(Scan scan,
- final SortedSet<byte[]> columns) {
+ boolean passesBloomFilter(Scan scan, final SortedSet<byte[]> columns) {
if (!scan.isGetScan())
return true;
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Wed Jan 18 03:07:24 2012
@@ -20,15 +20,6 @@
package org.apache.hadoop.hbase.regionserver;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.util.Bytes;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -36,6 +27,13 @@ import java.util.List;
import java.util.SortedSet;
import java.util.concurrent.atomic.AtomicLong;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
+
/**
* KeyValueScanner adaptor over the Reader. It also provides hooks into
* bloom filter things.
@@ -252,11 +250,6 @@ class StoreFileScanner implements KeyVal
}
}
- // StoreFile filter hook.
- public boolean shouldSeek(Scan scan, final SortedSet<byte[]> columns) {
- return reader.shouldSeek(scan, columns);
- }
-
@Override
public long getSequenceID() {
return reader.getSequenceID();
@@ -369,4 +362,11 @@ class StoreFileScanner implements KeyVal
static final long getSeekCount() {
return seekCount.get();
}
+
+ @Override
+ public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
+ long oldestUnexpiredTS) {
+ return reader.passesTimerangeFilter(scan, oldestUnexpiredTS) &&
+ reader.passesBloomFilter(scan, columns);
+ }
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Wed Jan 18 03:07:24 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Scanner scans both the memstore and the HStore. Coalesce KeyValue stream
@@ -54,6 +55,7 @@ class StoreScanner extends NonLazyKeyVal
private final boolean useRowColBloom;
private final Scan scan;
private final NavigableSet<byte[]> columns;
+ private final long oldestUnexpiredTS;
/** We don't ever expect to change this, the constant is just for clarity. */
static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true;
@@ -67,7 +69,7 @@ class StoreScanner extends NonLazyKeyVal
/** An internal constructor. */
private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
- final NavigableSet<byte[]> columns){
+ final NavigableSet<byte[]> columns, long ttl) {
this.store = store;
initializeMetricNames();
this.cacheBlocks = cacheBlocks;
@@ -76,6 +78,7 @@ class StoreScanner extends NonLazyKeyVal
explicitColumnQuery = numCol > 0;
this.scan = scan;
this.columns = columns;
+ oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttl;
// We look up row-column Bloom filters for multi-column queries as part of
// the seek operation. However, we also look the row-column Bloom filter
@@ -85,7 +88,8 @@ class StoreScanner extends NonLazyKeyVal
}
/**
- * Opens a scanner across memstore, snapshot, and all StoreFiles.
+ * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we
+ * are not in a compaction.
*
* @param store who we scan
* @param scan the spec
@@ -93,16 +97,16 @@ class StoreScanner extends NonLazyKeyVal
* @throws IOException
*/
StoreScanner(Store store, Scan scan, final NavigableSet<byte[]> columns)
- throws IOException {
- this(store, scan.getCacheBlocks(), scan, columns);
- matcher = new ScanQueryMatcher(scan, store.getFamily().getName(),
- columns, store.ttl, store.comparator.getRawComparator(),
- store.versionsToReturn(scan.getMaxVersions()),
- Long.MAX_VALUE,
- false);
+ throws IOException {
+ this(store, scan.getCacheBlocks(), scan, columns, store.ttl);
+ matcher =
+ new ScanQueryMatcher(scan, store.getFamily().getName(), columns,
+ store.comparator.getRawComparator(),
+ store.versionsToReturn(scan.getMaxVersions()), Long.MAX_VALUE,
+ false, oldestUnexpiredTS);
// Pass columns to try to filter out unnecessary StoreFiles.
- List<KeyValueScanner> scanners = getScanners(scan, columns);
+ List<KeyValueScanner> scanners = getScannersNoCompaction();
// Seek all scanners to the start of the Row (or if the exact matching row
// key does not exist, then to the start of the next matching Row).
@@ -128,29 +132,30 @@ class StoreScanner extends NonLazyKeyVal
}
/**
- * Used for major compactions.<p>
- *
- * Opens a scanner across specified StoreFiles.
+ * Opens a scanner across specified StoreFiles. Can be used in compactions.
* @param store who we scan
* @param scan the spec
- * @param scanners ancilliary scanners
- * @param smallestReadPoint the readPoint that we should use for tracking versions
+ * @param scanners ancillary scanners
+ * @param smallestReadPoint the readPoint that we should use for tracking
+ * versions
* @param retainDeletesInOutput should we retain deletes after compaction?
*/
- StoreScanner(Store store, Scan scan, List<? extends KeyValueScanner> scanners,
- long smallestReadPoint,
- boolean retainDeletesInOutput)
- throws IOException {
- this(store, false, scan, null);
+ StoreScanner(Store store, Scan scan,
+ List<? extends KeyValueScanner> scanners, long smallestReadPoint,
+ boolean retainDeletesInOutput) throws IOException {
+ this(store, false, scan, null, store.ttl);
+
+ matcher =
+ new ScanQueryMatcher(scan, store.getFamily().getName(), null,
+ store.comparator.getRawComparator(),
+ store.versionsToReturn(scan.getMaxVersions()), smallestReadPoint,
+ retainDeletesInOutput, oldestUnexpiredTS);
- matcher = new ScanQueryMatcher(scan, store.getFamily().getName(),
- null, store.ttl, store.comparator.getRawComparator(),
- store.versionsToReturn(scan.getMaxVersions()),
- smallestReadPoint,
- retainDeletesInOutput);
+ // Filter the list of scanners using Bloom filters, time range, TTL, etc.
+ scanners = selectScannersFrom(scanners);
// Seek all scanners to the initial key
- for(KeyValueScanner scanner : scanners) {
+ for (KeyValueScanner scanner : scanners) {
scanner.seek(matcher.getStartKey());
}
@@ -158,17 +163,17 @@ class StoreScanner extends NonLazyKeyVal
heap = new KeyValueHeap(scanners, store.comparator);
}
- // Constructor for testing.
+ /** Constructor for testing. */
StoreScanner(final Scan scan, final byte [] colFamily, final long ttl,
final KeyValue.KVComparator comparator,
final NavigableSet<byte[]> columns,
final List<KeyValueScanner> scanners)
throws IOException {
- this(null, scan.getCacheBlocks(), scan, columns);
- this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl,
- comparator.getRawComparator(), scan.getMaxVersions(),
- Long.MAX_VALUE,
- false);
+ this(null, scan.getCacheBlocks(), scan, columns, ttl);
+ this.matcher =
+ new ScanQueryMatcher(scan, colFamily, columns,
+ comparator.getRawComparator(), scan.getMaxVersions(),
+ Long.MAX_VALUE, false, oldestUnexpiredTS);
// Seek all scanners to the initial key
for(KeyValueScanner scanner : scanners) {
@@ -196,27 +201,29 @@ class StoreScanner extends NonLazyKeyVal
this.metricNameGetsize = mutationSignature + ".getsize";
}
- /*
- * @return List of scanners to seek, possibly filtered by StoreFile.
+ /**
+ * Get a filtered list of scanners. Assumes we are not in a compaction.
+ * @return list of scanners to seek
*/
- private List<KeyValueScanner> getScanners(Scan scan,
- final NavigableSet<byte[]> columns) throws IOException {
- List<KeyValueScanner> allStoreScanners =
- this.store.getScanners(cacheBlocks, isGet, false, this.matcher);
+ private List<KeyValueScanner> getScannersNoCompaction() throws IOException {
+ final boolean isCompaction = false;
+ return selectScannersFrom(store.getScanners(cacheBlocks, isGet,
+ isCompaction, matcher));
+ }
+ /**
+ * Filters the given list of scanners using Bloom filter, time range, and
+ * TTL.
+ */
+ private List<KeyValueScanner> selectScannersFrom(
+ final List<? extends KeyValueScanner> allScanners) {
List<KeyValueScanner> scanners =
- new ArrayList<KeyValueScanner>(allStoreScanners.size());
+ new ArrayList<KeyValueScanner>(allScanners.size());
// include only those scan files which pass all filters
- for (KeyValueScanner kvs : allStoreScanners) {
- if (kvs instanceof StoreFileScanner) {
- if (((StoreFileScanner)kvs).shouldSeek(scan, columns))
- scanners.add(kvs);
- }
- else {
- // kvs is a MemStoreScanner
- if (this.store.memstore.shouldSeek(scan))
- scanners.add(kvs);
+ for (KeyValueScanner kvs : allScanners) {
+ if (kvs.shouldUseScanner(scan, columns, oldestUnexpiredTS)) {
+ scanners.add(kvs);
}
}
return scanners;
@@ -253,7 +260,7 @@ class StoreScanner extends NonLazyKeyVal
public synchronized boolean seek(KeyValue key) throws IOException {
if (this.heap == null) {
- List<KeyValueScanner> scanners = getScanners(scan, columns);
+ List<KeyValueScanner> scanners = getScannersNoCompaction();
heap = new KeyValueHeap(scanners, store.comparator);
}
@@ -443,7 +450,7 @@ class StoreScanner extends NonLazyKeyVal
throw new RuntimeException("StoreScanner.reseek run on an existing heap!");
}
- List<KeyValueScanner> scanners = getScanners(scan, columns);
+ List<KeyValueScanner> scanners = getScannersNoCompaction();
for(KeyValueScanner scanner : scanners) {
scanner.seek(lastTopKey);
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java Wed Jan 18 03:07:24 2012
@@ -30,11 +30,11 @@ import org.apache.hadoop.hbase.util.Byte
import org.apache.hadoop.io.Writable;
/**
- * Stores the minimum and maximum timestamp values.
+ * Stores the minimum and maximum timestamp values (both are inclusive).
* Can be used to find if any given time range overlaps with its time range
* MemStores use this class to track its minimum and maximum timestamps.
* When writing StoreFiles, this information is stored in meta blocks and used
- * at read time to match against the required TimeRange
+ * at read time to match against the required TimeRange.
*/
public class TimeRangeTracker implements Writable {
@@ -143,4 +143,10 @@ public class TimeRangeTracker implements
this.maximumTimestamp = in.readLong();
}
+ @Override
+ public String toString() {
+ return "[" + minimumTimestamp + "," + maximumTimestamp + "]";
+ }
+
}
+
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java Wed Jan 18 03:07:24 2012
@@ -704,7 +704,7 @@ public class SchemaMetrics {
return metricsSnapshot;
}
- private static long getLong(Map<String, Long> m, String k) {
+ public static long getLong(Map<String, Long> m, String k) {
Long l = m.get(k);
return l != null ? l : 0;
}
@@ -716,7 +716,8 @@ public class SchemaMetrics {
m.remove(k);
}
}
- private static Map<String, Long> diffMetrics(Map<String, Long> a,
+
+ public static Map<String, Long> diffMetrics(Map<String, Long> a,
Map<String, Long> b) {
Set<String> allKeys = new TreeSet<String>(a.keySet());
allKeys.addAll(b.keySet());
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Threads.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Threads.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Threads.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Threads.java Wed Jan 18 03:07:24 2012
@@ -129,19 +129,19 @@ public class Threads {
* the interrupt status.
* @param msToWait the amount of time to sleep in milliseconds
*/
- public static void sleepWithoutInterrupt(long msToWait) {
+ public static void sleepWithoutInterrupt(final long msToWait) {
long timeMillis = System.currentTimeMillis();
+ long endTime = timeMillis + msToWait;
boolean interrupted = false;
- while (msToWait > 0) {
+ while (timeMillis < endTime) {
try {
- Thread.sleep(msToWait);
+ Thread.sleep(endTime - timeMillis);
} catch (InterruptedException ex) {
- long timePassed = System.currentTimeMillis() - timeMillis;
- msToWait -= timePassed;
- timeMillis += timePassed;
interrupted = true;
}
+ timeMillis = System.currentTimeMillis();
}
+
if (interrupted) {
Thread.currentThread().interrupt();
}
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java?rev=1232732&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java Wed Jan 18 03:07:24 2012
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Test the optimization that does not scan files where all timestamps are
+ * expired.
+ */
+@RunWith(Parameterized.class)
+public class TestScannerSelectionUsingTTL {
+
+ private static final Log LOG =
+ LogFactory.getLog(TestScannerSelectionUsingTTL.class);
+
+ private static final HBaseTestingUtility TEST_UTIL =
+ new HBaseTestingUtility();
+
+ private static String TABLE = "myTable";
+ private static String FAMILY = "myCF";
+ private static byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY);
+
+ private static final int TTL_SECONDS = 2;
+ private static final int TTL_MS = TTL_SECONDS * 1000;
+
+ private static final int NUM_EXPIRED_FILES = 2;
+ private static final int NUM_ROWS = 8;
+ private static final int NUM_COLS_PER_ROW = 5;
+
+ private final int numFreshFiles, totalNumFiles;
+
+ /** Whether we are specifying the exact files to compact */
+ private final boolean explicitCompaction;
+
+ @Parameters
+ public static Collection<Object[]> parametersNumFreshFiles() {
+ List<Object[]> params = new ArrayList<Object[]>();
+ for (int numFreshFiles = 1; numFreshFiles <= 3; ++numFreshFiles) {
+ for (boolean explicitCompaction : new boolean[] {false, true}) {
+ params.add(new Object[]{ numFreshFiles, explicitCompaction });
+ }
+ }
+ return params;
+ }
+
+ public TestScannerSelectionUsingTTL(int numFreshFiles,
+ boolean explicitCompaction) {
+ this.numFreshFiles = numFreshFiles;
+ this.totalNumFiles = numFreshFiles + NUM_EXPIRED_FILES;
+ this.explicitCompaction = explicitCompaction;
+ }
+
+ @Test
+ public void testScannerSelection() throws IOException {
+ HColumnDescriptor hcd =
+ new HColumnDescriptor(FAMILY_BYTES, Integer.MAX_VALUE,
+ Compression.Algorithm.NONE.getName(),
+ HColumnDescriptor.DEFAULT_IN_MEMORY,
+ HColumnDescriptor.DEFAULT_BLOCKCACHE,
+ TTL_SECONDS,
+ BloomType.NONE.toString());
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(hcd);
+ HRegionInfo info = new HRegionInfo(htd, null, null, false);
+ HRegion region = HRegion.createHRegion(
+ info, HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration());
+
+ for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
+ if (iFile == NUM_EXPIRED_FILES) {
+ Threads.sleepWithoutInterrupt(TTL_MS);
+ }
+
+ for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
+ Put put = new Put(Bytes.toBytes("row" + iRow));
+ for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
+ put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
+ Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
+ }
+ region.put(put);
+ }
+ region.flushcache();
+ }
+
+ Scan scan = new Scan();
+ scan.setMaxVersions(Integer.MAX_VALUE);
+ LruBlockCache cache = (LruBlockCache)
+ StoreFile.getBlockCache(TEST_UTIL.getConfiguration());
+ cache.clearCache();
+ InternalScanner scanner = region.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
+ int numReturnedRows = 0;
+ LOG.info("Scanning the entire table");
+ while (scanner.next(results) || results.size() > 0) {
+ assertEquals(expectedKVsPerRow, results.size());
+ ++numReturnedRows;
+ results.clear();
+ }
+ assertEquals(NUM_ROWS, numReturnedRows);
+ Set<String> accessedFiles = cache.getCachedFileNamesForTest();
+ LOG.debug("Files accessed during scan: " + accessedFiles);
+
+ Map<String, Long> metricsBeforeCompaction =
+ SchemaMetrics.getMetricsSnapshot();
+
+ // Exercise both compaction codepaths.
+ if (explicitCompaction) {
+ region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
+ } else {
+ region.compactStores();
+ }
+
+ SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
+ Map<String, Long> compactionMetrics =
+ SchemaMetrics.diffMetrics(metricsBeforeCompaction,
+ SchemaMetrics.getMetricsSnapshot());
+ long compactionDataBlocksRead = SchemaMetrics.getLong(
+ compactionMetrics,
+ SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
+ BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
+ assertEquals("Invalid number of blocks accessed during compaction. " +
+ "We only expect non-expired files to be accessed.",
+ numFreshFiles, compactionDataBlocksRead);
+ region.close();
+ }
+
+}
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Wed Jan 18 03:07:24 2012
@@ -329,7 +329,7 @@ public class TestCompaction extends HBas
Store store2 = this.r.stores.get(fam2);
int numFiles1 = store2.getStorefiles().size();
assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
- store2.compactRecent(compactionThreshold); // = 3
+ store2.compactRecentForTesting(compactionThreshold); // = 3
int numFiles2 = store2.getStorefiles().size();
// Check that we did compact
assertTrue("Number of store files should go down", numFiles1 > numFiles2);
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java Wed Jan 18 03:07:24 2012
@@ -156,8 +156,7 @@ public class TestCompoundBloomFilter {
* it to the provided threshold.
*
* @param falsePosRate experimental positive rate
- * @param nTrials the number of calls to
- * {@link StoreFile.Reader#shouldSeek(Scan, java.util.SortedSet)}.
+ * @param nTrials the number of Bloom filter checks
* @param zValueBoundary z-value boundary, positive for an upper bound and
* negative for a lower bound
* @param cbf the compound Bloom filter we are using
@@ -276,7 +275,7 @@ public class TestCompoundBloomFilter {
Scan scan = new Scan(row, row);
TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
columns.add(qualifier);
- return scanner.shouldSeek(scan, columns);
+ return scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
}
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs)
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java Wed Jan 18 03:07:24 2012
@@ -774,16 +774,16 @@ public class TestMemStore extends TestCa
addRows(memstore,timestamp);
scan.setTimeRange(0, 2);
- assertTrue(memstore.shouldSeek(scan));
+ assertTrue(memstore.shouldSeek(scan, Long.MIN_VALUE));
scan.setTimeRange(20, 82);
- assertTrue(memstore.shouldSeek(scan));
+ assertTrue(memstore.shouldSeek(scan, Long.MIN_VALUE));
scan.setTimeRange(10, 20);
- assertTrue(memstore.shouldSeek(scan));
+ assertTrue(memstore.shouldSeek(scan, Long.MIN_VALUE));
scan.setTimeRange(8, 12);
- assertTrue(memstore.shouldSeek(scan));
+ assertTrue(memstore.shouldSeek(scan, Long.MIN_VALUE));
/*This test is not required for correctness but it should pass when
* timestamp range optimization is on*/
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Wed Jan 18 03:07:24 2012
@@ -23,20 +23,16 @@ package org.apache.hadoop.hbase.regionse
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.NavigableSet;
import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
-import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
public class TestQueryMatcher extends HBaseTestCase {
private static final boolean PRINT = false;
@@ -99,8 +95,9 @@ public class TestQueryMatcher extends HB
expected.add(ScanQueryMatcher.MatchCode.DONE);
// 2,4,5
- ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
- get.getFamilyMap().get(fam2), ttl, rowComparator, 1);
+ ScanQueryMatcher qm =
+ new ScanQueryMatcher(scan, fam2, get.getFamilyMap().get(fam2),
+ rowComparator, 1, EnvironmentEdgeManager.currentTimeMillis() - ttl);
List<KeyValue> memstore = new ArrayList<KeyValue>();
memstore.add(new KeyValue(row1, fam2, col1, 1, data));
@@ -143,7 +140,8 @@ public class TestQueryMatcher extends HB
expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
expected.add(ScanQueryMatcher.MatchCode.DONE);
- ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2, null, ttl, rowComparator, 1);
+ ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2, null, rowComparator,
+ 1, EnvironmentEdgeManager.currentTimeMillis() - ttl);
List<KeyValue> memstore = new ArrayList<KeyValue>();
memstore.add(new KeyValue(row1, fam2, col1, 1, data));
@@ -193,8 +191,10 @@ public class TestQueryMatcher extends HB
ScanQueryMatcher.MatchCode.DONE
};
- ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
- get.getFamilyMap().get(fam2), testTTL, rowComparator, 1);
+ ScanQueryMatcher qm =
+ new ScanQueryMatcher(scan, fam2, get.getFamilyMap().get(fam2),
+ rowComparator, 1, EnvironmentEdgeManager.currentTimeMillis()
+ - testTTL);
long now = System.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
@@ -245,8 +245,9 @@ public class TestQueryMatcher extends HB
ScanQueryMatcher.MatchCode.DONE
};
- ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
- null, testTTL, rowComparator, 1);
+ ScanQueryMatcher qm =
+ new ScanQueryMatcher(scan, fam2, null, rowComparator, 1,
+ EnvironmentEdgeManager.currentTimeMillis() - testTTL);
long now = System.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Wed Jan 18 03:07:24 2012
@@ -64,7 +64,7 @@ import com.google.common.base.Joiner;
import org.mockito.Mockito;
/**
- * Test class fosr the Store
+ * Test class for the Store
*/
public class TestStore extends TestCase {
public static final Log LOG = LogFactory.getLog(TestStore.class);
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1232732&r1=1232731&r2=1232732&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Wed Jan 18 03:07:24 2012
@@ -27,7 +27,6 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
-import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
@@ -366,7 +365,7 @@ public class TestStoreFile extends HBase
Scan scan = new Scan(row.getBytes(),row.getBytes());
scan.addColumn("family".getBytes(), "family:col".getBytes());
- boolean exists = scanner.shouldSeek(scan, columns);
+ boolean exists = scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
if (i % 2 == 0) {
if (!exists) falseNeg++;
} else {
@@ -511,7 +510,8 @@ public class TestStoreFile extends HBase
Scan scan = new Scan(row.getBytes(),row.getBytes());
scan.addColumn("family".getBytes(), ("col"+col).getBytes());
- boolean exists = scanner.shouldSeek(scan, columns);
+ boolean exists =
+ scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
boolean shouldRowExist = i % 2 == 0;
boolean shouldColExist = j % 2 == 0;
shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
@@ -680,21 +680,20 @@ public class TestStoreFile extends HBase
columns.add(qualifier);
scan.setTimeRange(20, 100);
- assertTrue(scanner.shouldSeek(scan, columns));
+ assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
scan.setTimeRange(1, 2);
- assertTrue(scanner.shouldSeek(scan, columns));
+ assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
scan.setTimeRange(8, 10);
- assertTrue(scanner.shouldSeek(scan, columns));
+ assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
scan.setTimeRange(7, 50);
- assertTrue(scanner.shouldSeek(scan, columns));
+ assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
- /*This test is not required for correctness but it should pass when
- * timestamp range optimization is on*/
- //scan.setTimeRange(27, 50);
- //assertTrue(!scanner.shouldSeek(scan, columns));
+ // This test relies on the timestamp range optimization
+ scan.setTimeRange(27, 50);
+ assertTrue(!scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
}
public void testCacheOnWriteEvictOnClose() throws Exception {
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java?rev=1232732&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java Wed Jan 18 03:07:24 2012
@@ -0,0 +1,74 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
+
+public class TestThreads {
+ private static final Log LOG = LogFactory.getLog(TestThreads.class);
+
+ private static final int SLEEP_TIME_MS = 5000;
+ private static final int TOLERANCE_MS = (int) (0.05 * SLEEP_TIME_MS);
+
+ private volatile boolean wasInterrupted;
+
+ @Test(timeout=6000)
+ public void testSleepWithoutInterrupt() throws InterruptedException {
+ Thread sleeper = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.debug("Sleeper thread: sleeping for " + SLEEP_TIME_MS);
+ Threads.sleepWithoutInterrupt(SLEEP_TIME_MS);
+ LOG.debug("Sleeper thread: finished sleeping");
+ wasInterrupted = Thread.currentThread().isInterrupted();
+ }
+ });
+ LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)");
+ sleeper.start();
+ long startTime = System.currentTimeMillis();
+ LOG.debug("Main thread: sleeping for 500 ms");
+ Threads.sleep(500);
+
+ LOG.debug("Interrupting the sleeper thread and sleeping for 2000 ms");
+ sleeper.interrupt();
+ Threads.sleep(2000);
+
+ LOG.debug("Interrupting the sleeper thread and sleeping for 1000 ms");
+ sleeper.interrupt();
+ Threads.sleep(1000);
+
+ LOG.debug("Interrupting the sleeper thread again");
+ sleeper.interrupt();
+ sleeper.join();
+
+ assertTrue("sleepWithoutInterrupt did not preserve the thread's " +
+ "interrupted status", wasInterrupted);
+
+ long timeElapsed = System.currentTimeMillis() - startTime;
+ assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " +
+ "range of the sleep time " + SLEEP_TIME_MS,
+ Math.abs(timeElapsed - SLEEP_TIME_MS) < TOLERANCE_MS);
+ LOG.debug("Target sleep time: " + SLEEP_TIME_MS + ", time elapsed: " +
+ timeElapsed);
+ }
+}