You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 04:15:13 UTC

svn commit: r1181512 - in /hbase/branches/0.89/src: main/java/org/apache/hadoop/hbase/io/ main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/mapreduce/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hado...

Author: nspiegelberg
Date: Tue Oct 11 02:15:12 2011
New Revision: 1181512

URL: http://svn.apache.org/viewvc?rev=1181512&view=rev
Log:
make it easier to add per-CF metrics; add some key per-CF metrics to start with

Summary:
Adds per-CF metrics like:

1) Blocks read, cache hit, avg time of read due to compactions on this column
family.
"hadoop.regionserver_cf.threadsnapshot.compactionblockreadcachehitcnt": 25,
"hadoop.regionserver_cf.threadsnapshot.compactionblockreadcnt": 9490,
"hadoop.regionserver_cf.threadsnapshot.compactionread_avg_time": 2,
"hadoop.regionserver_cf.threadsnapshot.compactionread_num_ops": 307873,

2) Stats for non-compaction related block reads.

"hadoop.regionserver_cf.threadsnapshot.fsblockreadcachehitcnt": 12009,
"hadoop.regionserver_cf.threadsnapshot.fsblockreadcnt": 19253,

"hadoop.regionserver_cf.threadsnapshot.fsread_avg_time": 26,
"hadoop.regionserver_cf.threadsnapshot.fsread_num_ops": 2742156,

3) Stats for meta block reads per CF:

"hadoop.regionserver_cf.threadsnapshot.fsmetablockreadcachehitcnt": 21785,
"hadoop.regionserver_cf.threadsnapshot.fsmetablockreadcnt": 21785,

4) Bloom Filter stats per CF

"hadoop.regionserver_cf.threadsnapshot.keymaybeinbloomcnt": 7627,
"hadoop.regionserver_cf.threadsnapshot.keynotinbloomcnt": 14158,

Test Plan:
1) Pushed to a single machine on the dark launch cluster, and we
are getting data in OpenTSDB

2) Will run all unit tests.

Reviewed By: liyintang
Reviewers: jgray, nspiegelberg, liyintang, kranganathan
Commenters: nspiegelberg
CC: nspiegelberg, liyintang, kannan, hbase@lists,
Revert Plan:
OK

Differential Revision: 229493

Added:
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
Modified:
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Tue Oct 11 02:15:12 2011
@@ -78,8 +78,9 @@ public class HalfStoreFileReader extends
   }
 
   @Override
-  public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread) {
-    final HFileScanner s = super.getScanner(cacheBlocks, pread);
+  public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread,
+                                final boolean isCompaction) {
+    final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction);
     return new HFileScanner() {
       final HFileScanner delegate = s;
       public boolean atEnd = false;
@@ -252,7 +253,7 @@ public class HalfStoreFileReader extends
       return super.getLastKey();
     }
     // Get a scanner that caches the block and that uses pread.
-    HFileScanner scanner = getScanner(true, true);
+    HFileScanner scanner = getScanner(true, true, false);
     try {
       if (scanner.seekBefore(this.splitkey)) {
         return Bytes.toBytes(scanner.getKey());

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Oct 11 02:15:12 2011
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -778,6 +779,55 @@ public class HFile {
     // file Path plus metadata key/value pairs.
     protected String name;
 
+    // table qualified cfName for this HFile.
+    // This is used to report stats on a per-table/CF basis
+    public String cfName = "";
+
+    // various metrics that we want to track on a per-cf basis
+    public String fsReadTimeMetric = "";
+    public String compactionReadTimeMetric = "";
+
+    public String fsBlockReadCntMetric = "";
+    public String compactionBlockReadCntMetric = "";
+
+    public String fsBlockReadCacheHitCntMetric = "";
+    public String compactionBlockReadCacheHitCntMetric = "";
+
+    public String fsMetaBlockReadCntMetric = "";
+    public String fsMetaBlockReadCacheHitCntMetric = "";
+
+    /*
+     * Parse the HFile path to figure out which table and column family
+     * it belongs to. This is used to maintain read statistics on a
+     * per-column-family basis. The CF name is assumed to be the
+     * second-to-last path component (".../<cf>/<hfile>").
+     *
+     * @param path HFile path name
+     */
+    public void parsePath(String path) {
+      String splits[] = path.split("/");
+      this.cfName = "cf." + splits[splits.length - 2];
+
+      this.fsReadTimeMetric =
+        this.cfName + ".fsRead";
+      this.compactionReadTimeMetric =
+        this.cfName + ".compactionRead";
+
+      this.fsBlockReadCntMetric =
+        this.cfName + ".fsBlockReadCnt";
+      this.fsBlockReadCacheHitCntMetric =
+        this.cfName + ".fsBlockReadCacheHitCnt";
+
+      this.compactionBlockReadCntMetric =
+        this.cfName + ".compactionBlockReadCnt";
+      this.compactionBlockReadCacheHitCntMetric =
+        this.cfName + ".compactionBlockReadCacheHitCnt";
+
+      this.fsMetaBlockReadCntMetric =
+        this.cfName + ".fsMetaBlockReadCnt";
+      this.fsMetaBlockReadCacheHitCntMetric =
+        this.cfName + ".fsMetaBlockReadCacheHitCnt";
+    }
+
     /**
      * Opens a HFile.  You must load the file info before you can
      * use it by calling {@link #loadFileInfo()}.
@@ -792,6 +842,7 @@ public class HFile {
       this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory);
       this.closeIStream = true;
       this.name = path.toString();
+      this.parsePath(this.name);
     }
 
     /**
@@ -941,13 +992,15 @@ public class HFile {
      * Call {@link HFileScanner#seekTo(byte[])} to position an start the read.
      * There is nothing to clean up in a Scanner. Letting go of your references
      * to the scanner is sufficient.
+     * @param cacheBlocks True if we should cache blocks read in by this scanner.
      * @param pread Use positional read rather than seek+read if true (pread is
      * better for random reads, seek+read is better scanning).
-     * @param cacheBlocks True if we should cache blocks read in by this scanner.
+     * @param isCompaction is scanner being used for a compaction?
      * @return Scanner on this file.
      */
-    public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) {
-      return new Scanner(this, cacheBlocks, pread);
+    public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
+                                  final boolean isCompaction) {
+      return new Scanner(this, cacheBlocks, pread, isCompaction);
     }
 
     /**
@@ -992,6 +1045,7 @@ public class HFile {
       // Per meta key from any given file, synchronize reads for said block
       synchronized (metaIndex.blockKeys[block]) {
         metaLoads++;
+        HRegion.incrNumericMetric(this.fsMetaBlockReadCntMetric, 1);
         // Check cache for block.  If found return.
         if (cache != null) {
           ByteBuffer cachedBuf = cache.getBlock(name + "meta" + block);
@@ -999,6 +1053,7 @@ public class HFile {
             // Return a distinct 'shallow copy' of the block,
             // so pos doesnt get messed by the scanner
             cacheHits++;
+            HRegion.incrNumericMetric(this.fsMetaBlockReadCacheHitCntMetric, 1);
             return cachedBuf.duplicate();
           }
           // Cache Miss, please load.
@@ -1016,7 +1071,9 @@ public class HFile {
         // Create a new ByteBuffer 'shallow copy' to hide the magic header
         buf = buf.slice();
 
-        readTime += System.currentTimeMillis() - now;
+        long delta = System.currentTimeMillis() - now;
+        HRegion.incrTimeVaryingMetric(this.fsReadTimeMetric, delta);
+        readTime += delta;
         readOps++;
 
         // Cache the block
@@ -1033,10 +1090,12 @@ public class HFile {
      * @param block Index of block to read.
      * @param pread Use positional read instead of seek+read (positional is
      * better doing random reads whereas seek+read is better scanning).
+     * @param isCompaction is this block being read as part of a compaction
      * @return Block wrapped in a ByteBuffer.
      * @throws IOException
      */
-    ByteBuffer readBlock(int block, boolean cacheBlock, final boolean pread)
+    ByteBuffer readBlock(int block, boolean cacheBlock, final boolean pread,
+                         final boolean isCompaction)
     throws IOException {
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
@@ -1051,6 +1110,13 @@ public class HFile {
       // the other choice is to duplicate work (which the cache would prevent you from doing).
       synchronized (blockIndex.blockKeys[block]) {
         blockLoads++;
+
+        if (isCompaction) {
+          HRegion.incrNumericMetric(this.compactionBlockReadCntMetric, 1);
+        } else {
+          HRegion.incrNumericMetric(this.fsBlockReadCntMetric, 1);
+        }
+
         // Check cache for block.  If found return.
         if (cache != null) {
           ByteBuffer cachedBuf = cache.getBlock(name + block);
@@ -1058,6 +1124,15 @@ public class HFile {
             // Return a distinct 'shallow copy' of the block,
             // so pos doesnt get messed by the scanner
             cacheHits++;
+
+            if (isCompaction) {
+              HRegion.incrNumericMetric(
+                  this.compactionBlockReadCacheHitCntMetric, 1);
+            } else {
+              HRegion.incrNumericMetric(
+                  this.fsBlockReadCacheHitCntMetric, 1);
+            }
+
             return cachedBuf.duplicate();
           }
           // Carry on, please load.
@@ -1091,8 +1166,15 @@ public class HFile {
         //       reading at buf.arrayOffset()
         buf = buf.slice();
 
-        readTime += System.currentTimeMillis() - now;
+        long delta = System.currentTimeMillis() - now;
+        readTime += delta;
         readOps++;
+        if (isCompaction) {
+          HRegion.incrTimeVaryingMetric(this.compactionReadTimeMetric, delta);
+        } else {
+          HRegion.incrTimeVaryingMetric(this.fsReadTimeMetric, delta);
+        }
+
 
         // Cache the block
         if(cacheBlock && cache != null) {
@@ -1252,16 +1334,19 @@ public class HFile {
 
       private final boolean cacheBlocks;
       private final boolean pread;
+      private final boolean isCompaction;
 
       private int currKeyLen = 0;
       private int currValueLen = 0;
 
       public int blockFetches = 0;
 
-      public Scanner(Reader r, boolean cacheBlocks, final boolean pread) {
+      public Scanner(Reader r, boolean cacheBlocks, final boolean pread,
+                    final boolean isCompaction) {
         this.reader = r;
         this.cacheBlocks = cacheBlocks;
         this.pread = pread;
+        this.isCompaction = isCompaction;
       }
 
       public KeyValue getKeyValue() {
@@ -1322,7 +1407,8 @@ public class HFile {
             block = null;
             return false;
           }
-          block = reader.readBlock(this.currBlock, this.cacheBlocks, this.pread);
+          block = reader.readBlock(this.currBlock, this.cacheBlocks,
+                                   this.pread, this.isCompaction);
           currKeyLen = block.getInt();
           currValueLen = block.getInt();
           blockFetches++;
@@ -1478,7 +1564,8 @@ public class HFile {
           return true;
         }
         currBlock = 0;
-        block = reader.readBlock(this.currBlock, this.cacheBlocks, this.pread);
+        block = reader.readBlock(this.currBlock, this.cacheBlocks,
+                                 this.pread, this.isCompaction);
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
         blockFetches++;
@@ -1487,12 +1574,14 @@ public class HFile {
 
       private void loadBlock(int bloc, boolean rewind) throws IOException {
         if (block == null) {
-          block = reader.readBlock(bloc, this.cacheBlocks, this.pread);
+          block = reader.readBlock(bloc, this.cacheBlocks,
+                                   this.pread, this.isCompaction);
           currBlock = bloc;
           blockFetches++;
         } else {
           if (bloc != currBlock) {
-            block = reader.readBlock(bloc, this.cacheBlocks, this.pread);
+            block = reader.readBlock(bloc, this.cacheBlocks,
+                                     this.pread, this.isCompaction);
             currBlock = bloc;
             blockFetches++;
           } else {
@@ -1962,7 +2051,7 @@ public class HFile {
         int count = 0;
         if (printKey || checkRow || checkFamily) {
           // scan over file and read key/value's and check if requested
-          HFileScanner scanner = reader.getScanner(false, false);
+          HFileScanner scanner = reader.getScanner(false, false, false);
           scanner.seekTo();
           KeyValue pkv = null;
           do {

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Tue Oct 11 02:15:12 2011
@@ -277,7 +277,7 @@ public class LoadIncrementalHFiles exten
       halfWriter = new StoreFile.Writer(
           fs, outFile, blocksize, compression, conf, KeyValue.COMPARATOR,
           bloomFilterType, 0);
-      HFileScanner scanner = halfReader.getScanner(false, false);
+      HFileScanner scanner = halfReader.getScanner(false, false, false);
       scanner.seekTo();
       do {
         KeyValue kv = scanner.getKeyValue();

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Oct 11 02:15:12 2011
@@ -38,8 +38,11 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -241,6 +244,41 @@ public class HRegion implements HeapSize
   public static volatile AtomicLong rwccWaitTime = new AtomicLong(0);
   public static volatile AtomicLong memstoreInsertTime = new AtomicLong(0);
 
+  // for simple numeric metrics (# of blocks read from block cache)
+  public static final ConcurrentMap<String, AtomicLong>
+    numericMetrics = new ConcurrentHashMap<String, AtomicLong>();
+
+  // Used for metrics where we want track a metrics (such as latency)
+  // over a number of operations.
+  public static final ConcurrentMap<String,
+                                  Pair<AtomicLong, AtomicInteger>>
+    timeVaryingMetrics =
+      new ConcurrentHashMap<String,
+                            Pair<AtomicLong, AtomicInteger>>();
+
+  /**
+   * Atomically increment the named numeric metric by {@code amount},
+   * creating the counter on first use.
+   *
+   * @param key metric name (e.g. "cf.&lt;family&gt;.fsBlockReadCnt")
+   * @param amount delta to add to the counter
+   */
+  public static void incrNumericMetric(String key, long amount) {
+    AtomicLong oldVal = numericMetrics.get(key);
+    if (oldVal == null) {
+      // Lock-free first insert: putIfAbsent returns null only if our new
+      // AtomicLong (already holding 'amount') won the race; otherwise it
+      // returns the counter that beat us, which we fall through to update.
+      oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount));
+      if (oldVal == null)
+        return;
+    }
+    oldVal.addAndGet(amount);
+  }
+
+  /**
+   * Record one operation of {@code amount} (e.g. milliseconds) against the
+   * named time-varying metric: adds to the cumulative total (Pair.first)
+   * and bumps the op count (Pair.second). Creates the pair on first use.
+   *
+   * @param key metric name (e.g. "cf.&lt;family&gt;.fsRead")
+   * @param amount time (or other quantity) consumed by this one operation
+   */
+  public static void incrTimeVaryingMetric(String key, long amount) {
+    Pair<AtomicLong, AtomicInteger> oldVal = timeVaryingMetrics.get(key);
+    if (oldVal == null) {
+      // First insert seeds the pair with (amount, 1 op); if putIfAbsent
+      // returns non-null another thread won the race, so fall through and
+      // accumulate into the existing pair instead.
+      oldVal = timeVaryingMetrics.putIfAbsent(key,
+           new Pair<AtomicLong, AtomicInteger>(new AtomicLong(amount),
+                                              new AtomicInteger(1)));
+      if (oldVal == null)
+        return;
+    }
+    oldVal.getFirst().addAndGet(amount);  // total time
+    oldVal.getSecond().incrementAndGet(); // increment ops by 1
+  }
+
   public static final long getWriteOps() {
     return writeOps.getAndSet(0);
   }

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Tue Oct 11 02:15:12 2011
@@ -98,6 +98,7 @@ import org.apache.hadoop.hbase.ipc.HBase
 import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -205,6 +206,7 @@ public class HRegionServer implements HR
   private final LinkedList<byte[]> reservedSpace = new LinkedList<byte []>();
 
   private RegionServerMetrics metrics;
+  private RegionServerDynamicMetrics dynamicMetrics;
 
   // Compactions
   CompactSplitThread compactSplitThread;
@@ -741,6 +743,7 @@ public class HRegionServer implements HR
       this.hlog = setupHLog();
       // Init in here rather than in constructor after thread name has been set
       this.metrics = new RegionServerMetrics();
+      this.dynamicMetrics = RegionServerDynamicMetrics.newInstance();
       startServiceThreads();
       isOnline = true;
     } catch (Throwable e) {

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Tue Oct 11 02:15:12 2011
@@ -964,7 +964,7 @@ public class Store implements HeapSize {
 
     // For each file, obtain a scanner:
     List<StoreFileScanner> scanners = StoreFileScanner
-      .getScannersForStoreFiles(filesToCompact, false, false);
+      .getScannersForStoreFiles(filesToCompact, false, false, true);
 
     // Make the instantiation lazy in case compaction produces no product; i.e.
     // where all source cells are expired or deleted.
@@ -1212,7 +1212,7 @@ public class Store implements HeapSize {
       firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
     }
     // Get a scanner that caches blocks and that uses pread.
-    HFileScanner scanner = r.getScanner(true, true);
+    HFileScanner scanner = r.getScanner(true, true, false);
     // Seek scanner.  If can't seek it, return.
     if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
     // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Oct 11 02:15:12 2011
@@ -915,10 +915,16 @@ public class StoreFile {
     private final HFile.Reader reader;
     protected TimeRangeTracker timeRangeTracker = null;
     protected long sequenceID = -1;
+    private final String bloomAccessedMetric;
+    private final String bloomSkippedMetric;
 
     public Reader(FileSystem fs, Path path, BlockCache blockCache, boolean inMemory)
         throws IOException {
       reader = new HFile.Reader(fs, path, blockCache, inMemory);
+
+      // prepare the text (key) for the metrics
+      bloomAccessedMetric = reader.cfName + ".keyMaybeInBloomCnt";
+      bloomSkippedMetric = reader.cfName + ".keyNotInBloomCnt";
       bloomFilterType = BloomType.NONE;
     }
 
@@ -927,6 +933,8 @@ public class StoreFile {
      */
     Reader() {
       this.reader = null;
+      bloomAccessedMetric = "";
+      bloomSkippedMetric = "";
     }
 
     public RawComparator<byte []> getComparator() {
@@ -938,10 +946,15 @@ public class StoreFile {
      *
      * @param cacheBlocks should this scanner cache blocks?
      * @param pread use pread (for highly concurrent small readers)
+     * @param isCompaction is scanner being used for compaction?
      * @return a scanner
      */
-    public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread) {
-      return new StoreFileScanner(this, getScanner(cacheBlocks, pread));
+    public StoreFileScanner getStoreFileScanner(boolean cacheBlocks,
+                                               boolean pread,
+                                               boolean isCompaction) {
+      return new StoreFileScanner(this,
+                                 getScanner(cacheBlocks, pread,
+                                            isCompaction));
     }
 
     /**
@@ -951,11 +964,13 @@ public class StoreFile {
      *
      * @param cacheBlocks should we cache the blocks?
      * @param pread use pread (for concurrent small readers)
+     * @param isCompaction is scanner being used for compaction?
      * @return the underlying HFileScanner
      */
     @Deprecated
-    public HFileScanner getScanner(boolean cacheBlocks, boolean pread) {
-      return reader.getScanner(cacheBlocks, pread);
+    public HFileScanner getScanner(boolean cacheBlocks, boolean pread,
+                                  boolean isCompaction) {
+      return reader.getScanner(cacheBlocks, pread, isCompaction);
     }
 
     public void close() throws IOException {
@@ -1003,17 +1018,23 @@ public class StoreFile {
       try {
         ByteBuffer bloom = reader.getMetaBlock(BLOOM_FILTER_DATA_KEY, true);
         if (bloom != null) {
+          boolean exists;
           if (this.bloomFilterType == BloomType.ROWCOL) {
             // Since a Row Delete is essentially a DeleteFamily applied to all
             // columns, a file might be skipped if using row+col Bloom filter.
             // In order to ensure this file is included an additional check is
             // required looking only for a row bloom.
-            return this.bloomFilter.contains(key, bloom) ||
-                this.bloomFilter.contains(row, bloom);
-          }
-          else {
-            return this.bloomFilter.contains(key, bloom);
+            exists = this.bloomFilter.contains(key, bloom) ||
+                     this.bloomFilter.contains(row, bloom);
+          } else {
+            exists = this.bloomFilter.contains(key, bloom);
           }
+
+          if (exists)
+            HRegion.incrNumericMetric(bloomAccessedMetric, 1);
+          else
+            HRegion.incrNumericMetric(bloomSkippedMetric, 1);
+          return exists;
         }
       } catch (IOException e) {
         LOG.error("Error reading bloom filter data -- proceeding without",

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Tue Oct 11 02:15:12 2011
@@ -58,14 +58,15 @@ class StoreFileScanner implements KeyVal
    * set of store files.
    */
   public static List<StoreFileScanner> getScannersForStoreFiles(
-      Collection<StoreFile> filesToCompact,
+      Collection<StoreFile> files,
       boolean cacheBlocks,
-      boolean usePread) throws IOException {
+      boolean usePread,
+      boolean isCompaction) throws IOException {
     List<StoreFileScanner> scanners =
-      new ArrayList<StoreFileScanner>(filesToCompact.size());
-    for (StoreFile file : filesToCompact) {
+      new ArrayList<StoreFileScanner>(files.size());
+    for (StoreFile file : files) {
       StoreFile.Reader r = file.createReader();
-      scanners.add(r.getStoreFileScanner(cacheBlocks, usePread));
+      scanners.add(r.getStoreFileScanner(cacheBlocks, usePread, isCompaction));
     }
     return scanners;
   }

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Tue Oct 11 02:15:12 2011
@@ -140,7 +140,8 @@ class StoreScanner implements KeyValueSc
     // but now we get them in ascending order, which I think is
     // actually more correct, since memstore get put at the end.
     List<StoreFileScanner> sfScanners = StoreFileScanner
-      .getScannersForStoreFiles(store.getStorefiles(), cacheBlocks, isGet);
+      .getScannersForStoreFiles(store.getStorefiles(), cacheBlocks,
+                                isGet, false);
     List<KeyValueScanner> scanners =
       new ArrayList<KeyValueScanner>(sfScanners.size()+1);
     scanners.addAll(sfScanners);
@@ -156,7 +157,8 @@ class StoreScanner implements KeyValueSc
       final NavigableSet<byte[]> columns) throws IOException {
     // First the store file scanners
     List<StoreFileScanner> sfScanners = StoreFileScanner
-      .getScannersForStoreFiles(store.getStorefiles(), cacheBlocks, isGet);
+      .getScannersForStoreFiles(store.getStorefiles(), cacheBlocks,
+                                isGet, false);
     List<KeyValueScanner> scanners =
       new ArrayList<KeyValueScanner>(sfScanners.size()+1);
 

Added: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java?rev=1181512&view=auto
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java (added)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java Tue Oct 11 02:15:12 2011
@@ -0,0 +1,130 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.metrics.util.MetricsBase;
+import org.apache.hadoop.metrics.util.MetricsLongValue;
+import org.apache.hadoop.metrics.util.MetricsRegistry;
+import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+
+/**
+ * Maintains dynamically created region server statistics (for example,
+ * per-column-family metrics) and publishes them through the metrics
+ * interfaces.  Also registers the {@link RegionServerDynamicStatistics}
+ * JMX MBean.
+ * <p>
+ * Metrics are created on demand via {@link #setNumericMetric} and
+ * {@link #incrTimeVaryingMetric}, and are pushed to the monitoring
+ * subsystem on each {@link #doUpdates} call.
+ */
+public class RegionServerDynamicMetrics implements Updater {
+  private MetricsRecord metricsRecord;
+  private MetricsContext context;
+  private final RegionServerDynamicStatistics rsDynamicStatistics;
+
+  /**
+   * The metrics variables are public:
+   *  - they can be set directly by calling their set/inc methods
+   *  - they can also be read directly - e.g. JMX does this.
+   */
+  public final MetricsRegistry registry = new MetricsRegistry();
+
+  private RegionServerDynamicMetrics() {
+    this.context = MetricsUtil.getContext("hbase");
+    this.metricsRecord = MetricsUtil.createRecord(
+                            this.context,
+                            "RegionServerDynamicStatistics");
+    this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
+  }
+
+  public static RegionServerDynamicMetrics newInstance() {
+    RegionServerDynamicMetrics metrics =
+      new RegionServerDynamicMetrics();
+    metrics.context.registerUpdater(metrics);
+    return metrics;
+  }
+
+  public synchronized void setNumericMetric(String name, long amt) {
+    MetricsLongValue m = (MetricsLongValue)registry.get(name);
+    if (m == null) {
+      m = new MetricsLongValue(name, this.registry);
+    }
+    m.set(amt);
+  }
+
+  public synchronized void incrTimeVaryingMetric(
+      String name,
+      long amt,
+      int numOps) {
+    MetricsTimeVaryingRate m = (MetricsTimeVaryingRate)registry.get(name);
+    if (m == null) {
+      m = new MetricsTimeVaryingRate(name, this.registry);
+    }
+    if (numOps > 0) {
+      m.inc(numOps, amt);
+    }
+  }
+
+  /**
+   * Push the dynamically created metrics to the monitoring subsystem
+   * on each doUpdates() call.
+   * @param context the metrics context (unused by this updater)
+   */
+  public void doUpdates(MetricsContext context) {
+    /* get dynamically created numeric metrics, and push the metrics */
+    for (Entry<String, AtomicLong> entry : HRegion.numericMetrics.entrySet()) {
+      this.setNumericMetric(entry.getKey(), entry.getValue().getAndSet(0));
+    }
+    /* get dynamically created time varying metrics, and push the metrics */
+    for (Entry<String, Pair<AtomicLong, AtomicInteger>> entry :
+          HRegion.timeVaryingMetrics.entrySet()) {
+      Pair<AtomicLong, AtomicInteger> value = entry.getValue();
+      this.incrTimeVaryingMetric(entry.getKey(),
+          value.getFirst().getAndSet(0),
+          value.getSecond().getAndSet(0));
+    }
+
+    synchronized (registry) {
+      // Iterate through the registry to propagate the different rpc metrics.
+      for (String metricName : registry.getKeyList() ) {
+        MetricsBase value = registry.get(metricName);
+        value.pushMetric(metricsRecord);
+      }
+    }
+    metricsRecord.update();
+  }
+
+  public void shutdown() {
+    if (rsDynamicStatistics != null)
+      rsDynamicStatistics.shutdown();
+  }
+}

Added: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java?rev=1181512&view=auto
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java (added)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java Tue Oct 11 02:15:12 2011
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
+import org.apache.hadoop.metrics.util.MetricsRegistry;
+
+import javax.management.ObjectName;
+
+/**
+ * Exports dynamic region server metrics recorded in
+ * {@link RegionServerDynamicMetrics} as an MBean
+ * for JMX monitoring.
+ */
+public class RegionServerDynamicStatistics extends MetricsDynamicMBeanBase {
+  private final ObjectName mbeanName;
+
+  public RegionServerDynamicStatistics(MetricsRegistry registry) {
+    super(registry, "RegionServerDynamicStatistics");
+    mbeanName = MBeanUtil.registerMBean("RegionServerDynamic",
+                                        "RegionServerDynamicStatistics",
+                                        this);
+  }
+
+  public void shutdown() {
+    if (mbeanName != null)
+      MBeanUtil.unregisterMBean(mbeanName);
+  }
+
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Tue Oct 11 02:15:12 2011
@@ -247,7 +247,7 @@ public class HFilePerformanceEvaluation 
     @Override
     void setUp() throws Exception {
       super.setUp();
-      this.scanner = this.reader.getScanner(false, false);
+      this.scanner = this.reader.getScanner(false, false, false);
       this.scanner.seekTo();
     }
 
@@ -279,7 +279,7 @@ public class HFilePerformanceEvaluation 
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, true);
+      HFileScanner scanner = this.reader.getScanner(false, true, false);
       byte [] b = getRandomRow();
       scanner.seekTo(b);
       ByteBuffer k = scanner.getKey();
@@ -303,7 +303,7 @@ public class HFilePerformanceEvaluation 
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, false);
+      HFileScanner scanner = this.reader.getScanner(false, false, false);
       byte [] b = getRandomRow();
       if (scanner.seekTo(b) != 0) {
         System.out.println("Nonexistent row: " + new String(b));
@@ -337,7 +337,7 @@ public class HFilePerformanceEvaluation 
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, true);
+      HFileScanner scanner = this.reader.getScanner(false, true, false);
       scanner.seekTo(getGaussianRandomRowBytes());
       for (int ii = 0; ii < 30; ii++) {
         if (!scanner.next()) {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java Tue Oct 11 02:15:12 2011
@@ -92,7 +92,7 @@ public class TestHalfStoreFileReader {
     final HalfStoreFileReader halfreader =
         new HalfStoreFileReader(fs, p, null, bottom);
     halfreader.loadFileInfo();
-    final HFileScanner scanner = halfreader.getScanner(false, false);
+    final HFileScanner scanner = halfreader.getScanner(false, false, false);
 
     scanner.seekTo();
     KeyValue curr;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java Tue Oct 11 02:15:12 2011
@@ -78,7 +78,7 @@ public class RandomSeek {
     List<String> keys = slurp("/Users/ryan/xaa.50k");
 
     // Get a scanner that doesn't cache and that uses pread.
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(false, true, false);
     int count;
     long totalBytes = 0;
     int notFound = 0;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Tue Oct 11 02:15:12 2011
@@ -145,7 +145,7 @@ public class TestHFile extends HBaseTest
     // Load up the index.
     reader.loadFileInfo();
     // Get a scanner that caches and that does not use pread.
-    HFileScanner scanner = reader.getScanner(true, false);
+    HFileScanner scanner = reader.getScanner(true, false, false);
     // Align scanner at start of the file.
     scanner.seekTo();
     readAllRecords(scanner);
@@ -219,7 +219,7 @@ public class TestHFile extends HBaseTest
         .getLen(), null, false);
     reader.loadFileInfo();
     // No data -- this should return false.
-    assertFalse(reader.getScanner(false, false).seekTo());
+    assertFalse(reader.getScanner(false, false, false).seekTo());
     someReadingWithMetaBlock(reader);
     fs.delete(mFile, true);
     reader.close();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java Tue Oct 11 02:15:12 2011
@@ -245,7 +245,7 @@ public class TestHFilePerformance extend
           case 1:
           default:
             {
-              HFileScanner scanner = reader.getScanner(false, false);
+              HFileScanner scanner = reader.getScanner(false, false, false);
               scanner.seekTo();
               for (long l=0 ; l<rows ; l++ ) {
                 key = scanner.getKey();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java Tue Oct 11 02:15:12 2011
@@ -161,7 +161,7 @@ public class TestHFileSeek extends TestC
     KeySampler kSampler =
         new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
             keyLenGen);
-    HFileScanner scanner = reader.getScanner(false, false);
+    HFileScanner scanner = reader.getScanner(false, false, false);
     BytesWritable key = new BytesWritable();
     timer.reset();
     timer.start();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java Tue Oct 11 02:15:12 2011
@@ -62,7 +62,7 @@ public class TestReseekTo {
     HFile.Reader reader = new HFile.Reader(TEST_UTIL.getTestFileSystem(),
         ncTFile, null, false);
     reader.loadFileInfo();
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(false, true, false);
 
     scanner.seekTo();
     for (int i = 0; i < keyList.size(); i++) {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java Tue Oct 11 02:15:12 2011
@@ -62,7 +62,7 @@ public class TestSeekTo extends HBaseTes
     Path p = makeNewFile();
     HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(false, true, false);
     assertEquals(false, scanner.seekBefore(toKV("a").getKey()));
 
     assertEquals(false, scanner.seekBefore(toKV("c").getKey()));
@@ -96,7 +96,7 @@ public class TestSeekTo extends HBaseTes
     HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     assertEquals(2, reader.blockIndex.count);
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(false, true, false);
     // lies before the start of the file.
     assertEquals(-1, scanner.seekTo(toKV("a").getKey()));
 

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Tue Oct 11 02:15:12 2011
@@ -152,7 +152,7 @@ public class TestLoadIncrementalHFiles {
     HFile.Reader reader = new HFile.Reader(
         p.getFileSystem(conf), p, null, false);
     reader.loadFileInfo();
-    HFileScanner scanner = reader.getScanner(false, false);
+    HFileScanner scanner = reader.getScanner(false, false, false);
     scanner.seekTo();
     int count = 0;
     do {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Oct 11 02:15:12 2011
@@ -342,7 +342,7 @@ public class TestCompaction extends HBas
     int count1 = 0;
     int count2 = 0;
     for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
-      HFileScanner scanner = f.getReader().getScanner(false, false);
+      HFileScanner scanner = f.getReader().getScanner(false, false, false);
       scanner.seekTo();
       do {
         byte [] row = scanner.getKeyValue().getRow();
@@ -434,7 +434,7 @@ public class TestCompaction extends HBas
     int count = 0;
     for (StoreFile f: this.r.stores.
         get(COLUMN_FAMILY_TEXT).getStorefiles()) {
-      HFileScanner scanner = f.getReader().getScanner(false, false);
+      HFileScanner scanner = f.getReader().getScanner(false, false, false);
       if (!scanner.seekTo()) {
         continue;
       }

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Tue Oct 11 02:15:12 2011
@@ -77,7 +77,7 @@ public class TestFSErrorsExposed {
     StoreFile sf = new StoreFile(fs, writer.getPath(), false,
         util.getConfiguration(), StoreFile.BloomType.NONE, false);
     StoreFile.Reader reader = sf.createReader();
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(false, true, false);
 
     FaultyInputStream inStream = fs.inStreams.get(0).get();
     assertNotNull(inStream);
@@ -118,7 +118,7 @@ public class TestFSErrorsExposed {
     StoreFile sf = new StoreFile(fs, writer.getPath(), false,
         util.getConfiguration(), BloomType.NONE, false);
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
-        Collections.singletonList(sf), false, true);
+        Collections.singletonList(sf), false, true, false);
     KeyValueScanner scanner = scanners.get(0);
 
     FaultyInputStream inStream = fs.inStreams.get(0).get();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1181512&r1=1181511&r2=1181512&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Oct 11 02:15:12 2011
@@ -145,7 +145,7 @@ public class TestStoreFile extends HBase
         StoreFile.BloomType.NONE, false);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
-    HFileScanner s = refHsf.createReader().getScanner(false, false);
+    HFileScanner s = refHsf.createReader().getScanner(false, false, false);
     for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
       ByteBuffer bb = s.getKey();
       kv = KeyValue.createKeyValueFromKey(bb);
@@ -192,7 +192,7 @@ public class TestStoreFile extends HBase
       // Now test reading from the top.
       boolean first = true;
       ByteBuffer key = null;
-      HFileScanner topScanner = top.getScanner(false, false);
+      HFileScanner topScanner = top.getScanner(false, false, false);
       while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
           (topScanner.isSeeked() && topScanner.next())) {
         key = topScanner.getKey();
@@ -207,7 +207,7 @@ public class TestStoreFile extends HBase
       LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));
 
       first = true;
-      HFileScanner bottomScanner = bottom.getScanner(false, false);
+      HFileScanner bottomScanner = bottom.getScanner(false, false, false);
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
           bottomScanner.next()) {
         previous = bottomScanner.getKey();
@@ -237,7 +237,7 @@ public class TestStoreFile extends HBase
           StoreFile.BloomType.NONE, false).createReader();
       bottom = new StoreFile(this.fs, bottomPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
-      bottomScanner = bottom.getScanner(false, false);
+      bottomScanner = bottom.getScanner(false, false, false);
       int count = 0;
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
           bottomScanner.next()) {
@@ -247,7 +247,7 @@ public class TestStoreFile extends HBase
       assertTrue(count == 0);
       // Now read from the top.
       first = true;
-      topScanner = top.getScanner(false, false);
+      topScanner = top.getScanner(false, false, false);
       while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
           topScanner.next()) {
         key = topScanner.getKey();
@@ -283,7 +283,7 @@ public class TestStoreFile extends HBase
       bottom = new StoreFile(this.fs, bottomPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
       first = true;
-      bottomScanner = bottom.getScanner(false, false);
+      bottomScanner = bottom.getScanner(false, false, false);
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
           bottomScanner.next()) {
         key = bottomScanner.getKey();
@@ -303,7 +303,7 @@ public class TestStoreFile extends HBase
         assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z');
       }
       count = 0;
-      topScanner = top.getScanner(false, false);
+      topScanner = top.getScanner(false, false, false);
       while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
           (topScanner.isSeeked() && topScanner.next())) {
         count++;
@@ -341,7 +341,7 @@ public class TestStoreFile extends HBase
     StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
     reader.loadFileInfo();
     reader.loadBloomfilter();
-    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+    StoreFileScanner scanner = reader.getStoreFileScanner(false, false, false);
 
     // check false positives rate
     int falsePos = 0;
@@ -431,7 +431,7 @@ public class TestStoreFile extends HBase
       StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
       reader.loadFileInfo();
       reader.loadBloomfilter();
-      StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+      StoreFileScanner scanner = reader.getStoreFileScanner(false, false, false);
       assertEquals(expKeys[x], reader.bloomFilter.getKeyCount());
 
       // check false positives rate
@@ -605,7 +605,7 @@ public class TestStoreFile extends HBase
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
         StoreFile.BloomType.NONE, false);
     StoreFile.Reader reader = hsf.createReader();
-    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+    StoreFileScanner scanner = reader.getStoreFileScanner(false, false, false);
     TreeSet<byte[]> columns = new TreeSet<byte[]>();
     columns.add(qualifier);