Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 19:45:01 UTC

svn commit: r1181972 - in /hbase/branches/0.89/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/ test/java/org/apache/hadoop/hbase/io/hfile/ test/java/org/apache/hadoop/hbase/regionserver/

Author: nspiegelberg
Date: Tue Oct 11 17:45:00 2011
New Revision: 1181972

URL: http://svn.apache.org/viewvc?rev=1181972&view=rev
Log:
Refactored block read/cache and bloom metrics and made them more detailed

Summary: As we keep adding more granular block read and block cache usage
statistics, the number of cases we have to monitor grows combinatorially,
especially when we want both per-column-family/block-type statistics and
aggregate statistics along one or both of these dimensions. I am trying to
unclutter HFile readers, LruBlockCache, StoreFile, etc. by creating a
centralized class that knows how to update all kinds of
per-column-family/block-type statistics.
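
For illustration only (not part of this commit), a minimal sketch of how
these dimensions combine into metric names, assuming the relevant imports of
BlockCategory and BlockMetricType; the expected names are taken from the
TestColumnFamilyMetrics assertions included below, and "myColumnFamily" is a
made-up column family name:

    ColumnFamilyMetrics cfMetrics =
        ColumnFamilyMetrics.getInstance("myColumnFamily");

    // Per-CF, aggregated across block categories, regular (non-compaction)
    // read count: yields "cf.myColumnFamily.fsBlockReadCnt"
    String perCf = cfMetrics.getBlockMetricName(
        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.READ_COUNT);

    // Per-CF and per-block-category:
    // yields "cf.myColumnFamily.bt.Index.fsBlockReadCnt"
    String perCfAndCategory = cfMetrics.getBlockMetricName(
        BlockCategory.INDEX, false, BlockMetricType.READ_COUNT);

    // Aggregated across all CFs, data blocks read during compaction:
    // yields "bt.Data.compactionBlockReadCnt"
    String allCf = ColumnFamilyMetrics.ALL_CF_METRICS.getBlockMetricName(
        BlockCategory.DATA, true, BlockMetricType.READ_COUNT);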

Test Plan:
Run all unit tests.
New unit test.
Deploy to one region server in dark launch and compare the new output of
hbaseStats.py to the old one (take a diff of the set of keys).

Reviewers: pritam, liyintang, jgray, kannan

Reviewed By: kannan

CC: hbase@lists, dist-storage@lists, kannan

Differential Revision: 321147

Revert Plan: OK

Added:
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/ColumnFamilyMetrics.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestColumnFamilyMetrics.java
Modified:
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockInfo.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
    hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java Tue Oct 11 17:45:00 2011
@@ -22,11 +22,11 @@ package org.apache.hadoop.hbase.io.hfile
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.
+    ColumnFamilyConfigured;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.io.RawComparator;
@@ -34,9 +34,8 @@ import org.apache.hadoop.io.RawComparato
 /**
  * Common functionality needed by all versions of {@link HFile} readers.
  */
-public abstract class AbstractHFileReader implements HFile.Reader {
-
-  private static final Log LOG = LogFactory.getLog(AbstractHFileReader.class);
+public abstract class AbstractHFileReader extends ColumnFamilyConfigured
+    implements HFile.Reader {
 
   /** Filesystem-level block reader for this HFile format version. */
   protected HFileBlock.FSReader fsBlockReader;
@@ -97,32 +96,14 @@ public abstract class AbstractHFileReade
 
   protected FileInfo fileInfo;
 
-  // table qualified cfName for this HFile.
-  // This is used to report stats on a per-table/CF basis
-  public String cfName = "";
-
-  // various metrics that we want to track on a per-cf basis
-  public String fsReadTimeMetric = "";
-  public String compactionReadTimeMetric = "";
-
-  public String fsBlockReadCntMetric = "";
-  public String compactionBlockReadCntMetric = "";
-
-  public String fsBlockReadCacheHitCntMetric = "";
-  public String compactionBlockReadCacheHitCntMetric = "";
-
-  public String fsBlockReadCacheMissCntMetric = "";
-  public String compactionBlockReadCacheMissCntMetric = "";
-
-  public String fsMetaBlockReadCntMetric = "";
-  public String fsMetaBlockReadCacheHitCntMetric = "";
-  public String fsMetaBlockReadCacheMissCntMetric = "";
+  protected final ColumnFamilyMetrics cfMetrics;
 
   protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long fileSize,
       final boolean closeIStream,
       final BlockCache blockCache, final boolean inMemory,
       final boolean evictOnClose) {
+    super(path);
     this.trailer = trailer;
     this.compressAlgo = trailer.getCompressionCodec();
     this.blockCache = blockCache;
@@ -133,7 +114,7 @@ public abstract class AbstractHFileReade
     this.evictOnClose = evictOnClose;
     this.path = path;
     this.name = path.getName();
-    parsePath(path.toString());
+    cfMetrics = ColumnFamilyMetrics.getInstance(getColumnFamilyName());
   }
 
   @SuppressWarnings("serial")
@@ -151,42 +132,6 @@ public abstract class AbstractHFileReade
     return KeyValue.keyToString(getLastKey());
   }
 
-  /**
-   * Parse the HFile path to figure out which table and column family
-   * it belongs to. This is used to maintain read statistics on a
-   * per-column-family basis.
-   *
-   * @param path HFile path name
-   */
-  public void parsePath(String path) {
-    String splits[] = path.split("/");
-    if (splits.length < 2) {
-      LOG.warn("Could not determine the table and column family of the " +
-          "HFile path " + path);
-      return;
-    }
-
-    cfName = "cf." + splits[splits.length - 2];
-
-    fsReadTimeMetric = cfName + ".fsRead";
-    compactionReadTimeMetric = cfName + ".compactionRead";
-
-    fsBlockReadCntMetric = cfName + ".fsBlockReadCnt";
-    fsBlockReadCacheHitCntMetric = cfName + ".fsBlockReadCacheHitCnt";
-    fsBlockReadCacheMissCntMetric = cfName + ".fsBlockReadCacheMissCnt";
-
-    compactionBlockReadCntMetric = cfName + ".compactionBlockReadCnt";
-    compactionBlockReadCacheHitCntMetric = cfName
-        + ".compactionBlockReadCacheHitCnt";
-    compactionBlockReadCacheMissCntMetric = cfName
-        + ".compactionBlockReadCacheMissCnt";
-
-    fsMetaBlockReadCntMetric = cfName + ".fsMetaBlockReadCnt";
-    fsMetaBlockReadCacheHitCntMetric = cfName + ".fsMetaBlockReadCacheHitCnt";
-    fsMetaBlockReadCacheMissCntMetric = cfName
-        + ".fsMetaBlockReadCacheMissCnt";
-  }
-
   public abstract boolean isFileInfoLoaded();
 
   @Override
@@ -306,11 +251,6 @@ public abstract class AbstractHFileReade
   }
 
   @Override
-  public String getColumnFamilyName() {
-    return cfName;
-  }
-
-  @Override
   public FixedFileTrailer getTrailer() {
     return trailer;
   }
@@ -382,4 +322,8 @@ public abstract class AbstractHFileReade
     return path;
   }
 
+  public ColumnFamilyMetrics getColumnFamilyMetrics() {
+    return cfMetrics;
+  }
+
 }

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java Tue Oct 11 17:45:00 2011
@@ -34,6 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.
+    ColumnFamilyConfigured;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -43,7 +45,8 @@ import org.apache.hadoop.io.Writable;
 /**
  * Common functionality needed by all versions of {@link HFile} writers.
  */
-public abstract class AbstractHFileWriter implements HFile.Writer {
+public abstract class AbstractHFileWriter extends ColumnFamilyConfigured
+    implements HFile.Writer {
 
   private static final Log LOG = LogFactory.getLog(AbstractHFileWriter.class);
 
@@ -95,13 +98,6 @@ public abstract class AbstractHFileWrite
   /** May be null if we were passed a stream. */
   protected final Path path;
 
-  // table qualified cfName for this HFile.
-  // This is used to report stats on a per-table/CF basis
-
-  // Note that this is gotten from the path, which can be null, so this can
-  // remain unknown
-  public String cfName = "cf.unknown";
-
   /** Whether to cache key/value data blocks on write */
   protected final boolean cacheDataBlocksOnWrite;
 
@@ -123,6 +119,7 @@ public abstract class AbstractHFileWrite
   public AbstractHFileWriter(Configuration conf,
       FSDataOutputStream outputStream, Path path, int blockSize,
       Compression.Algorithm compressAlgo, KeyComparator comparator) {
+    super(path);
     this.outputStream = outputStream;
     this.path = path;
     this.name = path != null ? path.getName() : outputStream.toString();
@@ -142,20 +139,6 @@ public abstract class AbstractHFileWrite
 
     if (cacheDataBlocksOnWrite || cacheIndexBlocksOnWrite)
       initBlockCache();
-
-    if (path != null)
-      parsePath(path.toString());
-  }
-
-  public void parsePath(String path) {
-    String splits[] = path.split("/");
-    if (splits.length < 2) {
-      LOG.warn("Could not determine the table and column family of the " +
-          "HFile path " + path);
-      return;
-    }
-
-    cfName = "cf." + splits[splits.length - 2];
   }
 
   /**
@@ -263,11 +246,6 @@ public abstract class AbstractHFileWrite
   }
 
   @Override
-  public String getColumnFamilyName() {
-    return cfName;
-  }
-
-  @Override
   public String toString() {
     return "writer=" + (path != null ? path.toString() : null) + ", name="
         + name + ", compression=" + compressAlgo.getName();

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java Tue Oct 11 17:45:00 2011
@@ -75,7 +75,7 @@ public enum BlockType {
   INDEX_V1("IDXBLK)+", BlockCategory.INDEX);
 
   public enum BlockCategory {
-    DATA, META, INDEX, BLOOM
+    DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN
   }
 
   public static final int MAGIC_LENGTH = 8;
@@ -101,10 +101,6 @@ public enum BlockType {
     buf.put(magic);
   }
 
-  public String getMetricName(){
-    return metricCat.toString();
-  }
-
   public static BlockType parse(byte[] buf, int offset, int length)
       throws IOException {
     if (length != MAGIC_LENGTH) {
@@ -181,4 +177,8 @@ public enum BlockType {
     }
   }
 
+  public BlockCategory getCategory() {
+    return metricCat;
+  }
+
 }

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java Tue Oct 11 17:45:00 2011
@@ -19,8 +19,6 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
-import java.nio.ByteBuffer;
-
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;

Added: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/ColumnFamilyMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/ColumnFamilyMetrics.java?rev=1181972&view=auto
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/ColumnFamilyMetrics.java (added)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/ColumnFamilyMetrics.java Tue Oct 11 17:45:00 2011
@@ -0,0 +1,489 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
+/**
+ * A collection of metric names in a given column family. The following
+ * "dimensions" are supported:
+ * <ul>
+ * <li>Per-column family vs. aggregated</li>
+ * <li>Block category (data, index, bloom filter, etc.)</li>
+ * <li>Whether the request is part of a compaction</li>
+ * <li>Metric type (read time, block read count, cache hits/misses, etc.)</li>
+ * </ul>
+ *
+ * <p>
+ * An instance of this class does not store any metric values. It only
+ * determines the correct metric name for each combination of the above
+ * dimensions.
+ */
+public class ColumnFamilyMetrics {
+  private static final Log LOG = LogFactory.getLog(ColumnFamilyMetrics.class);
+
+  public interface ColumnFamilyAware {
+    /**
+     * @return Column family name pertaining to this reader/writer/block
+     */
+    public String getColumnFamilyName();
+  }
+
+  public static class ColumnFamilyConfigured implements ColumnFamilyAware {
+    private final String cfName;
+
+    public ColumnFamilyConfigured(Path path) {
+      if (path == null) {
+        cfName = UNKNOWN_CF;
+        return;
+      }
+
+      String splits[] = path.toString().split("/");
+      if (splits.length < 2) {
+        LOG.warn("Could not determine the column family of the HFile path "
+            + path);
+        cfName = UNKNOWN_CF;
+      } else {
+        cfName = splits[splits.length - 2];
+      }
+    }
+
+    @Override
+    public String getColumnFamilyName() {
+      return cfName;
+    }
+  }
+
+  private static final String BLOCK_TYPE_PREFIX = "bt.";
+  private static final String CF_PREFIX = "cf.";
+
+  /** A special column family value that means "all column families". */
+  private static final String CF_ALL = "";
+
+  /**
+   * Special handling for meta-block-specific metrics for
+   * backwards-compatibility.
+   */
+  private static final String META_BLOCK_CATEGORY_STR = "Meta";
+
+  static final String UNKNOWN_CF = "unknown";
+
+  public static enum BlockMetricType {
+    READ_TIME("Read", true),
+    READ_COUNT("BlockReadCnt", true),
+    CACHE_HIT("BlockReadCacheHitCnt", true),
+    CACHE_MISS("BlockReadCacheMissCnt", true),
+
+    CACHE_SIZE("blockCacheSize", false),
+    CACHED("blockCacheNumCached", false),
+    EVICTED("blockCacheNumEvicted", false);
+
+    private final String metricStr;
+    private final boolean compactionAware;
+
+    BlockMetricType(String metricStr, boolean compactionAware) {
+      this.metricStr = metricStr;
+      this.compactionAware = compactionAware;
+    }
+
+    @Override
+    public String toString() {
+      return metricStr;
+    }
+
+    private static final String BLOCK_METRIC_TYPE_REGEX;
+    static {
+      StringBuilder sb = new StringBuilder();
+      for (BlockMetricType bmt : values()) {
+        if (sb.length() > 0)
+          sb.append("|");
+        sb.append(bmt);
+      }
+      BLOCK_METRIC_TYPE_REGEX = sb.toString();
+    }
+  };
+
+  private static final int NUM_BLOCK_CATEGORIES = BlockCategory.values().length;
+
+  private static final int NUM_METRIC_TYPES = BlockMetricType.values().length;
+
+  private static final boolean[] BOOL_VALUES = new boolean[] { false, true };
+
+  private static final int NUM_METRICS =
+      NUM_BLOCK_CATEGORIES *  // blockCategory
+      BOOL_VALUES.length *    // isCompaction
+      NUM_METRIC_TYPES;       // metricType
+
+  /** All instances of this class */
+  private static final ConcurrentHashMap<String, ColumnFamilyMetrics>
+      cfToMetrics = new ConcurrentHashMap<String, ColumnFamilyMetrics>();
+
+  private final String[] blockMetricNames = new String[NUM_METRICS];
+  private final String[] bloomMetricNames = new String[2];
+
+  public static final ColumnFamilyMetrics ALL_CF_METRICS = getInstance(CF_ALL);
+
+  private ColumnFamilyMetrics(final String cf) {
+    final String cfPrefix = cf.equals(CF_ALL) ? "" : CF_PREFIX + cf + ".";
+    for (BlockCategory blockCategory : BlockCategory.values()) {
+      for (boolean isCompaction : BOOL_VALUES) {
+        for (BlockMetricType metricType : BlockMetricType.values()) {
+          if (!metricType.compactionAware && isCompaction)
+            continue;
+
+          StringBuilder sb = new StringBuilder(cfPrefix);
+          if (blockCategory != BlockCategory.ALL_CATEGORIES
+              && blockCategory != BlockCategory.META) {
+            String categoryStr = blockCategory.toString();
+            categoryStr = categoryStr.charAt(0)
+                + categoryStr.substring(1).toLowerCase();
+            sb.append(BLOCK_TYPE_PREFIX + categoryStr + ".");
+          }
+
+          if (metricType.compactionAware)
+            sb.append(isCompaction ? "compaction" : "fs");
+
+          // A special-case for meta blocks for backwards-compatibility.
+          if (blockCategory == BlockCategory.META)
+            sb.append(META_BLOCK_CATEGORY_STR);
+
+          sb.append(metricType);
+
+          int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
+          blockMetricNames[i] = sb.toString().intern();
+        }
+      }
+    }
+
+    for (boolean isInBloom : BOOL_VALUES) {
+      bloomMetricNames[isInBloom ? 1 : 0] = cfPrefix
+          + (isInBloom ? "keyMaybeInBloomCnt" : "keyNotInBloomCnt");
+    }
+  }
+
+  /**
+   * Returns a metrics object for the given column family, instantiating if
+   * necessary.
+   *
+   * @param cf column family name, or null as a fallback (e.g. in unit
+   *          tests)
+   */
+  public static ColumnFamilyMetrics getInstance(String cf) {
+    if (cf == null)
+      cf = UNKNOWN_CF;
+    ColumnFamilyMetrics cfMetrics = cfToMetrics.get(cf);
+    if (cfMetrics != null)
+      return cfMetrics;
+    cfMetrics = new ColumnFamilyMetrics(cf);
+    ColumnFamilyMetrics existingMetrics = cfToMetrics.putIfAbsent(cf,
+        cfMetrics);
+    return existingMetrics != null ? existingMetrics : cfMetrics;
+  }
+
+  private static final int getBlockMetricIndex(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    int i = 0;
+    i = i * NUM_BLOCK_CATEGORIES + blockCategory.ordinal();
+    i = i * BOOL_VALUES.length + (isCompaction ? 1 : 0);
+    i = i * NUM_METRIC_TYPES + metricType.ordinal();
+    return i;
+  }
+
+  public String getBlockMetricName(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    if (isCompaction && !metricType.compactionAware) {
+      throw new IllegalArgumentException("isCompaction cannot be true for "
+          + metricType);
+    }
+    return blockMetricNames[getBlockMetricIndex(blockCategory, isCompaction,
+        metricType)];
+  }
+
+  public String getBloomMetricName(boolean isInBloom) {
+    return bloomMetricNames[isInBloom ? 1 : 0];
+  }
+
+  /** Used in testing */
+  void printMetricNames() {
+    for (BlockCategory blockCategory : BlockCategory.values()) {
+      for (boolean isCompaction : BOOL_VALUES) {
+        for (BlockMetricType metricType : BlockMetricType.values()) {
+          int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
+          System.err.println("blockCategory=" + blockCategory + ", "
+              + "metricType=" + metricType + ", isCompaction=" + isCompaction
+              + ", metricName=" + blockMetricNames[i]);
+        }
+      }
+    }
+  }
+
+  /**
+   * Increments the given metric for both the given block category and the
+   * aggregate over all categories. The public update methods also invoke
+   * this on the all-CF instance, so four counters are updated in total.
+   */
+  private void incrNumericMetric(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    if (blockCategory == null) {
+      blockCategory = BlockCategory.UNKNOWN;  // So that we see this in stats.
+    }
+    HRegion.incrNumericMetric(getBlockMetricName(blockCategory,
+        isCompaction, metricType), 1);
+
+    if (blockCategory != BlockCategory.ALL_CATEGORIES) {
+      incrNumericMetric(BlockCategory.ALL_CATEGORIES, isCompaction, metricType);
+    }
+  }
+
+  private void addToReadTime(BlockCategory blockCategory,
+      boolean isCompaction, long timeMs) {
+    HRegion.incrTimeVaryingMetric(getBlockMetricName(blockCategory,
+        isCompaction, BlockMetricType.READ_TIME), timeMs);
+
+    // Also update the read time aggregated across all block categories
+    if (blockCategory != BlockCategory.ALL_CATEGORIES) {
+      addToReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs);
+    }
+  }
+
+  /**
+   * Updates the number of hits and the total number of block reads on a block
+   * cache hit.
+   */
+  public void updateOnCacheHit(BlockCategory blockCategory,
+      boolean isCompaction) {
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_HIT);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT);
+    if (this != ALL_CF_METRICS) {
+      ALL_CF_METRICS.updateOnCacheHit(blockCategory, isCompaction);
+    }
+  }
+
+  /**
+   * Updates read time, the number of misses, and the total number of block
+   * reads on a block cache miss.
+   */
+  public void updateOnCacheMiss(BlockCategory blockCategory,
+      boolean isCompaction, long timeMs) {
+    addToReadTime(blockCategory, isCompaction, timeMs);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_MISS);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT);
+    if (this != ALL_CF_METRICS) {
+      ALL_CF_METRICS.updateOnCacheMiss(blockCategory, isCompaction, timeMs);
+    }
+  }
+
+  /**
+   * Adds the given delta to the cache size for the given block category and
+   * to the aggregate metric for all block categories. When called through
+   * {@link #updateBlockCacheMetrics}, the all-CF counters are updated as
+   * well, for four metrics in total. The cache size metric is "persistent",
+   * i.e. it does not get reset when metrics are collected.
+   */
+  public void addToCacheSize(BlockCategory category, long cacheSizeDelta) {
+    if (category == null) {
+      category = BlockCategory.ALL_CATEGORIES;
+    }
+    HRegion.incrNumericPersistentMetric(getBlockMetricName(category, false,
+        BlockMetricType.CACHE_SIZE), cacheSizeDelta);
+
+    if (category != BlockCategory.ALL_CATEGORIES) {
+      addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta);
+    }
+  }
+
+  public void updateBlockCacheMetrics(BlockCategory blockCategory,
+      long cacheSizeDelta, boolean isEviction) {
+    addToCacheSize(blockCategory, cacheSizeDelta);
+    incrNumericMetric(blockCategory, false,
+        isEviction ? BlockMetricType.EVICTED : BlockMetricType.CACHED);
+    if (this != ALL_CF_METRICS) {
+      ALL_CF_METRICS.updateBlockCacheMetrics(blockCategory, cacheSizeDelta,
+          isEviction);
+    }
+  }
+
+  /**
+   * Increments both the per-CF and the aggregate counter of bloom
+   * positives/negatives as specified by the argument.
+   */
+  public void updateBloomMetrics(boolean isInBloom) {
+    HRegion.incrNumericMetric(getBloomMetricName(isInBloom), 1);
+    if (this != ALL_CF_METRICS) {
+      ALL_CF_METRICS.updateBloomMetrics(isInBloom);
+    }
+  }
+
+  // Test code
+
+  private Collection<String> getAllMetricNames() {
+    List<String> allMetricNames = new ArrayList<String>();
+    for (String blockMetricName : blockMetricNames)
+      if (blockMetricName != null)
+        allMetricNames.add(blockMetricName);
+    allMetricNames.addAll(Arrays.asList(bloomMetricNames));
+    return allMetricNames;
+  }
+
+  public static Map<String, Long> getMetricsSnapshot() {
+    Map<String, Long> metricsSnapshot = new TreeMap<String, Long>();
+    for (ColumnFamilyMetrics cfm : cfToMetrics.values()) {
+      for (String metricName : cfm.getAllMetricNames()) {
+        metricsSnapshot.put(metricName, HRegion.getNumericMetric(metricName));
+      }
+    }
+    return metricsSnapshot;
+  }
+
+  private static final Pattern CF_NAME_REGEX = Pattern.compile(
+      "\\b" + CF_PREFIX.replace(".", "\\.") + "[^.]+\\.");
+  private static final Pattern BLOCK_CATEGORY_REGEX = Pattern.compile(
+      "\\b" + BLOCK_TYPE_PREFIX.replace(".", "\\.") + "[^.]+\\." +
+      // Also remove the special-case block type marker for meta blocks
+      "|" + META_BLOCK_CATEGORY_STR + "(?=" +
+      BlockMetricType.BLOCK_METRIC_TYPE_REGEX + ")");
+
+  private static long getLong(Map<String, Long> m, String k) {
+    Long l = m.get(k);
+    return l != null ? l : 0;
+  }
+
+  private static void putLong(Map<String, Long> m, String k, long v) {
+    if (v != 0) {
+      m.put(k, v);
+    } else {
+      m.remove(k);
+    }
+  }
+  private static Map<String, Long> diffMetrics(Map<String, Long> a,
+      Map<String, Long> b) {
+    Set<String> allKeys = new TreeSet<String>(a.keySet());
+    allKeys.addAll(b.keySet());
+    Map<String, Long> diff = new TreeMap<String, Long>();
+    for (String k : allKeys) {
+      long aVal = getLong(a, k);
+      long bVal = getLong(b, k);
+      if (aVal != bVal) {
+        diff.put(k, bVal - aVal);
+      }
+    }
+    return diff;
+  }
+
+  /** Used in unit tests */
+  public static void validateMetricChanges(Map<String, Long> oldMetrics) {
+    final Map<String, Long> newMetrics = getMetricsSnapshot();
+    final Set<String> allKeys = new TreeSet<String>(oldMetrics.keySet());
+    allKeys.addAll(newMetrics.keySet());
+    final Map<String, Long> allCfDeltas = new TreeMap<String, Long>();
+    final Map<String, Long> allBlockCategoryDeltas =
+        new TreeMap<String, Long>();
+    final Map<String, Long> deltas = diffMetrics(oldMetrics, newMetrics);
+
+    for (ColumnFamilyMetrics cfm : cfToMetrics.values()) {
+      for (String metricName : cfm.getAllMetricNames()) {
+        if (metricName.startsWith(CF_PREFIX + CF_PREFIX)) {
+          throw new AssertionError("Column family prefix used twice: " +
+              metricName);
+        }
+
+        final long oldValue = getLong(oldMetrics, metricName);
+        final long newValue = getLong(newMetrics, metricName);
+        final long delta = newValue - oldValue;
+        if (oldValue != newValue) {
+          // Debug output for the unit test
+          System.err.println("key=" + metricName + ", delta=" + delta);
+        }
+
+        if (cfm != ALL_CF_METRICS) {
+          // Re-calculate values of metrics with no column family specified
+          // based on all metrics with cf specified, like this one.
+          final String metricNoCF =
+              CF_NAME_REGEX.matcher(metricName).replaceAll("");
+          putLong(allCfDeltas, metricNoCF,
+              getLong(allCfDeltas, metricNoCF) + delta);
+        }
+
+        Matcher matcher = BLOCK_CATEGORY_REGEX.matcher(metricName);
+        if (matcher.find()) {
+          // Only process per-block-category metrics
+          String metricNoBlockCategory = matcher.replaceAll("");
+          putLong(allBlockCategoryDeltas, metricNoBlockCategory,
+              getLong(allBlockCategoryDeltas, metricNoBlockCategory) + delta);
+        }
+      }
+    }
+
+    StringBuilder errors = new StringBuilder();
+    for (String key : ALL_CF_METRICS.getAllMetricNames()) {
+      long actual = getLong(deltas, key);
+      long expected = getLong(allCfDeltas, key);
+      if (actual != expected) {
+        if (errors.length() > 0)
+          errors.append("\n");
+        errors.append("The all-CF metric " + key + " changed by "
+            + actual + " but the aggregation of per-column-family metrics "
+            + "yields " + expected);
+      }
+    }
+
+    // Verify metrics computed for all block types based on the aggregation
+    // of per-block-type metrics.
+    for (String key : allKeys) {
+      if (BLOCK_CATEGORY_REGEX.matcher(key).find() ||
+          key.contains(ALL_CF_METRICS.getBloomMetricName(false)) ||
+          key.contains(ALL_CF_METRICS.getBloomMetricName(true))){
+        // Skip per-block-category metrics. Also skip bloom filters, because
+        // they are not aggregated per block type.
+        continue;
+      }
+      long actual = getLong(deltas, key);
+      long expected = getLong(allBlockCategoryDeltas, key);
+      if (actual != expected) {
+        if (errors.length() > 0)
+          errors.append("\n");
+        errors.append("The all-block-category metric " + key
+            + " changed by " + actual + " but the aggregation of "
+            + "per-block-category metrics yields " + expected);
+      }
+    }
+
+    if (errors.length() > 0) {
+      throw new AssertionError(errors.toString());
+    }
+  }
+
+}
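
The reader and LruBlockCache hunks below funnel all of their metric updates
through this class. As a rough sketch of the intended call pattern
(simplified from the HFileReaderV2 hunk further down, with variables such as
blockCache, cacheKey, cacheBlock, isCompaction and fsBlockReader assumed to
be in scope as they are in the reader code; this is not a verbatim excerpt):

    ColumnFamilyMetrics cfMetrics =
        ColumnFamilyMetrics.getInstance(getColumnFamilyName());

    HFileBlock cachedBlock =
        (HFileBlock) blockCache.getBlock(cacheKey, cacheBlock);
    if (cachedBlock != null) {
      // A single call updates per-CF, all-CF, per-category, and
      // all-category hit and read counters.
      cfMetrics.updateOnCacheHit(cachedBlock.getBlockType().getCategory(),
          isCompaction);
      return cachedBlock;
    }

    long now = System.currentTimeMillis();
    HFileBlock dataBlock = fsBlockReader.readBlockData(dataBlockOffset,
        onDiskBlockSize, -1, pread);
    // On a cache miss, read time, miss count, and read count are updated.
    cfMetrics.updateOnCacheMiss(dataBlock.getBlockType().getCategory(),
        isCompaction, System.currentTimeMillis() - now);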

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Oct 11 17:45:00 2011
@@ -303,55 +303,55 @@ public class HFile {
      * Needs to be constant as the file is being moved to support caching on
      * write.
      */
-     String getName();
+    String getName();
 
-     String getColumnFamilyName();
+    String getColumnFamilyName();
 
-     RawComparator<byte []> getComparator();
+    RawComparator<byte []> getComparator();
 
-     HFileScanner getScanner(boolean cacheBlocks,
-        final boolean pread, final boolean isCompaction);
+    HFileScanner getScanner(boolean cacheBlocks,
+       final boolean pread, final boolean isCompaction);
 
-     ByteBuffer getMetaBlock(String metaBlockName,
-        boolean cacheBlock) throws IOException;
+    ByteBuffer getMetaBlock(String metaBlockName,
+       boolean cacheBlock) throws IOException;
 
-     HFileBlock readBlock(long offset, int onDiskBlockSize,
-         boolean cacheBlock, final boolean pread, final boolean isCompaction)
-         throws IOException;
+    HFileBlock readBlock(long offset, int onDiskBlockSize,
+        boolean cacheBlock, final boolean pread, final boolean isCompaction)
+        throws IOException;
 
-     Map<byte[], byte[]> loadFileInfo() throws IOException;
+    Map<byte[], byte[]> loadFileInfo() throws IOException;
 
-     byte[] getLastKey();
+    byte[] getLastKey();
 
-     byte[] midkey() throws IOException;
+    byte[] midkey() throws IOException;
 
-     long length();
+    long length();
 
-     long getEntries();
+    long getEntries();
 
-     byte[] getFirstKey();
+    byte[] getFirstKey();
 
-     long indexSize();
+    long indexSize();
 
-     byte[] getFirstRowKey();
+    byte[] getFirstRowKey();
 
-     byte[] getLastRowKey();
+    byte[] getLastRowKey();
 
-     FixedFileTrailer getTrailer();
+    FixedFileTrailer getTrailer();
 
-     HFileBlockIndex.BlockIndexReader getDataBlockIndexReader();
+    HFileBlockIndex.BlockIndexReader getDataBlockIndexReader();
 
-     HFileScanner getScanner(boolean cacheBlocks, boolean pread);
+    HFileScanner getScanner(boolean cacheBlocks, boolean pread);
 
-     Compression.Algorithm getCompressionAlgorithm();
+    Compression.Algorithm getCompressionAlgorithm();
 
     /**
      * Retrieves Bloom filter metadata as appropriate for each {@link HFile}
      * version. Knows nothing about how that metadata is structured.
      */
-     DataInput getBloomFilterMetadata() throws IOException;
+    DataInput getBloomFilterMetadata() throws IOException;
 
-     Path getPath();
+    Path getPath();
   }
 
   private static Reader pickReaderVersion(Path path, FSDataInputStream fsdis,

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockInfo.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockInfo.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockInfo.java Tue Oct 11 17:45:00 2011
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.io.hfile;
 
 import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.ColumnFamilyAware;
 
 /**
  * An interface that exposes methods to retrieve the column type and BlockType
@@ -28,11 +29,7 @@ import org.apache.hadoop.hbase.io.hfile.
  * metrics, for example. Used by implementations of HeapSize, such as
  * {@link HFileBlock}
  */
-public interface HFileBlockInfo {
-  /**
-   * @return Column family name of this cached item.
-   */
-  public String getColumnFamilyName();
+public interface HFileBlockInfo extends ColumnFamilyAware {
 
   /**
    * @return BlockType descriptor of this cached item. Indicates the type of

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java Tue Oct 11 17:45:00 2011
@@ -30,10 +30,10 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
@@ -220,14 +220,14 @@ public class HFileReaderV1 extends Abstr
     // Per meta key from any given file, synchronize reads for said block
     synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
       metaLoads++;
-      HRegion.incrNumericMetric(this.fsMetaBlockReadCntMetric, 1);
+
       // Check cache for block.  If found return.
       if (blockCache != null) {
         HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
             cacheBlock);
         if (cachedBlock != null) {
           cacheHits++;
-          HRegion.incrNumericMetric(this.fsMetaBlockReadCacheHitCntMetric, 1);
+          cfMetrics.updateOnCacheHit(BlockCategory.META, false);
           return cachedBlock.getBufferWithoutHeader();
         }
         // Cache Miss, please load.
@@ -240,9 +240,9 @@ public class HFileReaderV1 extends Abstr
       hfileBlock.expectType(BlockType.META);
 
       long delta = System.currentTimeMillis() - now;
-      HRegion.incrTimeVaryingMetric(this.fsReadTimeMetric, delta);
       HFile.readTime += delta;
       HFile.readOps++;
+      cfMetrics.updateOnCacheMiss(BlockCategory.META, false, delta);
 
       // Cache the block
       if (cacheBlock && blockCache != null) {
@@ -283,27 +283,13 @@ public class HFileReaderV1 extends Abstr
     synchronized (dataBlockIndexReader.getRootBlockKey(block)) {
       blockLoads++;
 
-      if (isCompaction) {
-        HRegion.incrNumericMetric(this.compactionBlockReadCntMetric, 1);
-      } else {
-        HRegion.incrNumericMetric(this.fsBlockReadCntMetric, 1);
-      }
-
       // Check cache for block.  If found return.
       if (blockCache != null) {
         HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
             cacheBlock);
         if (cachedBlock != null) {
           cacheHits++;
-
-          if (isCompaction) {
-            HRegion.incrNumericMetric(
-                this.compactionBlockReadCacheHitCntMetric, 1);
-          } else {
-            HRegion.incrNumericMetric(
-                this.fsBlockReadCacheHitCntMetric, 1);
-          }
-
+          cfMetrics.updateOnCacheHit(BlockCategory.DATA, isCompaction);
           return cachedBlock.getBufferWithoutHeader();
         }
         // Carry on, please load.
@@ -332,11 +318,7 @@ public class HFileReaderV1 extends Abstr
       long delta = System.currentTimeMillis() - now;
       HFile.readTime += delta;
       HFile.readOps++;
-      if (isCompaction) {
-        HRegion.incrTimeVaryingMetric(this.compactionReadTimeMetric, delta);
-      } else {
-        HRegion.incrTimeVaryingMetric(this.fsReadTimeMetric, delta);
-      }
+      cfMetrics.updateOnCacheMiss(BlockCategory.DATA, isCompaction, delta);
 
       // Cache the block
       if (cacheBlock && blockCache != null) {

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java Tue Oct 11 17:45:00 2011
@@ -30,8 +30,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
 
@@ -169,7 +169,6 @@ public class HFileReaderV2 extends Abstr
     // single-level.
     synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
       metaLoads++;
-      HRegion.incrNumericMetric(fsMetaBlockReadCntMetric, 1);
 
       // Check cache for block. If found return.
       long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
@@ -182,7 +181,7 @@ public class HFileReaderV2 extends Abstr
           // Return a distinct 'shallow copy' of the block,
           // so pos does not get messed by the scanner
           cacheHits++;
-          HRegion.incrNumericMetric(fsMetaBlockReadCacheHitCntMetric, 1);
+          cfMetrics.updateOnCacheHit(BlockCategory.META, false);
           return cachedBlock.getBufferWithoutHeader();
         }
         // Cache Miss, please load.
@@ -191,12 +190,11 @@ public class HFileReaderV2 extends Abstr
       HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
           blockSize, -1, true);
       metaBlock.setColumnFamilyName(this.getColumnFamilyName());
-      HRegion.incrNumericMetric(fsMetaBlockReadCacheMissCntMetric, 1);
 
       long delta = System.currentTimeMillis() - now;
-      HRegion.incrTimeVaryingMetric(fsReadTimeMetric, delta);
       HFile.readTime += delta;
       HFile.readOps++;
+      cfMetrics.updateOnCacheMiss(BlockCategory.META, false, delta);
 
       // Cache the block
       if (cacheBlock && blockCache != null) {
@@ -258,30 +256,15 @@ public class HFileReaderV2 extends Abstr
     try {
       blockLoads++;
 
-      if (isCompaction) {
-        HRegion.incrNumericMetric(compactionBlockReadCntMetric, 1);
-      } else {
-        HRegion.incrNumericMetric(fsBlockReadCntMetric, 1);
-      }
-
       // Check cache for block. If found return.
       if (blockCache != null) {
         HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
             cacheBlock);
         if (cachedBlock != null) {
+          BlockCategory blockCategory =
+              cachedBlock.getBlockType().getCategory();
           cacheHits++;
-          if (isCompaction) {
-            HRegion.incrNumericMetric(
-                compactionBlockReadCacheHitCntMetric, 1);
-            HRegion.incrNumericMetric("bt." +
-                cachedBlock.getBlockType().getMetricName() +
-                ".compactionBlockReadCacheHitCnt", 1);
-          } else {
-            HRegion.incrNumericMetric(fsBlockReadCacheHitCntMetric, 1);
-            HRegion.incrNumericMetric("bt." +
-                cachedBlock.getBlockType().getMetricName() +
-                ".fsBlockReadCacheHitCnt", 1);
-          }
+          cfMetrics.updateOnCacheHit(blockCategory, isCompaction);
           return cachedBlock;
         }
         // Carry on, please load.
@@ -292,20 +275,12 @@ public class HFileReaderV2 extends Abstr
       HFileBlock dataBlock = fsBlockReader.readBlockData(dataBlockOffset,
           onDiskBlockSize, -1, pread);
       dataBlock.setColumnFamilyName(this.getColumnFamilyName());
-      HRegion.incrNumericMetric("bt." + dataBlock.getBlockType().getMetricName()
-          + ".blockReadCacheMissCnt", 1);
+      BlockCategory blockCategory = dataBlock.getBlockType().getCategory();
 
       long delta = System.currentTimeMillis() - now;
       HFile.readTime += delta;
       HFile.readOps++;
-      if (isCompaction) {
-        HRegion.incrNumericMetric(
-            this.compactionBlockReadCacheMissCntMetric, 1);
-        HRegion.incrTimeVaryingMetric(compactionReadTimeMetric, delta);
-      } else {
-        HRegion.incrNumericMetric(this.fsBlockReadCacheMissCntMetric, 1);
-        HRegion.incrTimeVaryingMetric(fsReadTimeMetric, delta);
-      }
+      cfMetrics.updateOnCacheMiss(blockCategory, isCompaction, delta);
 
       // Cache the block
       if (cacheBlock && blockCache != null) {

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Tue Oct 11 17:45:00 2011
@@ -31,9 +31,11 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.BlockMetricType;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockInfo;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 
@@ -266,9 +268,7 @@ public class LruBlockCache implements Bl
    * Cache the block with the specified name and buffer.
    * <p>
    * It is assumed this will NEVER be called on an already cached block.  If
-   * that is done, it is assumed that you are reinserting the same exact
-   * block due to a race condition and will update the buffer but not modify
-   * the size of the cache.
+   * that is done, an exception will be thrown.
    * @param blockName block name
    * @param buf block buffer
    * @param inMemory if block is in-memory
@@ -315,30 +315,11 @@ public class LruBlockCache implements Bl
       heapsize *= -1;
     }
     if (cb.getBuffer() instanceof HFileBlockInfo) {
-      HFileBlockInfo cb_hfbi = (HFileBlockInfo) cb.getBuffer();
-      // CF total size
-      HRegion.incrNumericPersistentMetric(cb_hfbi.getColumnFamilyName()
-          + ".blockCacheSize", heapsize);
-      // BlockType total size
-      HRegion.incrNumericPersistentMetric("bt."
-          + cb_hfbi.getBlockType().getMetricName() + ".blockCacheSize",
-          heapsize);
-      if (evict) {
-        // CF number evicted
-        HRegion.incrNumericMetric(cb_hfbi.getColumnFamilyName()
-            + ".blockCacheNumEvicted", 1);
-        // BlockType number evicted
-        HRegion.incrNumericMetric("bt." +
-            cb_hfbi.getBlockType().getMetricName() + ".blockCacheNumEvicted",
-            1);
-      } else {
-        // CF number cached
-        HRegion.incrNumericMetric(cb_hfbi.getColumnFamilyName()
-            + ".blockCacheNumCached", 1);
-        // BlockType number cached
-        HRegion.incrNumericMetric("bt." +
-            cb_hfbi.getBlockType().getMetricName() + ".blockCacheNumCached", 1);
-      }
+      final HFileBlockInfo blockInfo = (HFileBlockInfo) cb.getBuffer();
+      ColumnFamilyMetrics cfMetrics = ColumnFamilyMetrics.getInstance(
+          blockInfo.getColumnFamilyName());
+      cfMetrics.updateBlockCacheMetrics(blockInfo.getBlockType().getCategory(),
+          heapsize, evict);
     }
     return size.addAndGet(heapsize);
   }

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Oct 11 17:45:00 2011
@@ -349,7 +349,7 @@ public class HRegion implements HeapSize
     oldVal.getSecond().incrementAndGet(); // increment ops by 1
   }
 
-  static long getNumericMetric(String key) {
+  public static long getNumericMetric(String key) {
     AtomicLong m = numericMetrics.get(key);
     if (m == null)
       return 0;

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Oct 11 17:45:00 2011
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -961,9 +962,8 @@ public class StoreFile {
     private final HFile.Reader reader;
     protected TimeRangeTracker timeRangeTracker = null;
     protected long sequenceID = -1;
-    private final String bloomAccessedMetric;
-    private final String bloomSkippedMetric;
     private byte[] lastBloomKey;
+    private final ColumnFamilyMetrics cfMetrics;
 
     public Reader(FileSystem fs, Path path, BlockCache blockCache,
         boolean inMemory, boolean evictOnClose)
@@ -971,9 +971,8 @@ public class StoreFile {
       reader = HFile.createReader(fs, path, blockCache, inMemory, evictOnClose);
 
       // prepare the text (key) for the metrics
-      bloomAccessedMetric = reader.getColumnFamilyName()
-          + ".keyMaybeInBloomCnt";
-      bloomSkippedMetric = reader.getColumnFamilyName() + ".keyNotInBloomCnt";
+      cfMetrics = ColumnFamilyMetrics.getInstance(
+          reader.getColumnFamilyName());
       bloomFilterType = BloomType.NONE;
     }
 
@@ -982,8 +981,7 @@ public class StoreFile {
      */
     Reader() {
       this.reader = null;
-      bloomAccessedMetric = "";
-      bloomSkippedMetric = "";
+      this.cfMetrics = ColumnFamilyMetrics.getInstance(null);
     }
 
     public RawComparator<byte []> getComparator() {
@@ -1208,10 +1206,7 @@ public class StoreFile {
                 && bloomFilter.contains(key, 0, key.length, bloom);
           }
 
-          if (exists)
-            HRegion.incrNumericMetric(bloomAccessedMetric, 1);
-          else
-            HRegion.incrNumericMetric(bloomSkippedMetric, 1);
+          cfMetrics.updateBloomMetrics(exists);
           return exists;
         }
       } catch (IOException e) {

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestColumnFamilyMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestColumnFamilyMetrics.java?rev=1181972&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestColumnFamilyMetrics.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestColumnFamilyMetrics.java Tue Oct 11 17:45:00 2011
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import static org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.BlockMetricType;
+import static org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.ALL_CF_METRICS;
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+
+public class TestColumnFamilyMetrics {
+
+  @Test
+  public void testNaming() {
+    final String cfName = "myColumnFamily";
+    final String cfPrefix = "cf." + cfName + ".";
+    ColumnFamilyMetrics cfMetrics = ColumnFamilyMetrics.getInstance(cfName);
+
+    // fsReadTimeMetric
+    assertEquals(cfPrefix + "fsRead", cfMetrics.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.READ_TIME));
+
+    // compactionReadTimeMetric
+    assertEquals(cfPrefix + "compactionRead", cfMetrics.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, true, BlockMetricType.READ_TIME));
+
+    // fsBlockReadCntMetric
+    assertEquals(cfPrefix + "fsBlockReadCnt", cfMetrics.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.READ_COUNT));
+
+    // fsBlockReadCacheHitCntMetric
+    assertEquals(cfPrefix + "fsBlockReadCacheHitCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.CACHE_HIT));
+
+    // fsBlockReadCacheMissCntMetric
+    assertEquals(cfPrefix + "fsBlockReadCacheMissCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.CACHE_MISS));
+
+    // compactionBlockReadCntMetric
+    assertEquals(cfPrefix + "compactionBlockReadCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.READ_COUNT));
+
+    // compactionBlockReadCacheHitCntMetric
+    assertEquals(cfPrefix + "compactionBlockReadCacheHitCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.CACHE_HIT));
+
+    // compactionBlockReadCacheMissCntMetric
+    assertEquals(cfPrefix + "compactionBlockReadCacheMissCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.CACHE_MISS));
+
+    // fsMetaBlockReadCntMetric
+    assertEquals("fsMetaBlockReadCnt", ALL_CF_METRICS.getBlockMetricName(
+        BlockCategory.META, false, BlockMetricType.READ_COUNT));
+
+    // fsMetaBlockReadCacheHitCntMetric
+    assertEquals("fsMetaBlockReadCacheHitCnt",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false,
+            BlockMetricType.CACHE_HIT));
+
+    // fsMetaBlockReadCacheMissCntMetric
+    assertEquals("fsMetaBlockReadCacheMissCnt",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false,
+            BlockMetricType.CACHE_MISS));
+
+    // Per-(column family, block type) statistics.
+    assertEquals(cfPrefix + "bt.Index.fsBlockReadCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.INDEX, false,
+            BlockMetricType.READ_COUNT));
+
+    assertEquals(cfPrefix + "bt.Data.compactionBlockReadCacheHitCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.DATA, true,
+            BlockMetricType.CACHE_HIT));
+
+    // A special case for Meta blocks
+    assertEquals(cfPrefix + "compactionMetaBlockReadCacheHitCnt",
+        cfMetrics.getBlockMetricName(BlockCategory.META, true,
+            BlockMetricType.CACHE_HIT));
+
+    // Cache metrics
+    assertEquals(cfPrefix + "blockCacheSize", cfMetrics.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.CACHE_SIZE));
+
+    assertEquals(cfPrefix + "bt.Index.blockCacheNumEvicted",
+        cfMetrics.getBlockMetricName(BlockCategory.INDEX, false,
+            BlockMetricType.EVICTED));
+
+    assertEquals("bt.Data.blockCacheNumCached",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.DATA, false,
+            BlockMetricType.CACHED));
+
+    assertEquals("blockCacheNumCached", ALL_CF_METRICS.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.CACHED));
+
+    // "Non-compaction aware" metrics
+    try {
+      ALL_CF_METRICS.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+          BlockMetricType.CACHE_SIZE);
+      fail("Exception expected");
+    } catch (IllegalArgumentException ex) {
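+      // Expected.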
+    }
+
+    // Bloom metrics
+    assertEquals("keyMaybeInBloomCnt", ALL_CF_METRICS.getBloomMetricName(true));
+    assertEquals(cfPrefix + "keyNotInBloomCnt",
+        cfMetrics.getBloomMetricName(false));
+
+    cfMetrics.printMetricNames();
+  }
+
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Tue Oct 11 17:45:00 2011
@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -53,6 +54,7 @@ public class TestHFile extends HBaseTest
     HBaseTestingUtility.getTestDir("TestHFile").toString();
   private final int minBlockSize = 512;
   private static String localFormatter = "%010d";
+  private Map<String, Long> startingMetrics;
 
   /**
    * Test empty HFile.
@@ -69,6 +71,18 @@ public class TestHFile extends HBaseTest
     assertNull(r.getLastKey());
   }
 
+  @Override
+  public void setUp() throws Exception {
+    startingMetrics = ColumnFamilyMetrics.getMetricsSnapshot();
+    super.setUp();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
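+    // Check that metric changes since the setUp() snapshot are consistent.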
+    ColumnFamilyMetrics.validateMetricChanges(startingMetrics);
+  }
+
   // write some records into the tfile
   // write them twice
   private int writeSomeRecords(Writer writer, int start, int n)

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java Tue Oct 11 17:45:00 2011
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile
 
 import java.io.IOException;
 import java.net.URL;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -41,15 +43,22 @@ public class TestHFileReaderV1 {
 
   private Configuration conf;
   private FileSystem fs;
+  private Map<String, Long> startingMetrics;
 
   private static final int N = 1000;
 
   @Before
   public void setUp() throws IOException {
+    startingMetrics = ColumnFamilyMetrics.getMetricsSnapshot();
     conf = TEST_UTIL.getConfiguration();
     fs = FileSystem.get(conf);
   }
 
+  @After
+  public void tearDown() throws Exception {
+    ColumnFamilyMetrics.validateMetricChanges(startingMetrics);
+  }
+
   @Test
   public void testReadingExistingVersion1HFile() throws IOException {
     URL url = TestHFileReaderV1.class.getResource(

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java Tue Oct 11 17:45:00 2011
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -35,6 +36,20 @@ import junit.framework.TestCase;
  */
 public class TestLruBlockCache extends TestCase {
 
+  private Map<String, Long> startingMetrics;
+
+  @Override
+  public void setUp() throws Exception {
+    startingMetrics = ColumnFamilyMetrics.getMetricsSnapshot();
+    super.setUp();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    ColumnFamilyMetrics.validateMetricChanges(startingMetrics);
+  }
+
   public void testBackgroundEvictionThread() throws Exception {
 
     long maxSize = 100000;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java Tue Oct 11 17:45:00 2011
@@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.AbstractHFileReader;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
@@ -589,7 +591,7 @@ public class HFileReadWriteTest {
     private volatile boolean stopRequested;
     private volatile Thread thread;
     private Set<String> fsBlockReadMetrics = new TreeSet<String>();
-    private boolean isRead;
+    private boolean isCompaction;
 
     public StatisticsPrinter(HFile.Reader reader) {
       this(new HFile.Reader[] { reader });
@@ -612,11 +614,12 @@ public class HFileReadWriteTest {
     }
 
     public StatisticsPrinter(HFile.Reader[] readers) {
-      isRead = workload == Workload.RANDOM_READS;
+      isCompaction = workload == Workload.MERGE;
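+      // Note: the aggregate (all-CF) read-count metric name is the same for
+      // every reader, so the set below collapses to a single entry.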
       for (HFile.Reader reader : readers) {
-        AbstractHFileReader r = (AbstractHFileReader) reader;
-        fsBlockReadMetrics.add(isRead ? r.fsBlockReadCntMetric
-            : r.compactionBlockReadCntMetric);
+        fsBlockReadMetrics.add(
+            ColumnFamilyMetrics.ALL_CF_METRICS.getBlockMetricName(
+                BlockType.BlockCategory.ALL_CATEGORIES, isCompaction,
+                ColumnFamilyMetrics.BlockMetricType.READ_COUNT));
       }
 
       LOG.info("Using the following metrics for the number of data blocks " +

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java Tue Oct 11 17:45:00 2011
@@ -44,7 +44,10 @@ import org.apache.hadoop.hbase.KeyValueT
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics.BlockMetricType;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
@@ -62,9 +65,14 @@ public class TestMultiColumnScanner {
       LogFactory.getLog(TestMultiColumnScanner.class);
 
   private static final String TABLE_NAME = "TestMultiColumnScanner";
+  private static final int MAX_VERSIONS = 50;
+
+  // These fields are used in TestScanWithBloomError
   static final String FAMILY = "CF";
   static final byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY);
-  static final int MAX_VERSIONS = 50;
+
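+  // Used to construct per-CF block metric names in this test.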
+  private final ColumnFamilyMetrics cfMetrics =
+      ColumnFamilyMetrics.getInstance(FAMILY);
 
   /**
    * The size of the column qualifier set used. Increasing this parameter
@@ -135,12 +143,13 @@ public class TestMultiColumnScanner {
   }
 
   private long getBlocksRead() {
-    return HRegion.getNumericMetric("cf." + FAMILY + ".fsBlockReadCnt");
+    return HRegion.getNumericMetric(cfMetrics.getBlockMetricName(
+        BlockType.BlockCategory.ALL_CATEGORIES, false,
+        BlockMetricType.READ_COUNT));
   }
 
   private long getCacheHits() {
-    return HRegion.getNumericMetric("cf." + FAMILY +
-        ".fsBlockReadCacheHitCnt");
+    return HRegion.getNumericMetric(cfMetrics.getBlockMetricName(
+        BlockType.BlockCategory.ALL_CATEGORIES, false,
+        BlockMetricType.CACHE_HIT));
   }
 
   private void saveBlockStats() {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1181972&r1=1181971&r2=1181972&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Oct 11 17:45:00 2011
@@ -26,6 +26,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
 import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
@@ -40,6 +42,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.ColumnFamilyMetrics;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
@@ -58,9 +61,11 @@ import com.google.common.collect.Lists;
 public class TestStoreFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
   private MiniDFSCluster cluster;
+  private Map<String, Long> startingMetrics;
 
   @Override
   public void setUp() throws Exception {
+    startingMetrics = ColumnFamilyMetrics.getMetricsSnapshot();
     try {
       this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
       // Set the hbase.rootdir to be the home directory in mini dfs.
@@ -78,6 +83,7 @@ public class TestStoreFile extends HBase
     shutdownDfs(cluster);
     // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
     //  "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
+    ColumnFamilyMetrics.validateMetricChanges(startingMetrics);
   }
 
   /**