Posted to commits@hbase.apache.org by ch...@apache.org on 2016/02/05 03:58:19 UTC

hbase git commit: HBASE-15197 Expose filtered read requests metric to metrics framework and Web UI (Eungsop Yoo)

Repository: hbase
Updated Branches:
  refs/heads/master b80325fb1 -> 8f20bc748


HBASE-15197 Expose filtered read requests metric to metrics framework and Web UI (Eungsop Yoo)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f20bc74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f20bc74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f20bc74

Branch: refs/heads/master
Commit: 8f20bc748de60f8da3ab7f66c6a198515d53de35
Parents: b80325f
Author: chenheng <ch...@apache.org>
Authored: Fri Feb 5 10:57:14 2016 +0800
Committer: chenheng <ch...@apache.org>
Committed: Fri Feb 5 10:57:14 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/RegionLoad.java     |   7 +
 .../org/apache/hadoop/hbase/ServerLoad.java     |   8 +
 .../regionserver/MetricsRegionServerSource.java |   3 +
 .../MetricsRegionServerWrapper.java             |   5 +
 .../regionserver/MetricsRegionWrapper.java      |   5 +
 .../MetricsRegionServerSourceImpl.java          |   2 +
 .../regionserver/MetricsRegionSourceImpl.java   |   4 +
 .../TestMetricsRegionSourceImpl.java            |   5 +
 .../protobuf/generated/ClusterStatusProtos.java | 191 ++++++++--
 .../src/main/protobuf/ClusterStatus.proto       |   3 +
 .../tmpl/master/RegionServerListTmpl.jamon      |   2 +
 .../tmpl/regionserver/RegionListTmpl.jamon      |   2 +
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   |   2 +
 .../hadoop/hbase/regionserver/HRegion.java      |  13 +-
 .../hbase/regionserver/HRegionServer.java       |   1 +
 .../MetricsRegionServerWrapperImpl.java         |  11 +-
 .../regionserver/MetricsRegionWrapperImpl.java  |   5 +
 .../hadoop/hbase/regionserver/Region.java       |   3 +
 .../hadoop/hbase/TestRegionServerMetrics.java   | 379 +++++++++++++++++++
 .../org/apache/hadoop/hbase/TestServerLoad.java |   4 +
 .../MetricsRegionServerWrapperStub.java         |   5 +
 .../regionserver/MetricsRegionWrapperStub.java  |   5 +
 .../hbase/regionserver/TestMetricsRegion.java   |   8 +
 .../regionserver/TestMetricsRegionServer.java   |   1 +
 24 files changed, 638 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
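
For readers who want to consume the new counter from a client, here is a minimal sketch, assuming a running cluster reachable through the default configuration. It mirrors how the added TestRegionServerMetrics polls the metric through Admin/ClusterStatus; the class name and printouts are illustrative only and are not part of this patch.

import java.util.Map;

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredReadRequestsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName serverName : status.getServers()) {
        ServerLoad sl = status.getLoad(serverName);
        // Server-level total introduced by this patch.
        System.out.println(serverName + " filteredReadRequestsCount="
            + sl.getFilteredReadRequestsCount());
        // Per-region breakdown, also introduced by this patch.
        for (Map.Entry<byte[], RegionLoad> e : sl.getRegionsLoad().entrySet()) {
          System.out.println("  region " + Bytes.toString(e.getKey())
              + " filtered=" + e.getValue().getFilteredReadRequestsCount());
        }
      }
    }
  }
}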


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index a6e846e..5bf2ec7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -106,6 +106,13 @@ public class RegionLoad {
   }
 
   /**
+   * @return the number of filtered read requests made to region
+   */
+  public long getFilteredReadRequestsCount() {
+    return regionLoadPB.getFilteredReadRequestsCount();
+  }
+
+  /**
    * @return the number of write requests made to region
    */
   public long getWriteRequestsCount() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 60fae85..1ddcc20 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -49,6 +49,7 @@ public class ServerLoad {
   private int memstoreSizeMB = 0;
   private int storefileIndexSizeMB = 0;
   private long readRequestsCount = 0;
+  private long filteredReadRequestsCount = 0;
   private long writeRequestsCount = 0;
   private int rootIndexSizeKB = 0;
   private int totalStaticIndexSizeKB = 0;
@@ -66,6 +67,7 @@ public class ServerLoad {
       memstoreSizeMB += rl.getMemstoreSizeMB();
       storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
       readRequestsCount += rl.getReadRequestsCount();
+      filteredReadRequestsCount += rl.getFilteredReadRequestsCount();
       writeRequestsCount += rl.getWriteRequestsCount();
       rootIndexSizeKB += rl.getRootIndexSizeKB();
       totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
@@ -145,6 +147,10 @@ public class ServerLoad {
     return readRequestsCount;
   }
 
+  public long getFilteredReadRequestsCount() {
+    return filteredReadRequestsCount;
+  }
+
   public long getWriteRequestsCount() {
     return writeRequestsCount;
   }
@@ -297,6 +303,8 @@ public class ServerLoad {
         Strings.appendKeyValue(sb, "storefileIndexSizeMB",
           Integer.valueOf(this.storefileIndexSizeMB));
     sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
+    sb = Strings.appendKeyValue(sb, "filteredReadRequestsCount",
+      Long.valueOf(this.filteredReadRequestsCount));
     sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount));
     sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB));
     sb =

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index ee0217a..e4df1c0 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -162,6 +162,9 @@ public interface MetricsRegionServerSource extends BaseSource {
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
       "Number of read requests this region server has answered.";
+  String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
+  String FILTERED_READ_REQUEST_COUNT_DESC =
+    "Number of filtered read requests this region server has answered.";
   String WRITE_REQUEST_COUNT = "writeRequestCount";
   String WRITE_REQUEST_COUNT_DESC =
       "Number of mutation requests this region server has answered.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 02dec8d..07c3773 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -113,6 +113,11 @@ public interface MetricsRegionServerWrapper {
   long getReadRequestsCount();
 
   /**
+   * Get the number of filtered read requests to regions hosted on this region server.
+   */
+  long getFilteredReadRequestsCount();
+
+  /**
    * Get the number of write requests to regions hosted on this region server.
    */
   long getWriteRequestsCount();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 0997f7c..20ca9bd 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -70,6 +70,11 @@ public interface MetricsRegionWrapper {
   long getReadRequestCount();
 
   /**
+   * Get the total number of filtered read requests that have been issued against this region.
+   */
+  long getFilteredReadRequestCount();
+
+  /**
    * Get the total number of mutations that have been issued against this region.
    */
   long getWriteRequestCount();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index f40811c..42476a7 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -201,6 +201,8 @@ public class MetricsRegionServerSourceImpl
               rsWrap.getTotalRequestCount())
           .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
               rsWrap.getReadRequestsCount())
+          .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC),
+              rsWrap.getFilteredReadRequestsCount())
           .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
               rsWrap.getWriteRequestsCount())
           .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC),

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 1df72d5..fab6b51 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -237,6 +237,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
               MetricsRegionServerSource.READ_REQUEST_COUNT_DESC),
           this.regionWrapper.getReadRequestCount());
       mrb.addCounter(Interns.info(
+              regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT,
+              MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC),
+          this.regionWrapper.getFilteredReadRequestCount());
+      mrb.addCounter(Interns.info(
               regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
               MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC),
           this.regionWrapper.getWriteRequestCount());

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 3088260..19624aa 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -107,6 +107,11 @@ public class TestMetricsRegionSourceImpl {
     }
 
     @Override
+    public long getFilteredReadRequestCount() {
+      return 0;
+    }
+
+    @Override
     public long getWriteRequestCount() {
       return 0;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5fd4e18..048f5cf 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -3613,6 +3613,24 @@ public final class ClusterStatusProtos {
      */
     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
         int index);
+
+    // optional uint64 filtered_read_requests_count = 19;
+    /**
+     * <code>optional uint64 filtered_read_requests_count = 19;</code>
+     *
+     * <pre>
+     ** the current total filtered read requests made to region 
+     * </pre>
+     */
+    boolean hasFilteredReadRequestsCount();
+    /**
+     * <code>optional uint64 filtered_read_requests_count = 19;</code>
+     *
+     * <pre>
+     ** the current total filtered read requests made to region 
+     * </pre>
+     */
+    long getFilteredReadRequestsCount();
   }
   /**
    * Protobuf type {@code hbase.pb.RegionLoad}
@@ -3766,6 +3784,11 @@ public final class ClusterStatusProtos {
               storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
               break;
             }
+            case 152: {
+              bitField0_ |= 0x00020000;
+              filteredReadRequestsCount_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4283,6 +4306,30 @@ public final class ClusterStatusProtos {
       return storeCompleteSequenceId_.get(index);
     }
 
+    // optional uint64 filtered_read_requests_count = 19;
+    public static final int FILTERED_READ_REQUESTS_COUNT_FIELD_NUMBER = 19;
+    private long filteredReadRequestsCount_;
+    /**
+     * <code>optional uint64 filtered_read_requests_count = 19;</code>
+     *
+     * <pre>
+     ** the current total filtered read requests made to region 
+     * </pre>
+     */
+    public boolean hasFilteredReadRequestsCount() {
+      return ((bitField0_ & 0x00020000) == 0x00020000);
+    }
+    /**
+     * <code>optional uint64 filtered_read_requests_count = 19;</code>
+     *
+     * <pre>
+     ** the current total filtered read requests made to region 
+     * </pre>
+     */
+    public long getFilteredReadRequestsCount() {
+      return filteredReadRequestsCount_;
+    }
+
     private void initFields() {
       regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
       stores_ = 0;
@@ -4302,6 +4349,7 @@ public final class ClusterStatusProtos {
       dataLocality_ = 0F;
       lastMajorCompactionTs_ = 0L;
       storeCompleteSequenceId_ = java.util.Collections.emptyList();
+      filteredReadRequestsCount_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4383,6 +4431,9 @@ public final class ClusterStatusProtos {
       for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
         output.writeMessage(18, storeCompleteSequenceId_.get(i));
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        output.writeUInt64(19, filteredReadRequestsCount_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -4464,6 +4515,10 @@ public final class ClusterStatusProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(18, storeCompleteSequenceId_.get(i));
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(19, filteredReadRequestsCount_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -4573,6 +4628,11 @@ public final class ClusterStatusProtos {
       }
       result = result && getStoreCompleteSequenceIdList()
           .equals(other.getStoreCompleteSequenceIdList());
+      result = result && (hasFilteredReadRequestsCount() == other.hasFilteredReadRequestsCount());
+      if (hasFilteredReadRequestsCount()) {
+        result = result && (getFilteredReadRequestsCount()
+            == other.getFilteredReadRequestsCount());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -4659,6 +4719,10 @@ public final class ClusterStatusProtos {
         hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
         hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode();
       }
+      if (hasFilteredReadRequestsCount()) {
+        hash = (37 * hash) + FILTERED_READ_REQUESTS_COUNT_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getFilteredReadRequestsCount());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -4814,6 +4878,8 @@ public final class ClusterStatusProtos {
         } else {
           storeCompleteSequenceIdBuilder_.clear();
         }
+        filteredReadRequestsCount_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00040000);
         return this;
       }
 
@@ -4923,6 +4989,10 @@ public final class ClusterStatusProtos {
         } else {
           result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00040000) == 0x00040000)) {
+          to_bitField0_ |= 0x00020000;
+        }
+        result.filteredReadRequestsCount_ = filteredReadRequestsCount_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -5016,6 +5086,9 @@ public final class ClusterStatusProtos {
             }
           }
         }
+        if (other.hasFilteredReadRequestsCount()) {
+          setFilteredReadRequestsCount(other.getFilteredReadRequestsCount());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -6306,6 +6379,55 @@ public final class ClusterStatusProtos {
         return storeCompleteSequenceIdBuilder_;
       }
 
+      // optional uint64 filtered_read_requests_count = 19;
+      private long filteredReadRequestsCount_ ;
+      /**
+       * <code>optional uint64 filtered_read_requests_count = 19;</code>
+       *
+       * <pre>
+       ** the current total filtered read requests made to region 
+       * </pre>
+       */
+      public boolean hasFilteredReadRequestsCount() {
+        return ((bitField0_ & 0x00040000) == 0x00040000);
+      }
+      /**
+       * <code>optional uint64 filtered_read_requests_count = 19;</code>
+       *
+       * <pre>
+       ** the current total filtered read requests made to region 
+       * </pre>
+       */
+      public long getFilteredReadRequestsCount() {
+        return filteredReadRequestsCount_;
+      }
+      /**
+       * <code>optional uint64 filtered_read_requests_count = 19;</code>
+       *
+       * <pre>
+       ** the current total filtered read requests made to region 
+       * </pre>
+       */
+      public Builder setFilteredReadRequestsCount(long value) {
+        bitField0_ |= 0x00040000;
+        filteredReadRequestsCount_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 filtered_read_requests_count = 19;</code>
+       *
+       * <pre>
+       ** the current total filtered read requests made to region 
+       * </pre>
+       */
+      public Builder clearFilteredReadRequestsCount() {
+        bitField0_ = (bitField0_ & ~0x00040000);
+        filteredReadRequestsCount_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionLoad)
     }
 
@@ -14723,7 +14845,7 @@ public final class ClusterStatusProtos {
       "e\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"p\n\026RegionSt" +
       "oreSequenceIds\022 \n\030last_flushed_sequence_" +
       "id\030\001 \002(\004\0224\n\021store_sequence_id\030\002 \003(\0132\031.hb" +
-      "ase.pb.StoreSequenceId\"\324\004\n\nRegionLoad\0223\n" +
+      "ase.pb.StoreSequenceId\"\372\004\n\nRegionLoad\0223\n" +
       "\020region_specifier\030\001 \002(\0132\031.hbase.pb.Regio" +
       "nSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstorefiles",
       "\030\003 \001(\r\022\"\n\032store_uncompressed_size_MB\030\004 \001" +
@@ -14738,38 +14860,39 @@ public final class ClusterStatusProtos {
       "_sequence_id\030\017 \001(\004\022\025\n\rdata_locality\030\020 \001(",
       "\002\022#\n\030last_major_compaction_ts\030\021 \001(\004:\0010\022=" +
       "\n\032store_complete_sequence_id\030\022 \003(\0132\031.hba" +
-      "se.pb.StoreSequenceId\"T\n\023ReplicationLoad" +
-      "Sink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022!\n\031time" +
-      "StampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025Replicat" +
-      "ionLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLa" +
-      "stShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(" +
-      "\r\022 \n\030timeStampOfLastShippedOp\030\004 \002(\004\022\026\n\016r" +
-      "eplicationLag\030\005 \002(\004\"\212\003\n\nServerLoad\022\032\n\022nu" +
-      "mber_of_requests\030\001 \001(\004\022 \n\030total_number_o",
-      "f_requests\030\002 \001(\004\022\024\n\014used_heap_MB\030\003 \001(\r\022\023" +
-      "\n\013max_heap_MB\030\004 \001(\r\022*\n\014region_loads\030\005 \003(" +
-      "\0132\024.hbase.pb.RegionLoad\022+\n\014coprocessors\030" +
-      "\006 \003(\0132\025.hbase.pb.Coprocessor\022\031\n\021report_s" +
-      "tart_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004" +
-      "\022\030\n\020info_server_port\030\t \001(\r\0227\n\016replLoadSo" +
-      "urce\030\n \003(\0132\037.hbase.pb.ReplicationLoadSou" +
-      "rce\0223\n\014replLoadSink\030\013 \001(\0132\035.hbase.pb.Rep" +
-      "licationLoadSink\"a\n\016LiveServerInfo\022$\n\006se" +
-      "rver\030\001 \002(\0132\024.hbase.pb.ServerName\022)\n\013serv",
-      "er_load\030\002 \002(\0132\024.hbase.pb.ServerLoad\"\250\003\n\r" +
-      "ClusterStatus\0228\n\rhbase_version\030\001 \001(\0132!.h" +
-      "base.pb.HBaseVersionFileContent\022.\n\014live_" +
-      "servers\030\002 \003(\0132\030.hbase.pb.LiveServerInfo\022" +
-      "*\n\014dead_servers\030\003 \003(\0132\024.hbase.pb.ServerN" +
-      "ame\022;\n\025regions_in_transition\030\004 \003(\0132\034.hba" +
-      "se.pb.RegionInTransition\022\'\n\ncluster_id\030\005" +
-      " \001(\0132\023.hbase.pb.ClusterId\0222\n\023master_copr" +
-      "ocessors\030\006 \003(\0132\025.hbase.pb.Coprocessor\022$\n" +
-      "\006master\030\007 \001(\0132\024.hbase.pb.ServerName\022,\n\016b",
-      "ackup_masters\030\010 \003(\0132\024.hbase.pb.ServerNam" +
-      "e\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.had" +
-      "oop.hbase.protobuf.generatedB\023ClusterSta" +
-      "tusProtosH\001\240\001\001"
+      "se.pb.StoreSequenceId\022$\n\034filtered_read_r" +
+      "equests_count\030\023 \001(\004\"T\n\023ReplicationLoadSi" +
+      "nk\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022!\n\031timeSt" +
+      "ampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025Replicatio" +
+      "nLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLast" +
+      "ShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(\r\022" +
+      " \n\030timeStampOfLastShippedOp\030\004 \002(\004\022\026\n\016rep" +
+      "licationLag\030\005 \002(\004\"\212\003\n\nServerLoad\022\032\n\022numb",
+      "er_of_requests\030\001 \001(\004\022 \n\030total_number_of_" +
+      "requests\030\002 \001(\004\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013" +
+      "max_heap_MB\030\004 \001(\r\022*\n\014region_loads\030\005 \003(\0132" +
+      "\024.hbase.pb.RegionLoad\022+\n\014coprocessors\030\006 " +
+      "\003(\0132\025.hbase.pb.Coprocessor\022\031\n\021report_sta" +
+      "rt_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030" +
+      "\n\020info_server_port\030\t \001(\r\0227\n\016replLoadSour" +
+      "ce\030\n \003(\0132\037.hbase.pb.ReplicationLoadSourc" +
+      "e\0223\n\014replLoadSink\030\013 \001(\0132\035.hbase.pb.Repli" +
+      "cationLoadSink\"a\n\016LiveServerInfo\022$\n\006serv",
+      "er\030\001 \002(\0132\024.hbase.pb.ServerName\022)\n\013server" +
+      "_load\030\002 \002(\0132\024.hbase.pb.ServerLoad\"\250\003\n\rCl" +
+      "usterStatus\0228\n\rhbase_version\030\001 \001(\0132!.hba" +
+      "se.pb.HBaseVersionFileContent\022.\n\014live_se" +
+      "rvers\030\002 \003(\0132\030.hbase.pb.LiveServerInfo\022*\n" +
+      "\014dead_servers\030\003 \003(\0132\024.hbase.pb.ServerNam" +
+      "e\022;\n\025regions_in_transition\030\004 \003(\0132\034.hbase" +
+      ".pb.RegionInTransition\022\'\n\ncluster_id\030\005 \001" +
+      "(\0132\023.hbase.pb.ClusterId\0222\n\023master_coproc" +
+      "essors\030\006 \003(\0132\025.hbase.pb.Coprocessor\022$\n\006m",
+      "aster\030\007 \001(\0132\024.hbase.pb.ServerName\022,\n\016bac" +
+      "kup_masters\030\010 \003(\0132\024.hbase.pb.ServerName\022" +
+      "\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.hadoo" +
+      "p.hbase.protobuf.generatedB\023ClusterStatu" +
+      "sProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -14805,7 +14928,7 @@ public final class ClusterStatusProtos {
           internal_static_hbase_pb_RegionLoad_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionLoad_descriptor,
-              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", });
+              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", "FilteredReadRequestsCount", });
           internal_static_hbase_pb_ReplicationLoadSink_descriptor =
             getDescriptor().getMessageTypes().get(5);
           internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-protocol/src/main/protobuf/ClusterStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 228be7e..54bc0c3 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -136,6 +136,9 @@ message RegionLoad {
 
   /** the most recent sequence Id of store from cache flush */
   repeated StoreSequenceId store_complete_sequence_id = 18;
+
+  /** the current total filtered read requests made to region */
+  optional uint64 filtered_read_requests_count = 19;
 }
 
 /* Server-level protobufs */

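As a quick illustration of the wire-format change, the sketch below sets and reads the new optional field through the generated ClusterStatusProtos API. buildPartial() is used only to sidestep the message's required region_specifier for brevity; this is not how the region server actually constructs RegionLoad.

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class FilteredReadRequestsProtoSketch {
  public static void main(String[] args) {
    // Field 19 is optional, so older readers simply ignore it and
    // hasFilteredReadRequestsCount() reports whether it was set.
    ClusterStatusProtos.RegionLoad rl = ClusterStatusProtos.RegionLoad.newBuilder()
        .setFilteredReadRequestsCount(42L)
        .buildPartial();  // skips required region_specifier; illustration only
    System.out.println(rl.hasFilteredReadRequestsCount());  // true
    System.out.println(rl.getFilteredReadRequestsCount());  // 42
  }
}
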
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index b7c894f..c051743 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -174,6 +174,7 @@ for (ServerName serverName: serverNames) {
     <th>ServerName</th>
     <th>Request Per Second</th>
     <th>Read Request Count</th>
+    <th>Filtered Read Request Count</th>
     <th>Write Request Count</th>
 </tr>
 <%java>
@@ -186,6 +187,7 @@ if (sl != null) {
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
 <td><% String.format("%.0f", sl.getRequestsPerSecond()) %></td>
 <td><% sl.getReadRequestsCount() %></td>
+<td><% sl.getFilteredReadRequestsCount() %></td>
 <td><% sl.getWriteRequestsCount() %></td>
 </tr>
 <%java>

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 7740c53..cefd476 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -118,6 +118,7 @@
         <tr>
             <th>Region Name</th>
             <th>Read Request Count</th>
+            <th>Filtered Read Request Count</th>
             <th>Write Request Count</th>
         </tr>
 
@@ -132,6 +133,7 @@
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>
             <%if load != null %>
             <td><% load.getReadRequestsCount() %></td>
+            <td><% load.getFilteredReadRequestsCount() %></td>
             <td><% load.getWriteRequestsCount() %></td>
             </%if>
         </tr>

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 4f6a754..a55a863 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -173,11 +173,13 @@ MetricsRegionServerWrapper mWrap;
 <tr>
     <th>Request Per Second</th>
     <th>Read Request Count</th>
+    <th>Filtered Read Request Count</th>
     <th>Write Request Count</th>
 </tr>
 <tr>
     <td><% String.format("%.0f", mWrap.getRequestsPerSecond()) %></td>
     <td><% mWrap.getReadRequestsCount() %></td>
+    <td><% mWrap.getFilteredReadRequestsCount() %></td>
     <td><% mWrap.getWriteRequestsCount() %></td>
 </tr>
 </table>

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 99b571f..f03c205 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -284,6 +284,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   //Number of requests
   final Counter readRequestsCount = new Counter();
+  final Counter filteredReadRequestsCount = new Counter();
   final Counter writeRequestsCount = new Counter();
 
   // Number of requests blocked by memstore size.
@@ -1113,6 +1114,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   @Override
+  public long getFilteredReadRequestsCount() {
+    return filteredReadRequestsCount.get();
+  }
+
+  @Override
   public long getWriteRequestsCount() {
     return writeRequestsCount.get();
   }
@@ -6025,6 +6031,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     protected void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) {
+      filteredReadRequestsCount.increment();
+
       if (scannerContext == null || !scannerContext.isTrackingMetrics()) return;
 
       scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet();
@@ -6524,6 +6532,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(),
         this.getBaseConf(), hri, this.getTableDesc(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
+    r.filteredReadRequestsCount.set(this.getFilteredReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
     return r;
   }
@@ -6541,6 +6550,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         this.getTableDesc(), this.rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount()
         + region_b.getReadRequestsCount());
+    r.filteredReadRequestsCount.set(this.getFilteredReadRequestsCount()
+      + region_b.getFilteredReadRequestsCount());
     r.writeRequestsCount.set(this.getWriteRequestsCount()
 
         + region_b.getWriteRequestsCount());
@@ -7590,7 +7601,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      44 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
+      45 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
       (14 * Bytes.SIZEOF_LONG) +
       5 * Bytes.SIZEOF_BOOLEAN);
 

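The counter is bumped in HRegion's scanner path (incrementCountOfRowsFilteredMetric), so any row the scanner drops server-side -- whether rejected by a user filter, expired by TTL, or masked by a delete marker, as exercised by the new TestRegionServerMetrics -- now shows up in the region's and region server's filteredReadRequestCount, and the value is carried across splits and merges like the existing readRequestsCount. Below is a hedged client-side sketch of a scan that would be counted this way, modeled on the added test; the table name, row key, and filter are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("test"))) {
      // Every row the RowFilter rejects on the server is counted as a
      // filtered read request for the region (and rolled up per server).
      Scan scan = new Scan();
      scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("a"))));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          // Only row "a" comes back; the rejected rows increment the new metric.
        }
      }
    }
  }
}
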
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 1183f96..9cb100f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1518,6 +1518,7 @@ public class HRegionServer extends HasThread implements
       .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
       .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
       .setReadRequestsCount(r.getReadRequestsCount())
+      .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
       .setWriteRequestsCount(r.getWriteRequestsCount())
       .setTotalCompactingKVs(totalCompactingKVs)
       .setCurrentCompactedKVs(currentCompactedKVs)

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index f3e8916..4f9ba5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -68,6 +68,7 @@ class MetricsRegionServerWrapperImpl
   private volatile long storeFileSize = 0;
   private volatile double requestsPerSecond = 0.0;
   private volatile long readRequestsCount = 0;
+  private volatile long filteredReadRequestsCount = 0;
   private volatile long writeRequestsCount = 0;
   private volatile long checkAndMutateChecksFailed = 0;
   private volatile long checkAndMutateChecksPassed = 0;
@@ -408,6 +409,11 @@ class MetricsRegionServerWrapperImpl
   }
 
   @Override
+  public long getFilteredReadRequestsCount() {
+    return filteredReadRequestsCount;
+  }
+
+  @Override
   public long getWriteRequestsCount() {
     return writeRequestsCount;
   }
@@ -588,7 +594,8 @@ class MetricsRegionServerWrapperImpl
             new HDFSBlocksDistribution();
 
         long tempNumStores = 0, tempNumStoreFiles = 0, tempMemstoreSize = 0, tempStoreFileSize = 0;
-        long tempReadRequestsCount = 0, tempWriteRequestsCount = 0;
+        long tempReadRequestsCount = 0, tempFilteredReadRequestsCount = 0,
+          tempWriteRequestsCount = 0;
         long tempCheckAndMutateChecksFailed = 0;
         long tempCheckAndMutateChecksPassed = 0;
         long tempStorefileIndexSize = 0;
@@ -619,6 +626,7 @@ class MetricsRegionServerWrapperImpl
           tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL();
           tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL();
           tempReadRequestsCount += r.getReadRequestsCount();
+          tempFilteredReadRequestsCount += r.getFilteredReadRequestsCount();
           tempWriteRequestsCount += r.getWriteRequestsCount();
           tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed();
           tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed();
@@ -696,6 +704,7 @@ class MetricsRegionServerWrapperImpl
         memstoreSize = tempMemstoreSize;
         storeFileSize = tempStoreFileSize;
         readRequestsCount = tempReadRequestsCount;
+        filteredReadRequestsCount = tempFilteredReadRequestsCount;
         writeRequestsCount = tempWriteRequestsCount;
         checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed;
         checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 08865e6..2c54079 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -112,6 +112,11 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
   }
 
   @Override
+  public long getFilteredReadRequestCount() {
+    return this.region.getFilteredReadRequestsCount();
+  }
+
+  @Override
   public long getWriteRequestCount() {
     return this.region.getWriteRequestsCount();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 213b41a..976bddb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -185,6 +185,9 @@ public interface Region extends ConfigurationObserver {
    */
   void updateReadRequestsCount(long i);
 
+  /** @return filtered read requests count for this region */
+  long getFilteredReadRequestsCount();
+
   /** @return write request count for this region */
   long getWriteRequestsCount();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
new file mode 100644
index 0000000..76e5842
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
@@ -0,0 +1,379 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestRegionServerMetrics {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final TableName TABLE_NAME = TableName.valueOf("test");
+  private static final byte[] CF1 = "c1".getBytes();
+  private static final byte[] CF2 = "c2".getBytes();
+
+  private static final byte[] ROW1 = "a".getBytes();
+  private static final byte[] ROW2 = "b".getBytes();
+  private static final byte[] ROW3 = "c".getBytes();
+  private static final byte[] COL1 = "q1".getBytes();
+  private static final byte[] COL2 = "q2".getBytes();
+  private static final byte[] COL3 = "q3".getBytes();
+  private static final byte[] VAL1 = "v1".getBytes();
+  private static final byte[] VAL2 = "v2".getBytes();
+  private static final byte[] VAL3 = Bytes.toBytes(0L);
+
+  private static final int MAX_TRY = 20;
+  private static final int SLEEP_MS = 100;
+  private static final int TTL = 1;
+
+  private static Admin admin;
+  private static Collection<ServerName> serverNames;
+  private static Table table;
+  private static List<HRegionInfo> tableRegions;
+
+  private static Map<Metric, Long> requestsMap = new HashMap<>();
+  private static Map<Metric, Long> requestsMapPrev = new HashMap<>();
+
+  @BeforeClass
+  public static void setUpOnce() throws Exception {
+    TEST_UTIL.startMiniCluster();
+    admin = TEST_UTIL.getAdmin();
+    serverNames = admin.getClusterStatus().getServers();
+    table = createTable();
+    putData();
+    tableRegions = admin.getTableRegions(TABLE_NAME);
+
+    for (Metric metric : Metric.values()) {
+      requestsMap.put(metric, 0L);
+      requestsMapPrev.put(metric, 0L);
+    }
+  }
+
+  private static Table createTable() throws IOException {
+    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
+    HColumnDescriptor cd1 = new HColumnDescriptor(CF1);
+    td.addFamily(cd1);
+    HColumnDescriptor cd2 = new HColumnDescriptor(CF2);
+    cd2.setTimeToLive(TTL);
+    td.addFamily(cd2);
+
+    admin.createTable(td);
+    return TEST_UTIL.getConnection().getTable(TABLE_NAME);
+  }
+
+  private static void testReadRequests(long resultCount,
+    long expectedReadRequests, long expectedFilteredReadRequests)
+    throws IOException, InterruptedException {
+    updateMetricsMap();
+    System.out.println("requestsMapPrev = " + requestsMapPrev);
+    System.out.println("requestsMap = " + requestsMap);
+
+    assertEquals(expectedReadRequests,
+      requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ));
+    assertEquals(expectedReadRequests,
+      requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ));
+    assertEquals(expectedFilteredReadRequests,
+      requestsMap.get(Metric.FILTERED_REGION_READ)
+        - requestsMapPrev.get(Metric.FILTERED_REGION_READ));
+    assertEquals(expectedFilteredReadRequests,
+      requestsMap.get(Metric.FILTERED_SERVER_READ)
+        - requestsMapPrev.get(Metric.FILTERED_SERVER_READ));
+    assertEquals(expectedReadRequests, resultCount);
+  }
+
+  private static void updateMetricsMap() throws IOException, InterruptedException {
+    for (Metric metric : Metric.values()) {
+      requestsMapPrev.put(metric, requestsMap.get(metric));
+    }
+
+    ServerLoad serverLoad = null;
+    RegionLoad regionLoadOuter = null;
+    boolean metricsUpdated = false;
+    for (int i = 0; i < MAX_TRY; i++) {
+      for (ServerName serverName : serverNames) {
+        serverLoad = admin.getClusterStatus().getLoad(serverName);
+
+        Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
+        for (HRegionInfo tableRegion : tableRegions) {
+          RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName());
+          if (regionLoad != null) {
+            regionLoadOuter = regionLoad;
+            for (Metric metric : Metric.values()) {
+              if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) {
+                for (Metric metricInner : Metric.values()) {
+                  requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner));
+                }
+                metricsUpdated = true;
+                break;
+              }
+            }
+          }
+        }
+      }
+      if (metricsUpdated) {
+        break;
+      }
+      Thread.sleep(SLEEP_MS);
+    }
+    if (!metricsUpdated) {
+      for (Metric metric : Metric.values()) {
+        requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric));
+      }
+    }
+  }
+
+  private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) {
+    switch (metric) {
+      case REGION_READ:
+        return regionLoad.getReadRequestsCount();
+      case SERVER_READ:
+        return serverLoad.getReadRequestsCount();
+      case FILTERED_REGION_READ:
+        return regionLoad.getFilteredReadRequestsCount();
+      case FILTERED_SERVER_READ:
+        return serverLoad.getFilteredReadRequestsCount();
+      default:
+        throw new IllegalStateException();
+    }
+  }
+
+  private static void putData() throws IOException {
+    Put put;
+
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL1, VAL1);
+    put.addColumn(CF1, COL2, VAL2);
+    put.addColumn(CF1, COL3, VAL3);
+    table.put(put);
+    put = new Put(ROW2);
+    put.addColumn(CF1, COL1, VAL2);  // put val2 instead of val1
+    put.addColumn(CF1, COL2, VAL2);
+    table.put(put);
+    put = new Put(ROW3);
+    put.addColumn(CF1, COL1, VAL1);
+    put.addColumn(CF1, COL2, VAL2);
+    table.put(put);
+  }
+
+  private static void putTTLExpiredData() throws IOException, InterruptedException {
+    Put put;
+
+    put = new Put(ROW1);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+
+    Thread.sleep(TTL * 1000);
+
+    put = new Put(ROW2);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+
+    put = new Put(ROW3);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testReadRequestsCountNotFiltered() throws Exception {
+    int resultCount;
+    Scan scan;
+    Append append;
+    Put put;
+    Increment increment;
+    Get get;
+
+    // test for scan
+    scan = new Scan();
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 3, 0);
+    }
+
+    // test for scan
+    scan = new Scan(ROW2, ROW3);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 1, 0);
+    }
+
+    // test for get
+    get = new Get(ROW2);
+    Result result = table.get(get);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for increment
+    increment = new Increment(ROW1);
+    increment.addColumn(CF1, COL3, 1);
+    result = table.increment(increment);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for checkAndPut
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL2, VAL2);
+    boolean checkAndPut =
+      table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put);
+    resultCount = checkAndPut ? 1 : 0;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for append
+    append = new Append(ROW1);
+    append.add(CF1, COL2, VAL2);
+    result = table.append(append);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for checkAndMutate
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL1, VAL1);
+    RowMutations rm = new RowMutations(ROW1);
+    rm.add(put);
+    boolean checkAndMutate =
+      table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm);
+    resultCount = checkAndMutate ? 1 : 0;
+    testReadRequests(resultCount, 1, 0);
+  }
+
+  @Test
+  public void testReadRequestsCountWithFilter() throws Exception {
+    int resultCount;
+    Scan scan;
+
+    // test for scan
+    scan = new Scan();
+    scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 2, 1);
+    }
+
+    // test for scan
+    scan = new Scan();
+    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 1, 2);
+    }
+
+    // test for scan
+    scan = new Scan(ROW2, ROW3);
+    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 0, 1);
+    }
+
+    // fixme filtered get should not increase readRequestsCount
+//    Get get = new Get(ROW2);
+//    get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+//    Result result = table.get(get);
+//    resultCount = result.isEmpty() ? 0 : 1;
+//    testReadRequests(resultCount, 0, 1);
+  }
+
+  @Test
+  public void testReadRequestsCountWithDeletedRow() throws Exception {
+    try {
+      Delete delete = new Delete(ROW3);
+      table.delete(delete);
+
+      Scan scan = new Scan();
+      try (ResultScanner scanner = table.getScanner(scan)) {
+        int resultCount = 0;
+        for (Result ignore : scanner) {
+          resultCount++;
+        }
+        testReadRequests(resultCount, 2, 1);
+      }
+    } finally {
+      Put put = new Put(ROW3);
+      put.addColumn(CF1, COL1, VAL1);
+      put.addColumn(CF1, COL2, VAL2);
+      table.put(put);
+    }
+  }
+
+  @Test
+  public void testReadRequestsCountWithTTLExpiration() throws Exception {
+    putTTLExpiredData();
+
+    Scan scan = new Scan();
+    scan.addFamily(CF2);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      int resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 2, 1);
+    }
+  }
+
+  private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ}
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
index 5c56e9a..e6c17a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
@@ -44,6 +44,7 @@ public class TestServerLoad {
     assertEquals(820, sl.getStorefileSizeInMB());
     assertEquals(82, sl.getStorefileIndexSizeInMB());
     assertEquals(((long)Integer.MAX_VALUE)*2, sl.getReadRequestsCount());
+    assertEquals(300, sl.getFilteredReadRequestsCount());
     
   }
  
@@ -57,6 +58,7 @@ public class TestServerLoad {
     assertTrue(slToString.contains("storefileSizeMB=820")); 
     assertTrue(slToString.contains("rootIndexSizeKB=504"));
     assertTrue(slToString.contains("coprocessors=[]"));
+    assertTrue(slToString.contains("filteredReadRequestsCount=300"));
   }
 
   @Test
@@ -80,10 +82,12 @@ public class TestServerLoad {
     ClusterStatusProtos.RegionLoad rlOne =
         ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
             .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
+            .setFilteredReadRequestsCount(100)
             .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
     ClusterStatusProtos.RegionLoad rlTwo =
         ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
             .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+            .setFilteredReadRequestsCount(200)
             .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
 
     ClusterStatusProtos.ServerLoad sl =

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index 0d93284..4b00632 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -86,6 +86,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
+  public long getFilteredReadRequestsCount() {
+    return 1997;
+  }
+
+  @Override
   public long getWriteRequestsCount() {
     return 707;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
index c43ccc3..8e6dd74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
@@ -76,6 +76,11 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
   }
 
   @Override
+  public long getFilteredReadRequestCount() {
+    return 107;
+  }
+
+  @Override
   public long getWriteRequestCount() {
     return 106;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
index e739890..cc09d15 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
@@ -46,6 +46,10 @@ public class TestMetricsRegion {
       "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
       103, agg);
     HELPER.assertCounter(
+      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
+        "filteredReadRequestCount",
+      107, agg);
+    HELPER.assertCounter(
       "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", 
       0, agg);
     mr.close();
@@ -63,6 +67,10 @@ public class TestMetricsRegion {
       "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
       103, agg);
     HELPER.assertCounter(
+      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
+        "filteredReadRequestCount",
+      107, agg);
+    HELPER.assertCounter(
       "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", 
       1, agg);
     mr.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
index f3ce0bd..77d6a95 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
@@ -67,6 +67,7 @@ public class TestMetricsRegionServer {
     HELPER.assertGauge("storeFileSize", 1900, serverSource);
     HELPER.assertCounter("totalRequestCount", 899, serverSource);
     HELPER.assertCounter("readRequestCount", 997, serverSource);
+    HELPER.assertCounter("filteredReadRequestCount", 1997, serverSource);
     HELPER.assertCounter("writeRequestCount", 707, serverSource);
     HELPER.assertCounter("checkMutateFailedCount", 401, serverSource);
     HELPER.assertCounter("checkMutatePassedCount", 405, serverSource);