Posted to commits@hbase.apache.org by bu...@apache.org on 2018/02/28 20:31:04 UTC

[01/11] hbase git commit: HBASE-20093 Replace ServerLoad by ServerMetrics for ServerManager [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-15151 a9bc4d69c -> d7a3f2276 (forced update)


HBASE-20093 Replace ServerLoad by ServerMetrics for ServerManager

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f6e971c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f6e971c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f6e971c

Branch: refs/heads/HBASE-15151
Commit: 7f6e971c4cc2e3906f959c6304fc05faa7703054
Parents: ba063ab
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Tue Feb 27 23:20:06 2018 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Wed Feb 28 14:57:50 2018 +0800

----------------------------------------------------------------------
 .../hbase/coprocessor/TestClassLoading.java     |  36 ++-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   4 +-
 .../hbase/tmpl/master/RSGroupListTmpl.jamon     |  83 ++---
 .../tmpl/master/RegionServerListTmpl.jamon      |  99 ++++--
 .../hbase/favored/FavoredNodeLoadBalancer.java  |  16 +-
 .../hadoop/hbase/master/MasterDumpServlet.java  |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  12 +-
 .../hadoop/hbase/master/ServerManager.java      |  47 +--
 .../hbase/master/balancer/BaseLoadBalancer.java |  10 +-
 .../balancer/FavoredStochasticBalancer.java     |  10 +-
 .../normalizer/SimpleRegionNormalizer.java      |  13 +-
 .../resources/hbase-webapps/master/rsgroup.jsp  |  89 ++++--
 .../resources/hbase-webapps/master/table.jsp    | 300 ++++++++-----------
 .../normalizer/TestSimpleRegionNormalizer.java  |  12 +-
 14 files changed, 381 insertions(+), 360 deletions(-)
----------------------------------------------------------------------
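
The pattern repeated across the files below: ServerLoad exposed server-level
rollups directly (getNumberOfRegions(), getReadRequestsCount(),
getMemstoreSizeInMB(), ...), while ServerMetrics hands out a per-region map,
so each caller re-derives the totals it needs. A minimal sketch of that
aggregation, using only methods that appear in this patch -- the helper class
itself is illustrative, not part of the commit:

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    final class ServerMetricsRollup {
      // Old: sl.getNumberOfRegions()  New: size of the per-region map.
      static int regionCount(ServerMetrics sm) {
        return sm.getRegionMetrics().size();
      }

      // Old: sl.getReadRequestsCount()  New: sum over RegionMetrics.
      static long readRequests(ServerMetrics sm) {
        long reads = 0;
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          reads += rm.getReadRequestCount();
        }
        return reads;
      }

      // Old: sl.getMemstoreSizeInMB()  New: Size values carry their own unit,
      // so ask for megabytes explicitly and truncate as the callers here do.
      static long memStoreSizeMB(ServerMetrics sm) {
        return (long) sm.getRegionMetrics().values().stream()
            .mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
            .sum();
      }
    }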


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
index 922977c..bc75881 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
@@ -22,8 +22,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.io.*;
-import java.util.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -34,8 +40,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -47,8 +53,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.ClassLoaderTestHelper;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -482,13 +490,13 @@ public class TestClassLoading {
    * @param tableName : given table.
    * @return subset of all servers.
    */
-  Map<ServerName, ServerLoad> serversForTable(String tableName) {
-    Map<ServerName, ServerLoad> serverLoadHashMap = new HashMap<>();
-    for(Map.Entry<ServerName,ServerLoad> server:
+  Map<ServerName, ServerMetrics> serversForTable(String tableName) {
+    Map<ServerName, ServerMetrics> serverLoadHashMap = new HashMap<>();
+    for(Map.Entry<ServerName, ServerMetrics> server:
         TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
             getOnlineServers().entrySet()) {
-      for( Map.Entry<byte[], RegionLoad> region:
-          server.getValue().getRegionsLoad().entrySet()) {
+      for(Map.Entry<byte[], RegionMetrics> region:
+          server.getValue().getRegionMetrics().entrySet()) {
         if (region.getValue().getNameAsString().equals(tableName)) {
           // this server hosts a region of tableName: add this server..
           serverLoadHashMap.put(server.getKey(),server.getValue());
@@ -501,8 +509,7 @@ public class TestClassLoading {
   }
 
   void assertAllRegionServers(String tableName) throws InterruptedException {
-    Map<ServerName, ServerLoad> servers;
-    String[] actualCoprocessors = null;
+    Map<ServerName, ServerMetrics> servers;
     boolean success = false;
     String[] expectedCoprocessors = regionServerSystemCoprocessors;
     if (tableName == null) {
@@ -513,8 +520,9 @@ public class TestClassLoading {
     }
     for (int i = 0; i < 5; i++) {
       boolean any_failed = false;
-      for(Map.Entry<ServerName,ServerLoad> server: servers.entrySet()) {
-        actualCoprocessors = server.getValue().getRsCoprocessors();
+      for(Map.Entry<ServerName, ServerMetrics> server: servers.entrySet()) {
+        String[] actualCoprocessors =
+          server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]);
         if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
           LOG.debug("failed comparison: actual: " +
               Arrays.toString(actualCoprocessors) +
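
ServerLoad.getRsCoprocessors() returned a String[] directly;
ServerMetrics.getCoprocessorNames() returns a collection of names, hence the
stream-to-array bridge above before the Arrays.equals comparison. An
equivalent conversion without the stream, as a hedged sketch:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.ServerMetrics;

    final class CoprocessorCheck {
      static boolean matches(ServerMetrics sm, String[] expected) {
        // Collection.toArray(new String[0]) does the same job as
        // stream().toArray(size -> new String[size]) in the hunk above.
        String[] actual = sm.getCoprocessorNames().toArray(new String[0]);
        return Arrays.equals(actual, expected);
      }
    }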

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
index 4685c01..d6df910 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
@@ -124,8 +124,8 @@ public class TestRSGroupsOfflineMode {
       LOG.info("Waiting for region unassignments on failover RS...");
       TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
         @Override public boolean evaluate() throws Exception {
-          return master.getServerManager().getLoad(failoverRS.getServerName())
-              .getRegionsLoad().size() > 0;
+          return !master.getServerManager().getLoad(failoverRS.getServerName())
+              .getRegionMetrics().isEmpty();
         }
       });
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
index 9f9831f..9a0e369 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
@@ -30,7 +30,9 @@ ServerManager serverManager;
     java.util.Set;
     java.util.stream.Collectors;
     org.apache.hadoop.hbase.master.HMaster;
-    org.apache.hadoop.hbase.ServerLoad;
+    org.apache.hadoop.hbase.RegionMetrics;
+    org.apache.hadoop.hbase.ServerMetrics;
+    org.apache.hadoop.hbase.Size;
     org.apache.hadoop.hbase.RSGroupTableAccessor;
     org.apache.hadoop.hbase.master.ServerManager;
     org.apache.hadoop.hbase.net.Address;
@@ -45,7 +47,7 @@ List<RSGroupInfo> groups = RSGroupTableAccessor.getAllRSGroupInfo(master.getConn
 
 <%java>
 RSGroupInfo [] rsGroupInfos = groups.toArray(new RSGroupInfo[groups.size()]);
-Map<Address, ServerLoad> collectServers = Collections.emptyMap();
+Map<Address, ServerMetrics> collectServers = Collections.emptyMap();
 if (master.getServerManager() != null) {
   collectServers =
       master.getServerManager().getOnlineServers().entrySet().stream()
@@ -85,7 +87,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_baseStats>
 <%args>
     RSGroupInfo [] rsGroupInfos;
-    Map<Address, ServerLoad> collectServers;
+    Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -112,13 +114,13 @@ if (master.getServerManager() != null) {
       int numRegionsOnline = 0;
       Set<Address> servers = rsGroupInfo.getServers();
       for (Address server : servers) {
-        ServerLoad sl = collectServers.get(server);
+        ServerMetrics sl = collectServers.get(server);
         if (sl != null) {
-          requestsPerSecond += sl.getNumberOfRequests();
-          numRegionsOnline += sl.getNumberOfRegions();
+          requestsPerSecond += sl.getRequestCountPerSecond();
+          numRegionsOnline += sl.getRegionMetrics().size();
           //rsgroup total
-          totalRegions += sl.getNumberOfRegions();
-          totalRequests += sl.getNumberOfRequests();
+          totalRegions += sl.getRegionMetrics().size();
+          totalRequests += sl.getRequestCountPerSecond();
           totalOnlineServers++;
           onlineServers++;
         } else {
@@ -157,7 +159,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_memoryStats>
 <%args>
   RSGroupInfo [] rsGroupInfos;
-  Map<Address, ServerLoad> collectServers;
+  Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -174,11 +176,12 @@ if (master.getServerManager() != null) {
       long maxHeap = 0;
       long memstoreSize = 0;
       for (Address server : rsGroupInfo.getServers()) {
-        ServerLoad sl = collectServers.get(server);
+        ServerMetrics sl = collectServers.get(server);
         if (sl != null) {
-          usedHeap += sl.getUsedHeapMB();
-          maxHeap += sl.getMaxHeapMB();
-          memstoreSize += sl.getMemstoreSizeInMB();
+          usedHeap += (long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE);
+          maxHeap += (long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE);
+          memstoreSize += (long) sl.getRegionMetrics().values().stream().mapToDouble(
+            rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)).sum();
         }
       }
 </%java>
@@ -201,7 +204,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_requestStats>
 <%args>
   RSGroupInfo [] rsGroupInfos;
-  Map<Address, ServerLoad> collectServers;
+  Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -217,11 +220,13 @@ if (master.getServerManager() != null) {
       long readRequests = 0;
       long writeRequests = 0;
       for (Address server : rsGroupInfo.getServers()) {
-        ServerLoad sl = collectServers.get(server);
+        ServerMetrics sl = collectServers.get(server);
         if (sl != null) {
-          requestsPerSecond += sl.getNumberOfRequests();
-          readRequests += sl.getReadRequestsCount();
-          writeRequests += sl.getWriteRequestsCount();
+          for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+            readRequests += rm.getReadRequestCount();
+            writeRequests += rm.getWriteRequestCount();
+          }
+          requestsPerSecond += sl.getRequestCountPerSecond();
         }
       }
 </%java>
@@ -241,7 +246,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_storeStats>
 <%args>
   RSGroupInfo [] rsGroupInfos;
-  Map<Address, ServerLoad> collectServers;
+  Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -264,14 +269,16 @@ if (master.getServerManager() != null) {
       long bloomSize  = 0;
       int count = 0;
       for (Address server : rsGroupInfo.getServers()) {
-        ServerLoad sl = collectServers.get(server);
+        ServerMetrics sl = collectServers.get(server);
         if (sl != null) {
-          numStores += sl.getStores();
-          numStorefiles += sl.getStorefiles();
-          uncompressedStorefileSize += sl.getStoreUncompressedSizeMB();
-          storefileSize += sl.getStorefileSizeInMB();
-          indexSize += sl.getTotalStaticIndexSizeKB();
-          bloomSize += sl.getTotalStaticBloomSizeKB();
+          for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+            numStores += rm.getStoreCount();
+            numStorefiles += rm.getStoreFileCount();
+            uncompressedStorefileSize += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+            storefileSize += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
+            indexSize += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+            bloomSize += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+          }
           count++;
         }
       }
@@ -298,7 +305,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_compactStats>
 <%args>
   RSGroupInfo [] rsGroupInfos;
-  Map<Address, ServerLoad> collectServers;
+  Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -312,28 +319,30 @@ if (master.getServerManager() != null) {
     for (RSGroupInfo rsGroupInfo: rsGroupInfos) {
       String rsGroupName = rsGroupInfo.getName();
       int numStores = 0;
-      long totalCompactingKVs = 0;
-      long numCompactedKVs = 0;
+      long totalCompactingCells = 0;
+      long totalCompactedCells = 0;
       long remainingKVs = 0;
       long compactionProgress  = 0;
       for (Address server : rsGroupInfo.getServers()) {
-        ServerLoad sl = collectServers.get(server);
+        ServerMetrics sl = collectServers.get(server);
         if (sl != null) {
-          totalCompactingKVs += sl.getTotalCompactingKVs();
-          numCompactedKVs += sl.getCurrentCompactedKVs();
+          for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+            totalCompactingCells += rl.getCompactingCellCount();
+            totalCompactedCells += rl.getCompactedCellCount();
+          }
         }
       }
-      remainingKVs = totalCompactingKVs - numCompactedKVs;
+      remainingKVs = totalCompactingCells - totalCompactedCells;
       String percentDone = "";
-      if  (totalCompactingKVs > 0) {
+      if  (totalCompactingCells > 0) {
            percentDone = String.format("%.2f", 100 *
-              ((float) numCompactedKVs / totalCompactingKVs)) + "%";
+              ((float) totalCompactedCells / totalCompactingCells)) + "%";
       }
 </%java>
 <tr>
 <td><& rsGroupLink; rsGroupName=rsGroupName; &></td>
-<td><% totalCompactingKVs %></td>
-<td><% numCompactedKVs %></td>
+<td><% totalCompactingCells %></td>
+<td><% totalCompactedCells %></td>
 <td><% remainingKVs %></td>
 <td><% percentDone %></td>
 </tr>
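
The compaction table switches vocabulary from KVs to cells and now sums
getCompactingCellCount()/getCompactedCellCount() per region before the
progress math. The same computation recurs in RegionServerListTmpl.jamon and
rsgroup.jsp below; condensed into one hedged helper (names are mine, not the
commit's):

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;

    final class CompactionProgress {
      // Returns e.g. "42.00%", or "" when nothing is compacting.
      static String percentDone(ServerMetrics sm) {
        long compacting = 0;
        long compacted = 0;
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          compacting += rm.getCompactingCellCount();
          compacted += rm.getCompactedCellCount();
        }
        return compacting > 0
            ? String.format("%.2f", 100 * ((float) compacted / compacting)) + "%"
            : "";
      }
    }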

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index 0b5599d..fb7dd54 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -26,8 +26,10 @@ HMaster master;
 <%import>
         java.util.*;
         org.apache.hadoop.hbase.master.HMaster;
-        org.apache.hadoop.hbase.ServerLoad;
+        org.apache.hadoop.hbase.RegionMetrics;
+        org.apache.hadoop.hbase.ServerMetrics;
         org.apache.hadoop.hbase.ServerName;
+        org.apache.hadoop.hbase.Size;
         org.apache.hadoop.hbase.util.VersionInfo;
         org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 </%import>
@@ -84,12 +86,12 @@ Arrays.sort(serverNames);
 </tr>
 <%java>
     int totalRegions = 0;
-    int totalRequests = 0;
+    int totalRequestsPerSecond = 0;
     int inconsistentNodeNum = 0;
     String masterVersion = VersionInfo.getVersion();
     for (ServerName serverName: serverNames) {
 
-    ServerLoad sl = master.getServerManager().getLoad(serverName);
+    ServerMetrics sl = master.getServerManager().getLoad(serverName);
     String version = master.getRegionServerVersion(serverName);
     if (!masterVersion.equals(version)) {
         inconsistentNodeNum ++;
@@ -100,12 +102,11 @@ Arrays.sort(serverNames);
     long lastContact = 0;
 
     if (sl != null) {
-        requestsPerSecond = sl.getRequestsPerSecond();
-        numRegionsOnline = sl.getNumberOfRegions();
-        totalRegions += sl.getNumberOfRegions();
-        // Is this correct?  Adding a rate to a measure.
-        totalRequests += sl.getNumberOfRequests();
-        lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000;
+        requestsPerSecond = sl.getRequestCountPerSecond();
+        numRegionsOnline = sl.getRegionMetrics().size();
+        totalRegions += sl.getRegionMetrics().size();
+        totalRequestsPerSecond += sl.getRequestCountPerSecond();
+        lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
     }
     long startcode = serverName.getStartcode();
 </%java>
@@ -128,7 +129,7 @@ Arrays.sort(serverNames);
 <%else>
    <td></td>
 </%if>
-<td><% totalRequests %></td>
+<td><% totalRequestsPerSecond %></td>
 <td><% totalRegions %></td>
 </tr>
 </table>
@@ -149,16 +150,20 @@ Arrays.sort(serverNames);
 <%java>
 for (ServerName serverName: serverNames) {
 
-    ServerLoad sl = master.getServerManager().getLoad(serverName);
+    ServerMetrics sl = master.getServerManager().getLoad(serverName);
     if (sl != null) {
+      long memStoreSizeMB = 0;
+      for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+        memStoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
+      }
 </%java>
 <tr>
     <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-    <td><% TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+    <td><% TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
       * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-    <td><% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+    <td><% TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
       * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-    <td><% TraditionalBinaryPrefix.long2String(sl.getMemStoreSizeMB()
+    <td><% TraditionalBinaryPrefix.long2String(memStoreSizeMB
       * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
 
 </tr>
@@ -189,15 +194,23 @@ for (ServerName serverName: serverNames) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+  long readRequestCount = 0;
+  long writeRequestCount = 0;
+  long filteredReadRequestCount = 0;
+  for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+    readRequestCount += rl.getReadRequestCount();
+    writeRequestCount += rl.getWriteRequestCount();
+    filteredReadRequestCount += rl.getFilteredReadRequestCount();
+  }
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% String.format("%.0f", sl.getRequestsPerSecond()) %></td>
-<td><% sl.getReadRequestsCount() %></td>
-<td><% sl.getFilteredReadRequestsCount() %></td>
-<td><% sl.getWriteRequestsCount() %></td>
+<td><% sl.getRequestCountPerSecond() %></td>
+<td><% readRequestCount %></td>
+<td><% filteredReadRequestCount %></td>
+<td><% writeRequestCount %></td>
 </tr>
 <%java>
         }  else {
@@ -228,20 +241,34 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+  long storeCount = 0;
+  long storeFileCount = 0;
+  long storeUncompressedSizeMB = 0;
+  long storeFileSizeMB = 0;
+  long totalStaticIndexSizeKB = 0;
+  long totalStaticBloomSizeKB = 0;
+  for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+    storeCount += rl.getStoreCount();
+    storeFileCount += rl.getStoreFileCount();
+    storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+    storeFileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
+    totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+    totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+  }
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% sl.getStores() %></td>
-<td><% sl.getStorefiles() %></td>
+<td><% storeCount %></td>
+<td><% storeFileCount %></td>
 <td><% TraditionalBinaryPrefix.long2String(
-  sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
+  storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
+<td><% TraditionalBinaryPrefix.long2String(storeFileSizeMB
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+<td><% TraditionalBinaryPrefix.long2String(totalStaticIndexSizeKB
   * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+<td><% TraditionalBinaryPrefix.long2String(totalStaticBloomSizeKB
   * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
 </tr>
 <%java>
@@ -270,19 +297,25 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+long totalCompactingCells = 0;
+long totalCompactedCells = 0;
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+  totalCompactingCells += rl.getCompactingCellCount();
+  totalCompactedCells += rl.getCompactedCellCount();
+}
 String percentDone = "";
-if  (sl.getTotalCompactingKVs() > 0) {
+if  (totalCompactingCells > 0) {
      percentDone = String.format("%.2f", 100 *
-        ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%";
+        ((float) totalCompactedCells / totalCompactingCells)) + "%";
 }
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% sl.getTotalCompactingKVs() %></td>
-<td><% sl.getCurrentCompactedKVs() %></td>
-<td><% sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td>
+<td><% totalCompactingCells %></td>
+<td><% totalCompactedCells %></td>
+<td><% totalCompactingCells - totalCompactedCells %></td>
 <td><% percentDone %></td>
 </tr>
 <%java>
@@ -300,7 +333,7 @@ if  (sl.getTotalCompactingKVs() > 0) {
 <%def serverNameLink>
         <%args>
         ServerName serverName;
-        ServerLoad serverLoad;
+        ServerMetrics serverLoad;
         </%args>
         <%java>
         int infoPort = master.getRegionServerInfoPort(serverName);
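
One subtlety in these heap and store-size columns: ServerLoad returned
pre-truncated MB/KB longs, while Size.get(Size.Unit.MEGABYTE) returns a
double, which explains the explicit (long) casts before
TraditionalBinaryPrefix.long2String. A sketch of the display conversion,
assuming only classes this template already imports:

    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;
    import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

    final class HeapDisplay {
      static String usedHeap(ServerMetrics sm) {
        long usedHeapMB = (long) sm.getUsedHeapSize().get(Size.Unit.MEGABYTE);
        // long2String expects bytes, so scale megabytes back up first.
        return TraditionalBinaryPrefix.long2String(
            usedHeapMB * TraditionalBinaryPrefix.MEGA.value, "B", 1);
      }
    }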

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
index 81aa12d..6869390 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
@@ -28,11 +28,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -132,12 +132,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
           }
           //the region is currently on none of the favored nodes
           //get it on one of them if possible
-          ServerLoad l1 = super.services.getServerManager().getLoad(
+          ServerMetrics l1 = super.services.getServerManager().getLoad(
               serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
-          ServerLoad l2 = super.services.getServerManager().getLoad(
+          ServerMetrics l2 = super.services.getServerManager().getLoad(
               serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
           if (l1 != null && l2 != null) {
-            if (l1.getLoad() > l2.getLoad()) {
+            if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) {
               destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
             } else {
               destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
@@ -296,9 +296,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
       // assign the region to the one with a lower load
       // (both have the desired hdfs blocks)
       ServerName s;
-      ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-      ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
-      if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+      ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+      ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+      if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
         s = secondaryHost;
       } else {
         s = tertiaryHost;
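
ServerLoad.getLoad() was defined as the number of online regions, so the
favored-node tie-break keeps its meaning by comparing
getRegionMetrics().size(). Distilled into a hedged helper (the name is mine):

    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;

    final class LoadCompare {
      // Prefer whichever candidate currently hosts fewer regions.
      static ServerName lessLoaded(ServerName a, ServerMetrics aLoad,
          ServerName b, ServerMetrics bLoad) {
        return aLoad.getRegionMetrics().size() < bLoad.getRegionMetrics().size()
            ? a : b;
      }
    }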

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
index 262c59e..0dd50ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
@@ -24,13 +24,10 @@ import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
 import java.util.Map;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
@@ -39,6 +36,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.RSDumpServlet;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class MasterDumpServlet extends StateDumpServlet {
@@ -132,8 +130,8 @@ public class MasterDumpServlet extends StateDumpServlet {
       return;
     }
 
-    Map<ServerName, ServerLoad> servers = sm.getOnlineServers();
-    for (Map.Entry<ServerName, ServerLoad> e : servers.entrySet()) {
+    Map<ServerName, ServerMetrics> servers = sm.getOnlineServers();
+    for (Map.Entry<ServerName, ServerMetrics> e : servers.entrySet()) {
       out.println(e.getKey() + ": " + e.getValue());
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b4f0faf..8f92041 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -37,7 +37,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
@@ -102,6 +103,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -451,16 +453,16 @@ public class MasterRpcServices extends RSRpcServices
       master.checkServiceStarted();
       ClusterStatusProtos.ServerLoad sl = request.getLoad();
       ServerName serverName = ProtobufUtil.toServerName(request.getServer());
-      ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
-      ServerLoad newLoad = new ServerLoad(serverName, sl);
+      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
+      ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, sl);
       master.getServerManager().regionServerReport(serverName, newLoad);
       int version = VersionInfoUtil.getCurrentClientVersionNumber();
       master.getAssignmentManager().reportOnlineRegions(serverName,
-        version, newLoad.getRegionsLoad().keySet());
+        version, newLoad.getRegionMetrics().keySet());
       if (sl != null && master.metricsMaster != null) {
         // Up our metrics.
         master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
-            - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
+            - (oldLoad != null ? oldLoad.getRequestCount() : 0));
       }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
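
Note that the wire format is untouched: the protobuf message is still
ClusterStatusProtos.ServerLoad, and only the Java-side model changes, with
ServerMetricsBuilder.toServerMetrics doing the conversion that
new ServerLoad(serverName, sl) used to. As a minimal sketch:

    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerMetricsBuilder;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

    final class ReportConversion {
      // The protobuf keeps its old name; only the in-memory model is renamed.
      static ServerMetrics fromReport(ServerName sn,
          ClusterStatusProtos.ServerLoad sl) {
        return ServerMetricsBuilder.toServerMetrics(sn, sl);
      }
    }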

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index dbcce1d..06d6c8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -37,13 +37,12 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Predicate;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
@@ -62,8 +61,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
@@ -124,7 +125,8 @@ public class ServerManager {
     storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
   /** Map of registered servers to their current load */
-  private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers = new ConcurrentSkipListMap<>();
+  private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
+    new ConcurrentSkipListMap<>();
 
   /**
    * Map of admin interfaces per registered regionserver; these interfaces we use to control
@@ -240,7 +242,7 @@ public class ServerManager {
       request.getServerStartCode());
     checkClockSkew(sn, request.getServerCurrentTime());
     checkIsDead(sn, "STARTUP");
-    if (!checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) {
+    if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn))) {
       LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
         + " could not record the server: " + sn);
     }
@@ -252,12 +254,11 @@ public class ServerManager {
    * @param sn
    * @param hsl
    */
-  private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
-    Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
-    for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
+  private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) {
+    for (Entry<byte[], RegionMetrics> entry : hsl.getRegionMetrics().entrySet()) {
       byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey()));
       Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
-      long l = entry.getValue().getCompleteSequenceId();
+      long l = entry.getValue().getCompletedSequenceId();
       // Don't let smaller sequence ids override greater sequence ids.
       if (LOG.isTraceEnabled()) {
         LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
@@ -273,10 +274,10 @@ public class ServerManager {
       ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
           computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName,
             () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
-      for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
-        byte[] family = storeSeqId.getFamilyName().toByteArray();
+      for (Entry<byte[], Long> storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) {
+        byte[] family = storeSeqId.getKey();
         existingValue = storeFlushedSequenceId.get(family);
-        l = storeSeqId.getSequenceId();
+        l = storeSeqId.getValue();
         if (LOG.isTraceEnabled()) {
           LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
             ", existingValue=" + existingValue + ", completeSequenceId=" + l);
@@ -291,7 +292,7 @@ public class ServerManager {
 
   @VisibleForTesting
   public void regionServerReport(ServerName sn,
-      ServerLoad sl) throws YouAreDeadException {
+    ServerMetrics sl) throws YouAreDeadException {
     checkIsDead(sn, "REPORT");
     if (null == this.onlineServers.replace(sn, sl)) {
       // Already have this host+port combo and its just different start code?
@@ -316,7 +317,7 @@ public class ServerManager {
    * @param sl the server load on the server
    * @return true if the server is recorded, otherwise, false
    */
-  boolean checkAndRecordNewServer(final ServerName serverName, final ServerLoad sl) {
+  boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) {
     ServerName existingServer = null;
     synchronized (this.onlineServers) {
       existingServer = findServerWithSameHostnamePortWithLock(serverName);
@@ -423,7 +424,7 @@ public class ServerManager {
    * @param serverName The remote servers name.
    */
   @VisibleForTesting
-  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
+  void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) {
     LOG.info("Registering regionserver=" + serverName);
     this.onlineServers.put(serverName, sl);
     this.rsAdmins.remove(serverName);
@@ -447,9 +448,9 @@ public class ServerManager {
 
   /**
    * @param serverName
-   * @return ServerLoad if serverName is known else null
+   * @return ServerMetrics if serverName is known else null
    */
-  public ServerLoad getLoad(final ServerName serverName) {
+  public ServerMetrics getLoad(final ServerName serverName) {
     return this.onlineServers.get(serverName);
   }
 
@@ -462,9 +463,9 @@ public class ServerManager {
   public double getAverageLoad() {
     int totalLoad = 0;
     int numServers = 0;
-    for (ServerLoad sl: this.onlineServers.values()) {
-        numServers++;
-        totalLoad += sl.getNumberOfRegions();
+    for (ServerMetrics sl : this.onlineServers.values()) {
+      numServers++;
+      totalLoad += sl.getRegionMetrics().size();
     }
     return numServers == 0 ? 0 :
       (double)totalLoad / (double)numServers;
@@ -479,7 +480,7 @@ public class ServerManager {
   /**
    * @return Read-only map of servers to serverinfo
    */
-  public Map<ServerName, ServerLoad> getOnlineServers() {
+  public Map<ServerName, ServerMetrics> getOnlineServers() {
     // Presumption is that iterating the returned Map is OK.
     synchronized (this.onlineServers) {
       return Collections.unmodifiableMap(this.onlineServers);
@@ -907,11 +908,11 @@ public class ServerManager {
    * @return A copy of the internal list of online servers matched by the predicator
    */
   public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> keys,
-    Predicate<ServerLoad> idleServerPredicator) {
+    Predicate<ServerMetrics> idleServerPredicator) {
     List<ServerName> names = new ArrayList<>();
     if (keys != null && idleServerPredicator != null) {
       keys.forEach(name -> {
-        ServerLoad load = onlineServers.get(name);
+        ServerMetrics load = onlineServers.get(name);
         if (load != null) {
           if (idleServerPredicator.test(load)) {
             names.add(name);
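
The flush-tracking loop in updateLastFlushedSequenceIds also changes shape:
RegionMetrics.getStoreSequenceId() returns a plain family-to-sequence-id map,
replacing the list of protobuf StoreSequenceId messages that had to be
unpacked field by field. A small hedged sketch of walking it:

    import java.util.Map;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.util.Bytes;

    final class StoreSeqIds {
      static void dump(RegionMetrics rm) {
        for (Map.Entry<byte[], Long> e : rm.getStoreSequenceId().entrySet()) {
          // Key is the column family name, value the last flushed sequence id.
          System.out.println(Bytes.toString(e.getKey()) + "=" + e.getValue());
        }
      }
    }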

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index a8dd9ae..36f57f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1,4 +1,5 @@
- /*
+/**
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -33,7 +34,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -73,8 +73,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
 
   private static final List<RegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
 
-  static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
-    = load -> load.getNumberOfRegions() == 0;
+  static final Predicate<ServerMetrics> IDLE_SERVER_PREDICATOR
+    = load -> load.getRegionMetrics().isEmpty();
 
   protected RegionLocationFinder regionFinder;
   protected boolean useRegionFinder;
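
The idle-server predicate preserves its contract -- a server is idle when it
hosts zero regions -- with isEmpty() standing in for
getNumberOfRegions() == 0. Self-contained, for illustration:

    import java.util.function.Predicate;
    import org.apache.hadoop.hbase.ServerMetrics;

    final class IdleCheck {
      // "Idle" still means "hosts no regions".
      static final Predicate<ServerMetrics> IDLE =
          load -> load.getRegionMetrics().isEmpty();
    }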

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
index a72478c..b652610 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
@@ -31,9 +31,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.util.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -274,10 +274,10 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
 
       // Assign the region to the one with a lower load (both have the desired hdfs blocks)
       ServerName s;
-      ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-      ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+      ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+      ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
       if (secondaryLoad != null && tertiaryLoad != null) {
-        if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+        if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
           s = secondaryHost;
         } else {
           s = tertiaryHost;

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index 4c3167f..7e1dd4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -22,10 +22,10 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 
 /**
@@ -44,7 +45,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
  *
  *  <ol>
  *  <li> Get all regions of a given table
- *  <li> Get avg size S of each region (by total size of store files reported in RegionLoad)
+ *  <li> Get avg size S of each region (by total size of store files reported in RegionMetrics)
  *  <li> Seek every single region one by one. If a region R0 is bigger than S * 2, it is
  *  kindly requested to split. Thereon evaluate the next region R1
  *  <li> Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.
@@ -204,12 +205,12 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
   private long getRegionSize(RegionInfo hri) {
     ServerName sn = masterServices.getAssignmentManager().getRegionStates().
       getRegionServerOfRegion(hri);
-    RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn).
-      getRegionsLoad().get(hri.getRegionName());
+    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).
+      getRegionMetrics().get(hri.getRegionName());
     if (regionLoad == null) {
       LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");
       return -1;
     }
-    return regionLoad.getStorefileSizeMB();
+    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
index 9f95b76..7b7e227 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
@@ -28,7 +28,6 @@
   import="java.util.stream.Collectors"
   import="org.apache.hadoop.hbase.HTableDescriptor"
   import="org.apache.hadoop.hbase.RSGroupTableAccessor"
-  import="org.apache.hadoop.hbase.ServerLoad"
   import="org.apache.hadoop.hbase.ServerName"
   import="org.apache.hadoop.hbase.TableName"
   import="org.apache.hadoop.hbase.client.Admin"
@@ -42,6 +41,9 @@
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.VersionInfo"
   import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%>
+<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
+<%@ page import="org.apache.hadoop.hbase.Size" %>
+<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
 <%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   String rsGroupName = request.getParameter("name");
@@ -67,7 +69,7 @@
       return 0;
     });
 
-  Map<Address, ServerLoad> onlineServers = Collections.emptyMap();
+  Map<Address, ServerMetrics> onlineServers = Collections.emptyMap();
   Map<Address, ServerName> serverMaping = Collections.emptyMap();
   if (master.getServerManager() != null) {
     onlineServers = master.getServerManager().getOnlineServers().entrySet().stream()
@@ -141,7 +143,7 @@
                for (Address server: rsGroupServers) {
                  ServerName serverName = serverMaping.get(server);
                  if (serverName != null) {
-                   ServerLoad sl = onlineServers.get(server);
+                   ServerMetrics sl = onlineServers.get(server);
                    String version = master.getRegionServerVersion(serverName);
                    if (!masterVersion.equals(version)) {
                      inconsistentNodeNum ++;
@@ -150,11 +152,11 @@
                    int numRegionsOnline = 0;
                    long lastContact = 0;
                    if (sl != null) {
-                     requestsPerSecond = sl.getRequestsPerSecond();
-                     numRegionsOnline = sl.getNumberOfRegions();
-                     totalRegions += sl.getNumberOfRegions();
-                     totalRequests += sl.getNumberOfRequests();
-                     lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000;
+                     requestsPerSecond = sl.getRequestCountPerSecond();
+                     numRegionsOnline = sl.getRegionMetrics().size();
+                     totalRegions += sl.getRegionMetrics().size();
+                     totalRequests += sl.getRequestCount();
+                     lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
                    }
                    long startcode = serverName.getStartcode();
                    int infoPort = master.getRegionServerInfoPort(serverName);
@@ -201,18 +203,21 @@
             </tr>
             <% for (Address server: rsGroupServers) {
                  ServerName serverName = serverMaping.get(server);
-                 ServerLoad sl = onlineServers.get(server);
+                 ServerMetrics sl = onlineServers.get(server);
                  if (sl != null && serverName != null) {
+                   double memStoreSizeMB = sl.getRegionMetrics().values()
+                           .stream().mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
+                           .sum();
                    int infoPort = master.getRegionServerInfoPort(serverName);
                    String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
             %>
                    <tr>
                      <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-                     <td><%= TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+                     <td><%= TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
                        * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-                     <td><%= TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+                     <td><%= TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
                        * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-                     <td><%= TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+                     <td><%= TraditionalBinaryPrefix.long2String((long) memStoreSizeMB
                        * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
                    </tr>
               <% } else { %>
@@ -236,16 +241,22 @@
             </tr>
             <% for (Address server: rsGroupServers) {
                  ServerName serverName = serverMaping.get(server);
-                 ServerLoad sl = onlineServers.get(server);
+                 ServerMetrics sl = onlineServers.get(server);
                  if (sl != null && serverName != null) {
                    int infoPort = master.getRegionServerInfoPort(serverName);
+                   long readRequestCount = 0;
+                   long writeRequestCount = 0;
+                   for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+                     readRequestCount += rm.getReadRequestCount();
+                     writeRequestCount += rm.getWriteRequestCount();
+                   }
                    String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
             %>
                    <tr>
                      <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-                     <td><%= String.format("%.0f", sl.getRequestsPerSecond()) %></td>
-                     <td><%= sl.getReadRequestsCount() %></td>
-                     <td><%= sl.getWriteRequestsCount() %></td>
+                     <td><%= String.format("%.0f", sl.getRequestCountPerSecond()) %></td>
+                     <td><%= readRequestCount %></td>
+                     <td><%= writeRequestCount %></td>
                    </tr>
               <% } else { %>
                    <tr>
@@ -271,22 +282,36 @@
             </tr>
             <%  for (Address server: rsGroupServers) {
                   ServerName serverName = serverMaping.get(server);
-                  ServerLoad sl = onlineServers.get(server);
+                  ServerMetrics sl = onlineServers.get(server);
                   if (sl != null && serverName != null) {
+                    long storeCount = 0;
+                    long storeFileCount = 0;
+                    double storeUncompressedSizeMB = 0;
+                    double storeFileSizeMB = 0;
+                    double totalStaticIndexSizeKB = 0;
+                    double totalStaticBloomSizeKB = 0;
+                    for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+                      storeCount += rm.getStoreCount();
+                      storeFileCount += rm.getStoreFileCount();
+                      storeUncompressedSizeMB += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+                      storeFileSizeMB += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
+                      totalStaticIndexSizeKB += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+                      totalStaticBloomSizeKB += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+                    }
                     int infoPort = master.getRegionServerInfoPort(serverName);
                     String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
             %>
                     <tr>
                       <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-                      <td><%= sl.getStores() %></td>
-                      <td><%= sl.getStorefiles() %></td>
+                      <td><%= storeCount %></td>
+                      <td><%= storeFileCount %></td>
                       <td><%= TraditionalBinaryPrefix.long2String(
-                          sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-                      <td><%= TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+                          (long) storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
+                      <td><%= TraditionalBinaryPrefix.long2String((long) storeFileSizeMB
                           * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-                      <td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+                      <td><%= TraditionalBinaryPrefix.long2String((long) totalStaticIndexSizeKB
                           * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
-                      <td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+                      <td><%= TraditionalBinaryPrefix.long2String((long) totalStaticBloomSizeKB
                           * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
                     </tr>
                <% } else { %>
@@ -314,21 +339,27 @@
             </tr>
             <%  for (Address server: rsGroupServers) {
                   ServerName serverName = serverMaping.get(server);
-                  ServerLoad sl = onlineServers.get(server);
+                  ServerMetrics sl = onlineServers.get(server);
                   if (sl != null && serverName != null) {
+                    long totalCompactingCells = 0;
+                    long currentCompactedCells = 0;
+                    for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+                      totalCompactingCells += rm.getCompactingCellCount();
+                      currentCompactedCells += rm.getCompactedCellCount();
+                    }
                     String percentDone = "";
-                    if  (sl.getTotalCompactingKVs() > 0) {
+                    if (totalCompactingCells > 0) {
                          percentDone = String.format("%.2f", 100 *
-                            ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%";
+                            ((float) currentCompactedCells / totalCompactingCells)) + "%";
                     }
                     int infoPort = master.getRegionServerInfoPort(serverName);
                     String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
             %>
                     <tr>
                       <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-                      <td><%= sl.getTotalCompactingKVs() %></td>
-                      <td><%= sl.getCurrentCompactedKVs() %></td>
-                      <td><%= sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td>
+                      <td><%= totalCompactingCells %></td>
+                      <td><%= currentCompactedCells %></td>
+                      <td><%= totalCompactingCells - currentCompactedCells %></td>
                       <td><%= percentDone %></td>
                     </tr>
                <% } else { %>
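
For reference, the pattern these template changes rely on, as an illustrative
sketch (not part of the patch): ServerMetrics no longer exposes server-wide
rollups such as getReadRequestsCount() or getTotalCompactingKVs(), so the
pages derive the totals by summing the per-region RegionMetrics.

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;

    public final class ServerMetricsSums {
      /** Returns {readRequests, writeRequests} summed over all regions of a server. */
      static long[] requestCounts(ServerMetrics sm) {
        long reads = 0;
        long writes = 0;
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          reads += rm.getReadRequestCount();
          writes += rm.getWriteRequestCount();
        }
        return new long[] { reads, writes };
      }
    }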

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 9252552..e52f33a 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -34,8 +34,6 @@
   import="org.apache.hadoop.hbase.HColumnDescriptor"
   import="org.apache.hadoop.hbase.HConstants"
   import="org.apache.hadoop.hbase.HRegionLocation"
-  import="org.apache.hadoop.hbase.RegionLoad"
-  import="org.apache.hadoop.hbase.ServerLoad"
   import="org.apache.hadoop.hbase.ServerName"
   import="org.apache.hadoop.hbase.TableName"
   import="org.apache.hadoop.hbase.TableNotFoundException"
@@ -60,16 +58,20 @@
 <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
 <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
 <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %>
+<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
+<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
+<%@ page import="org.apache.hadoop.hbase.Size" %>
+<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %>
 <%!
   /**
    * @return An empty region load stamped with the passed in <code>regionInfo</code>
    * region name.
    */
-  private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) {
-    return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder().
-      setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
-      setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
-      setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
+  private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) {
+    return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder().
+            setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
+                    setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
+                    setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
   }
 %>
 <%
@@ -87,7 +89,6 @@
   Table table;
   String tableHeader;
   boolean withReplica = false;
-  ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
   boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
   boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
   int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
@@ -216,18 +217,18 @@ if ( fqtn != null ) {
       float locality = 0.0f;
 
       if (metaLocation != null) {
-        ServerLoad sl = master.getServerManager().getLoad(metaLocation);
+        ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
         // The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely.
         hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation);
         if (sl != null) {
-          Map<byte[], RegionLoad> map = sl.getRegionsLoad();
+          Map<byte[], RegionMetrics> map = sl.getRegionMetrics();
           if (map.containsKey(meta.getRegionName())) {
-            RegionLoad load = map.get(meta.getRegionName());
-            readReq = String.format("%,1d", load.getReadRequestsCount());
-            writeReq = String.format("%,1d", load.getWriteRequestsCount());
-            fileSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
-            fileCount = String.format("%,1d", load.getStorefiles());
-            memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
+            RegionMetrics load = map.get(meta.getRegionName());
+            readReq = String.format("%,1d", load.getReadRequestCount());
+            writeReq = String.format("%,1d", load.getWriteRequestCount());
+            fileSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
+            fileCount = String.format("%,1d", load.getStoreFileCount());
+            memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
             locality = load.getDataLocality();
           }
         }
@@ -400,7 +401,7 @@ if ( fqtn != null ) {
   Map<ServerName, Integer> regDistribution = new TreeMap<>();
   Map<ServerName, Integer> primaryRegDistribution = new TreeMap<>();
   List<HRegionLocation> regions = r.getAllRegionLocations();
-  Map<RegionInfo, RegionLoad> regionsToLoad = new LinkedHashMap<>();
+  Map<RegionInfo, RegionMetrics> regionsToLoad = new LinkedHashMap<>();
   Map<RegionInfo, ServerName> regionsToServer = new LinkedHashMap<>();
   for (HRegionLocation hriEntry : regions) {
     RegionInfo regionInfo = hriEntry.getRegionInfo();
@@ -408,28 +409,27 @@ if ( fqtn != null ) {
     regionsToServer.put(regionInfo, addr);
 
     if (addr != null) {
-      ServerLoad sl = master.getServerManager().getLoad(addr);
+      ServerMetrics sl = master.getServerManager().getLoad(addr);
       if (sl != null) {
-        Map<byte[], RegionLoad> map = sl.getRegionsLoad();
-        RegionLoad regionload = map.get(regionInfo.getRegionName());
-        regionsToLoad.put(regionInfo, regionload);
-        if(regionload != null) {
-          totalReadReq += regionload.getReadRequestsCount();
-          totalWriteReq += regionload.getWriteRequestsCount();
-          totalSize += regionload.getStorefileSizeMB();
-          totalStoreFileCount += regionload.getStorefiles();
-          totalMemSize += regionload.getMemStoreSizeMB();
-          totalStoreFileSizeMB += regionload.getStorefileSizeMB();
+        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
+        regionsToLoad.put(regionInfo, regionMetrics);
+        if(regionMetrics != null) {
+          totalReadReq += regionMetrics.getReadRequestCount();
+          totalWriteReq += regionMetrics.getWriteRequestCount();
+          totalSize += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
+          totalStoreFileCount += regionMetrics.getStoreFileCount();
+          totalMemSize += regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
+          totalStoreFileSizeMB += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
         } else {
-          RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+          RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
           regionsToLoad.put(regionInfo, load0);
         }
       } else{
-        RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+        RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
         regionsToLoad.put(regionInfo, load0);
       }
     } else {
-      RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+      RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
       regionsToLoad.put(regionInfo, load0);
     }
   }
@@ -474,156 +474,92 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
 </tr>
 
 <%
-  List<Map.Entry<RegionInfo, RegionLoad>> entryList = new ArrayList<>(regionsToLoad.entrySet());
+  List<Map.Entry<RegionInfo, RegionMetrics>> entryList = new ArrayList<>(regionsToLoad.entrySet());
   if(sortKey != null) {
     if (sortKey.equals("readrequest")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue() == null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue() == null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getReadRequestsCount() < entry2.getValue().getReadRequestsCount()) {
-                result = -1;
-              } else if (entry1.getValue().getReadRequestsCount() > entry2.getValue().getReadRequestsCount()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Long.compare(entry1.getValue().getReadRequestCount(),
+                entry2.getValue().getReadRequestCount());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     } else if (sortKey.equals("writerequest")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue() == null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue() == null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getWriteRequestsCount() < entry2.getValue()
-                  .getWriteRequestsCount()) {
-                result = -1;
-              } else if (entry1.getValue().getWriteRequestsCount() > entry2.getValue()
-                  .getWriteRequestsCount()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Long.compare(entry1.getValue().getWriteRequestCount(),
+                entry2.getValue().getWriteRequestCount());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     } else if (sortKey.equals("size")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue() == null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue() == null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getStorefileSizeMB() < entry2.getValue()
-                  .getStorefileSizeMB()) {
-                result = -1;
-              } else if (entry1.getValue().getStorefileSizeMB() > entry2
-                  .getValue().getStorefileSizeMB()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Double.compare(entry1.getValue().getStoreFileSize().get(),
+                entry2.getValue().getStoreFileSize().get());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     } else if (sortKey.equals("filecount")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue() == null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue() == null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getStorefiles() < entry2.getValue()
-                  .getStorefiles()) {
-                result = -1;
-              } else if (entry1.getValue().getStorefiles() > entry2.getValue()
-                  .getStorefiles()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Integer.compare(entry1.getValue().getStoreFileCount(),
+                entry2.getValue().getStoreFileCount());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     } else if (sortKey.equals("memstore")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue()==null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue()==null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getMemStoreSizeMB() < entry2.getValue()
-                  .getMemStoreSizeMB()) {
-                result = -1;
-              } else if (entry1.getValue().getMemStoreSizeMB() > entry2
-                  .getValue().getMemStoreSizeMB()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Double.compare(entry1.getValue().getMemStoreSize().get(),
+                entry2.getValue().getMemStoreSize().get());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     } else if (sortKey.equals("locality")) {
-      Collections.sort(entryList,
-          new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-            public int compare(
-                Map.Entry<RegionInfo, RegionLoad> entry1,
-                Map.Entry<RegionInfo, RegionLoad> entry2) {
-              if (entry1 == null || entry1.getValue()==null) {
-                return -1;
-              } else if (entry2 == null || entry2.getValue()==null) {
-                return 1;
-              }
-              int result = 0;
-              if (entry1.getValue().getDataLocality() < entry2.getValue()
-                  .getDataLocality()) {
-                result = -1;
-              } else if (entry1.getValue().getDataLocality() > entry2
-                  .getValue().getDataLocality()) {
-                result = 1;
-              }
-              if (reverseOrder) {
-                result = -1 * result;
-              }
-              return result;
-            }
-          });
+      Collections.sort(entryList, (entry1, entry2) -> {
+        if (entry1 == null || entry1.getValue() == null) {
+          return -1;
+        } else if (entry2 == null || entry2.getValue() == null) {
+          return 1;
+        }
+        int result = Double.compare(entry1.getValue().getDataLocality(),
+                entry2.getValue().getDataLocality());
+        if (reverseOrder) {
+          result = -1 * result;
+        }
+        return result;
+      });
     }
   }
   numRegions = regions.size();
@@ -632,10 +568,10 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
   if (numRegionsToRender < 0) {
     numRegionsToRender = numRegions;
   }
-  for (Map.Entry<RegionInfo, RegionLoad> hriEntry : entryList) {
+  for (Map.Entry<RegionInfo, RegionMetrics> hriEntry : entryList) {
     RegionInfo regionInfo = hriEntry.getKey();
     ServerName addr = regionsToServer.get(regionInfo);
-    RegionLoad load = hriEntry.getValue();
+    RegionMetrics load = hriEntry.getValue();
     String readReq = "N/A";
     String writeReq = "N/A";
     String regionSize = "N/A";
@@ -644,11 +580,11 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
     float locality = 0.0f;
     String state = "N/A";
     if(load != null) {
-      readReq = String.format("%,1d", load.getReadRequestsCount());
-      writeReq = String.format("%,1d", load.getWriteRequestsCount());
-      regionSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
-      fileCount = String.format("%,1d", load.getStorefiles());
-      memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
+      readReq = String.format("%,1d", load.getReadRequestCount());
+      writeReq = String.format("%,1d", load.getWriteRequestCount());
+      regionSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
+      fileCount = String.format("%,1d", load.getStoreFileCount());
+      memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
       locality = load.getDataLocality();
     }
 
@@ -657,7 +593,7 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
     }
 
     if (addr != null) {
-      ServerLoad sl = master.getServerManager().getLoad(addr);
+      ServerMetrics sl = master.getServerManager().getLoad(addr);
       // This port might be wrong if RS actually ended up using something else.
       urlRegionServer =
           "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f6e971c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index 02a35d8..bcc89b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -29,8 +29,9 @@ import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -363,15 +364,16 @@ public class TestSimpleRegionNormalizer {
       getRegionServerOfRegion(any())).thenReturn(sn);
 
     for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
-      RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
-      when(regionLoad.getName()).thenReturn(region.getKey());
-      when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
+      RegionMetrics regionLoad = Mockito.mock(RegionMetrics.class);
+      when(regionLoad.getRegionName()).thenReturn(region.getKey());
+      when(regionLoad.getStoreFileSize())
+        .thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE));
 
       // this is possibly broken with jdk9, unclear if false positive or not
       // suppress it for now, fix it when we get to running tests on 9
       // see: http://errorprone.info/bugpattern/MockitoCast
       when((Object) masterServices.getServerManager().getLoad(sn).
-        getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
+        getRegionMetrics().get(region.getKey())).thenReturn(regionLoad);
     }
     try {
       when(masterRpcServices.isSplitOrMergeEnabled(any(),
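
A minimal sketch of the mocking pattern used above, assuming Mockito and a
hypothetical helper name: sizes now round-trip through Size, so the mock
returns new Size(...) and callers read a chosen unit back.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.Size;

    public final class RegionMetricsMocks {
      /** Builds a RegionMetrics mock reporting the given store file size in MB. */
      static RegionMetrics withStoreFileSizeMB(byte[] regionName, int mb) {
        RegionMetrics rm = mock(RegionMetrics.class);
        when(rm.getRegionName()).thenReturn(regionName);
        when(rm.getStoreFileSize()).thenReturn(new Size(mb, Size.Unit.MEGABYTE));
        return rm;
      }
    }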


[05/11] hbase git commit: HBASE-20106 [api compliance checker] Fix Bug Where Branch Isn't Found

Posted by bu...@apache.org.
HBASE-20106 [api compliance checker] Fix Bug Where Branch Isn't Found

When running git rev-parse, the branch sometimes cannot be found unless
the remote is specified. This fix retries with the "origin" remote when
the branch is not found and no remote was specified.

Signed-off-by: Sean Busbey <bu...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96ebab74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96ebab74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96ebab74

Branch: refs/heads/HBASE-15151
Commit: 96ebab748fae24582cfaea0d0f8c318f79bf1681
Parents: 6cfa208
Author: Alex Leblang <al...@cloudera.com>
Authored: Thu Sep 7 12:31:53 2017 -0400
Committer: Sean Busbey <bu...@apache.org>
Committed: Wed Feb 28 11:52:15 2018 -0600

----------------------------------------------------------------------
 dev-support/checkcompatibility.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/96ebab74/dev-support/checkcompatibility.py
----------------------------------------------------------------------
diff --git a/dev-support/checkcompatibility.py b/dev-support/checkcompatibility.py
index 83189cf..ea9c229 100755
--- a/dev-support/checkcompatibility.py
+++ b/dev-support/checkcompatibility.py
@@ -116,7 +116,12 @@ def checkout_java_tree(rev, path):
 
 def get_git_hash(revname):
     """ Convert 'revname' to its SHA-1 hash. """
-    return check_output(["git", "rev-parse", revname],
+    try:
+        return check_output(["git", "rev-parse", revname],
+                        cwd=get_repo_dir()).strip()
+    except Exception:
+        revname = "origin/" + revname
+        return check_output(["git", "rev-parse", revname],
                         cwd=get_repo_dir()).strip()
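
The same fallback idea, rendered in Java purely for illustration (the patch
itself is Python; the class and method names here are hypothetical):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public final class GitRevParseFallback {
      /** Resolves a rev name to its SHA-1, retrying with an assumed "origin/" prefix. */
      static String gitHash(String revname) throws IOException, InterruptedException {
        try {
          return run("git", "rev-parse", revname);
        } catch (IOException e) {
          // The branch may only exist on the remote; retry with "origin/" prepended.
          return run("git", "rev-parse", "origin/" + revname);
        }
      }

      private static String run(String... cmd) throws IOException, InterruptedException {
        Process p = new ProcessBuilder(cmd).redirectErrorStream(true).start();
        StringBuilder out = new StringBuilder();
        try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
          String line;
          while ((line = r.readLine()) != null) {
            out.append(line);
          }
        }
        if (p.waitFor() != 0) {
          throw new IOException("Command failed: " + String.join(" ", cmd));
        }
        return out.toString().trim();
      }
    }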
 
 


[06/11] hbase git commit: HBASE-19863 java.lang.IllegalStateException: isDelete failed when SingleColumnValueFilter is used

Posted by bu...@apache.org.
HBASE-19863 java.lang.IllegalStateException: isDelete failed when SingleColumnValueFilter is used

Signed-off-by: Chia-Ping Tsai <ch...@gmail.com>
Signed-off-by: ramkrish86 <ra...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/393ab302
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/393ab302
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/393ab302

Branch: refs/heads/HBASE-15151
Commit: 393ab302ab08b70a839ec87e75fcf4b165765db2
Parents: 96ebab7
Author: Sergey Soldatov <ss...@apache.org>
Authored: Tue Feb 13 22:08:11 2018 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Wed Feb 28 13:58:37 2018 -0500

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/StoreScanner.java |   6 +
 .../hadoop/hbase/HBaseTestingUtility.java       |  41 +++--
 .../hbase/regionserver/TestIsDeleteFailure.java | 160 +++++++++++++++++++
 3 files changed, 195 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/393ab302/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 1624810..9f6a015 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -816,6 +816,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         return false;
       }
     } while ((nextCell = this.heap.peek()) != null && CellUtil.matchingRowColumn(cell, nextCell));
+    // We need this check because it may happen that the new scanner we get
+    // during heap.next() requires a reseek due to the fake KV previously
+    // generated for the ROWCOL bloom filter optimization. See HBASE-19863 for more details.
+    if (nextCell != null && matcher.compareKeyForNextColumn(nextCell, cell) < 0) {
+      return false;
+    }
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/393ab302/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 2bdfd2d..b48abc6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1378,23 +1378,40 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Create a table.
-   * @param htd
-   * @param families
-   * @param splitKeys
+   * @param htd table descriptor
+   * @param families array of column families
+   * @param splitKeys array of split keys
    * @param c Configuration to use
    * @return A Table instance for the created table.
-   * @throws IOException
+   * @throws IOException if getAdmin or createTable fails
    */
   public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
       Configuration c) throws IOException {
+    // Disable blooms (they are on by default as of 0.95) but we disable them here because
+    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
+    // on is interfering.
+    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
+  }
+
+  /**
+   * Create a table.
+   * @param htd table descriptor
+   * @param families array of column families
+   * @param splitKeys array of split keys
+   * @param type Bloom type
+   * @param blockSize block size
+   * @param c Configuration to use
+   * @return A Table instance for the created table.
+   * @throws IOException if getAdmin or createTable fails
+   */
+
+  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
+      BloomType type, int blockSize, Configuration c) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
     for (byte[] family : families) {
-      // Disable blooms (they are on by default as of 0.95) but we disable them here because
-      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
-      // on is interfering.
-      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
-                              .setBloomFilterType(BloomType.NONE)
-                              .build());
+      builder.addColumnFamily(
+          ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(type)
+              .setBlocksize(blockSize).build());
     }
     TableDescriptor td = builder.build();
     getAdmin().createTable(td, splitKeys);
@@ -1406,8 +1423,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Create a table.
-   * @param htd
-   * @param splitRows
+   * @param htd table descriptor
+   * @param splitRows array of split keys
    * @return A Table instance for the created table.
    * @throws IOException
    */
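
A usage sketch of the new overload (hypothetical table and family names; the
test added below calls it the same way):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableWithBlooms {
      /** Creates table "t1" with one family, ROWCOL blooms and a 10000-byte block size. */
      static Table create(HBaseTestingUtility util) throws Exception {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
        byte[][] families = { Bytes.toBytes("f") };
        return util.createTable(htd, families, null, BloomType.ROWCOL, 10000,
            new Configuration(util.getConfiguration()));
      }
    }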

http://git-wip-us.apache.org/repos/asf/hbase/blob/393ab302/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java
new file mode 100644
index 0000000..1198867
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.testclassification.FilterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test failure in ScanDeleteTracker.isDeleted when ROWCOL bloom filter
+ * is used during a scan with a filter.
+ */
+@Category({ RegionServerTests.class, FilterTests.class, MediumTests.class })
+public class TestIsDeleteFailure {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestIsDeleteFailure.class);
+
+  @Rule public TestName name = new TestName();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 2);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testIsDeleteFailure() throws Exception {
+    final HTableDescriptor table = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
+    final byte[] family = Bytes.toBytes("0");
+    final byte[] c1 = Bytes.toBytes("C01");
+    final byte[] c2 = Bytes.toBytes("C02");
+    final byte[] c3 = Bytes.toBytes("C03");
+    final byte[] c4 = Bytes.toBytes("C04");
+    final byte[] c5 = Bytes.toBytes("C05");
+    final byte[] c6 = Bytes.toBytes("C06");
+    final byte[] c7 = Bytes.toBytes("C07");
+    final byte[] c8 = Bytes.toBytes("C08");
+    final byte[] c9 = Bytes.toBytes("C09");
+    final byte[] c10 = Bytes.toBytes("C10");
+    final byte[] c11 = Bytes.toBytes("C11");
+    final byte[] c12 = Bytes.toBytes("C12");
+    final byte[] c13 = Bytes.toBytes("C13");
+    final byte[] c14 = Bytes.toBytes("C14");
+    final byte[] c15 = Bytes.toBytes("C15");
+
+    final byte[] val = Bytes.toBytes("foo");
+    List<byte[]> fams = new ArrayList<>(1);
+    fams.add(family);
+    Table ht = TEST_UTIL
+        .createTable(table, fams.toArray(new byte[0][]), null, BloomType.ROWCOL, 10000,
+            new Configuration(TEST_UTIL.getConfiguration()));
+    List<Mutation> pending = new ArrayList<Mutation>();
+    for (int i = 0; i < 1000; i++) {
+      byte[] row = Bytes.toBytes("key" + Integer.toString(i));
+      Put put = new Put(row);
+      put.addColumn(family, c3, val);
+      put.addColumn(family, c4, val);
+      put.addColumn(family, c5, val);
+      put.addColumn(family, c6, val);
+      put.addColumn(family, c7, val);
+      put.addColumn(family, c8, val);
+      put.addColumn(family, c12, val);
+      put.addColumn(family, c13, val);
+      put.addColumn(family, c15, val);
+      pending.add(put);
+      Delete del = new Delete(row);
+      del.addColumns(family, c2);
+      del.addColumns(family, c9);
+      del.addColumns(family, c10);
+      del.addColumns(family, c14);
+      pending.add(del);
+    }
+    ht.batch(pending, new Object[pending.size()]);
+    TEST_UTIL.flush();
+    TEST_UTIL.compact(true);
+    for (int i = 20; i < 300; i++) {
+      byte[] row = Bytes.toBytes("key" + Integer.toString(i));
+      Put put = new Put(row);
+      put.addColumn(family, c3, val);
+      put.addColumn(family, c4, val);
+      put.addColumn(family, c5, val);
+      put.addColumn(family, c6, val);
+      put.addColumn(family, c7, val);
+      put.addColumn(family, c8, val);
+      put.addColumn(family, c12, val);
+      put.addColumn(family, c13, val);
+      put.addColumn(family, c15, val);
+      pending.add(put);
+      Delete del = new Delete(row);
+      del.addColumns(family, c2);
+      del.addColumns(family, c9);
+      del.addColumns(family, c10);
+      del.addColumns(family, c14);
+      pending.add(del);
+    }
+    ht.batch(pending, new Object[pending.size()]);
+    TEST_UTIL.flush();
+
+    Scan scan = new Scan();
+    scan.addColumn(family, c9);
+    scan.addColumn(family, c15);
+    SingleColumnValueFilter filter =
+        new SingleColumnValueFilter(family, c15, CompareFilter.CompareOp.EQUAL,
+            new BinaryComparator(c15));
+    scan.setFilter(filter);
+    // Trigger the scan for a non-existing row, so it will scan over all rows
+    for (Result result : ht.getScanner(scan)) {
+      result.advance();
+    }
+    ht.close();
+  }
+}
\ No newline at end of file


[03/11] hbase git commit: HBASE-20097 Merge TableDescriptors#getAll and TableDescriptors#getAllDescriptors into one

Posted by bu...@apache.org.
HBASE-20097 Merge TableDescriptors#getAll and TableDescriptors#getAllDescriptors into one

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/62ee7d95
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/62ee7d95
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/62ee7d95

Branch: refs/heads/HBASE-15151
Commit: 62ee7d9502d599f704ea8c9cf16f9ac4c1b1d22a
Parents: 197bd79
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Tue Feb 27 16:34:14 2018 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Wed Feb 28 15:21:21 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/TableDescriptors.java   |  9 ---------
 .../hadoop/hbase/master/TableStateManager.java      |  2 +-
 .../hadoop/hbase/util/FSTableDescriptors.java       | 16 +---------------
 .../hbase/master/assignment/MockMasterServices.java |  5 -----
 4 files changed, 2 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/62ee7d95/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index ecdfc82..5787f66 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -55,15 +55,6 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
-   * Get Map of all TableDescriptors. Populates the descriptor cache as a
-   * side effect.
-   * @return Map of all descriptors.
-   * @throws IOException
-   */
-  Map<String, TableDescriptor> getAllDescriptors()
-      throws IOException;
-
-  /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/hbase/blob/62ee7d95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 0d89eef..affb684 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -223,7 +223,7 @@ public class TableStateManager {
 
   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
       throws IOException {
-    final Map<String, TableDescriptor> allDescriptors = tableDescriptors.getAllDescriptors();
+    final Map<String, TableDescriptor> allDescriptors = tableDescriptors.getAll();
     final Map<String, TableState> states = new HashMap<>();
     // NOTE: Full hbase:meta table scan!
     MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
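
Call sites that used getAllDescriptors() now use getAll() directly; a minimal
sketch (the helper class is hypothetical):

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class DescriptorListing {
      /** Prints every table known to the descriptor cache; getAll() also populates it. */
      static void printTables(TableDescriptors tds) throws IOException {
        Map<String, TableDescriptor> all = tds.getAll();
        for (String name : all.keySet()) {
          System.out.println(name);
        }
      }
    }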

http://git-wip-us.apache.org/repos/asf/hbase/blob/62ee7d95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index c72b9e0..b4b0be0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -248,7 +248,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAllDescriptors()
+  public Map<String, TableDescriptor> getAll()
   throws IOException {
     Map<String, TableDescriptor> tds = new TreeMap<>();
 
@@ -282,20 +282,6 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Returns a map from table name to table descriptor for all tables.
-   */
-  @Override
-  public Map<String, TableDescriptor> getAll() throws IOException {
-    Map<String, TableDescriptor> htds = new TreeMap<>();
-    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
-    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
-        .entrySet()) {
-      htds.put(entry.getKey(), entry.getValue());
-    }
-    return htds;
-  }
-
-  /**
     * Find descriptors by namespace.
     * @see #get(org.apache.hadoop.hbase.TableName)
     */

http://git-wip-us.apache.org/repos/asf/hbase/blob/62ee7d95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 91f123f..6cd399d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -327,11 +327,6 @@ public class MockMasterServices extends MockNoopMasterServices {
         return null;
       }
 
-      @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
-        // noop
-        return null;
-      }
-
       @Override
       public TableDescriptor get(TableName tablename) throws IOException {
         TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tablename);


[10/11] hbase git commit: HBASE-18467 temporarily mute posting to jira

Posted by bu...@apache.org.
HBASE-18467 temporarily mute posting to jira


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd18ab6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd18ab6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd18ab6f

Branch: refs/heads/HBASE-15151
Commit: dd18ab6f8483160c80caaf4ec4a405fda8e98a93
Parents: aa34561
Author: Sean Busbey <bu...@apache.org>
Authored: Sun Feb 25 00:59:52 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Wed Feb 28 14:30:34 2018 -0600

----------------------------------------------------------------------
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd18ab6f/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index fe3676d..00d0403 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -467,7 +467,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
            echo ""
            echo "[INFO] There are ${currentBuild.changeSets.size()} change sets."
            getJirasToComment(currentBuild).each { currentIssue ->
-             jiraComment issueKey: currentIssue, body: comment
+//             jiraComment issueKey: currentIssue, body: comment
+               echo "jiraComment issueKey: ${currentIssue}, body: ${comment}"
            }
         } catch (Exception exception) {
           echo "Got exception: ${exception}"


[07/11] hbase git commit: HBASE-18133 Decrease quota reaction latency by HBase

Posted by bu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
new file mode 100644
index 0000000..f90ed82
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClientServiceCallable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RpcRetryingCaller;
+import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+
+@Category({MediumTests.class})
+public class TestLowLatencySpaceQuotas {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestLowLatencySpaceQuotas.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  // Global for all tests in the class
+  private static final AtomicLong COUNTER = new AtomicLong(0);
+
+  @Rule
+  public TestName testName = new TestName();
+  private SpaceQuotaHelperForTests helper;
+  private Connection conn;
+  private Admin admin;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    // The default 1s period for QuotaObserverChore is good.
+    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
+    // Set the period to read region size from HDFS to be very long
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000 * 120);
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void removeAllQuotas() throws Exception {
+    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
+    conn = TEST_UTIL.getConnection();
+    admin = TEST_UTIL.getAdmin();
+    helper.waitForQuotaTable(conn);
+  }
+
+  @Test
+  public void testFlushes() throws Exception {
+    TableName tn = helper.createTableWithRegions(1);
+    // Set a quota
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
+    admin.setQuota(settings);
+
+    // Write some data
+    final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
+    helper.writeData(tn, initialSize);
+
+    // Make sure a flush happened
+    admin.flush(tn);
+
+    // We should be able to observe the system recording an increase in size (even
+    // though we know the filesystem scanning did not happen).
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= initialSize;
+      }
+    });
+  }
+
+  @Test
+  public void testMajorCompaction() throws Exception {
+    TableName tn = helper.createTableWithRegions(1);
+    // Set a quota
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
+    admin.setQuota(settings);
+
+    // Write some data and flush it to disk.
+    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
+    helper.writeData(tn, sizePerBatch);
+    admin.flush(tn);
+
+    // Write the same data again, flushing it to a second file
+    helper.writeData(tn, sizePerBatch);
+    admin.flush(tn);
+
+    // After two flushes, both hfiles would contain similar data. We should see 2x the data.
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= 2L * sizePerBatch;
+      }
+    });
+
+    // Rewrite the two files into one.
+    admin.majorCompact(tn);
+
+    // After we major compact the table, we should notice quickly that the amount of data in the
+    // table is much closer to reality (the duplicate entries across the two files are removed).
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= sizePerBatch && snapshot.getUsage() <= 2L * sizePerBatch;
+      }
+    });
+  }
+
+  @Test
+  public void testMinorCompaction() throws Exception {
+    TableName tn = helper.createTableWithRegions(1);
+    // Set a quota
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
+    admin.setQuota(settings);
+
+    // Write some data and flush it to disk.
+    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
+    final long numBatches = 6;
+    for (long i = 0; i < numBatches; i++) {
+      helper.writeData(tn, sizePerBatch);
+      admin.flush(tn);
+    }
+
+    HRegion region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
+    long numFiles = getNumHFilesForRegion(region);
+    assertEquals(numBatches, numFiles);
+
+    // After six flushes, each hfile contains similar data, so we should see 6x the data.
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= numFiles * sizePerBatch;
+      }
+    });
+
+    // Rewrite some files into fewer
+    TEST_UTIL.compact(tn, false);
+    long numFilesAfterMinorCompaction = getNumHFilesForRegion(region);
+
+    // After the minor compaction, we should quickly see usage drop to roughly the
+    // remaining number of files times the batch size (duplicate entries were merged).
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= numFilesAfterMinorCompaction * sizePerBatch &&
+            snapshot.getUsage() <= (numFilesAfterMinorCompaction + 1) * sizePerBatch;
+      }
+    });
+  }
+
+  private long getNumHFilesForRegion(HRegion region) {
+    return region.getStores().stream().mapToLong((s) -> s.getNumHFiles()).sum();
+  }
+
+  @Test
+  public void testBulkLoading() throws Exception {
+    TableName tn = helper.createTableWithRegions(1);
+    // Set a quota
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
+    admin.setQuota(settings);
+
+    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tn, 3, 550);
+    // Make sure the files are about as long as we expect
+    FileSystem fs = TEST_UTIL.getTestFileSystem();
+    FileStatus[] files = fs.listStatus(
+        new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
+    long totalSize = 0;
+    for (FileStatus file : files) {
+      assertTrue(
+          "Expected the file, " + file.getPath() + ",  length to be larger than 25KB, but was "
+              + file.getLen(),
+          file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
+      totalSize += file.getLen();
+    }
+
+    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
+    RpcRetryingCaller<Boolean> caller = factory.<Boolean> newCaller();
+    assertTrue("The bulk load failed", caller.callWithRetries(callable, Integer.MAX_VALUE));
+
+    final long finalTotalSize = totalSize;
+    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
+      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+        return snapshot.getUsage() >= finalTotalSize;
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
index debd54c..7391fa1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
@@ -29,17 +29,18 @@ import java.util.Map.Entry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -86,8 +87,8 @@ public class TestQuotaObserverChoreRegionReports {
   @Test
   public void testReportExpiration() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
-    // Send reports every 30 seconds
-    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 25000);
+    // Send reports every 25 seconds
+    conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, 25000);
     // Expire the reports after 5 seconds
     conf.setInt(QuotaObserverChore.REGION_REPORT_RETENTION_DURATION_KEY, 5000);
     TEST_UTIL.startMiniCluster(1);
@@ -103,8 +104,8 @@ public class TestQuotaObserverChoreRegionReports {
 
     // Create a table
     final TableName tn = TableName.valueOf("reportExpiration");
-    HTableDescriptor tableDesc = new HTableDescriptor(tn);
-    tableDesc.addFamily(new HColumnDescriptor(FAM1));
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn).addColumnFamily(
+        ColumnFamilyDescriptorBuilder.of(FAM1)).build();
     TEST_UTIL.getAdmin().createTable(tableDesc);
 
     // No reports right after we created this table.
@@ -148,8 +149,8 @@ public class TestQuotaObserverChoreRegionReports {
 
     // Create a table
     final TableName tn = TableName.valueOf("quotaAcceptanceWithoutReports");
-    HTableDescriptor tableDesc = new HTableDescriptor(tn);
-    tableDesc.addFamily(new HColumnDescriptor(FAM1));
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn).addColumnFamily(
+        ColumnFamilyDescriptorBuilder.of(FAM1)).build();
     TEST_UTIL.getAdmin().createTable(tableDesc);
 
     // Set a quota

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java
new file mode 100644
index 0000000..9217762
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({SmallTests.class})
+public class TestRegionSizeImpl {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestRegionSizeImpl.class);
+
+  @Test
+  public void testReportingWithSizeChanges() {
+    long currentSize = 1024L;
+    RegionSizeImpl size = new RegionSizeImpl(currentSize);
+
+    assertEquals(currentSize, size.getSize());
+
+    currentSize *= 2L;
+    size.setSize(currentSize);
+    assertEquals(currentSize, size.getSize());
+
+    long delta = 512L;
+    currentSize += delta;
+    size.incrementSize(delta);
+    assertEquals(currentSize, size.getSize());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java
new file mode 100644
index 0000000..6541cdc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({SmallTests.class})
+public class TestRegionSizeReportingChore {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestRegionSizeReportingChore.class);
+
+  @Test
+  public void testDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    RegionSizeReportingChore chore = new RegionSizeReportingChore(rs);
+    assertEquals(
+        RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT,
+        chore.getInitialDelay());
+    assertEquals(
+        RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT, chore.getPeriod());
+    assertEquals(
+        TimeUnit.valueOf(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT),
+        chore.getTimeUnit());
+  }
+
+  @Test
+  public void testNonDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final int period = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT + 1;
+    final long delay = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT + 1L;
+    final String timeUnit = TimeUnit.SECONDS.name();
+    conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, period);
+    conf.setLong(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_KEY, delay);
+    conf.set(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY, timeUnit);
+    RegionSizeReportingChore chore = new RegionSizeReportingChore(rs);
+    assertEquals(delay, chore.getInitialDelay());
+    assertEquals(period, chore.getPeriod());
+    assertEquals(TimeUnit.valueOf(timeUnit), chore.getTimeUnit());
+  }
+
+  @Test
+  public void testRemovableOfNonOnlineRegions() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    RegionSizeReportingChore chore = new RegionSizeReportingChore(rs);
+
+    RegionInfo infoA = RegionInfoBuilder.newBuilder(TableName.valueOf("T1"))
+        .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build();
+    RegionInfo infoB = RegionInfoBuilder.newBuilder(TableName.valueOf("T1"))
+        .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("d")).build();
+    RegionInfo infoC = RegionInfoBuilder.newBuilder(TableName.valueOf("T1"))
+        .setStartKey(Bytes.toBytes("c")).setEndKey(Bytes.toBytes("d")).build();
+
+    RegionSizeStore store = new RegionSizeStoreImpl();
+    store.put(infoA, 1024L);
+    store.put(infoB, 1024L);
+    store.put(infoC, 1024L);
+
+    // If there are no online regions, all entries should be removed.
+    chore.removeNonOnlineRegions(store, Collections.<RegionInfo> emptySet());
+    assertTrue(store.isEmpty());
+
+    store.put(infoA, 1024L);
+    store.put(infoB, 1024L);
+    store.put(infoC, 1024L);
+
+    // Remove a single region
+    chore.removeNonOnlineRegions(store, new HashSet<>(Arrays.asList(infoA, infoC)));
+    assertEquals(2, store.size());
+    assertNotNull(store.getRegionSize(infoA));
+    assertNotNull(store.getRegionSize(infoC));
+  }
+
+  /**
+   * Creates an HBase Configuration object for the default values.
+   */
+  private Configuration getDefaultHBaseConfiguration() {
+    final Configuration conf = HBaseConfiguration.create();
+    conf.addResource("hbase-default.xml");
+    return conf;
+  }
+
+  private HRegionServer mockRegionServer(Configuration conf) {
+    HRegionServer rs = mock(HRegionServer.class);
+    when(rs.getConfiguration()).thenReturn(conf);
+    return rs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java
new file mode 100644
index 0000000..688fde0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({SmallTests.class})
+public class TestRegionSizeStoreImpl {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestRegionSizeStoreImpl.class);
+
+  private static final RegionInfo INFOA = RegionInfoBuilder.newBuilder(TableName.valueOf("TEST"))
+      .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build();
+  private static final RegionInfo INFOB = RegionInfoBuilder.newBuilder(TableName.valueOf("TEST"))
+      .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("c")).build();
+
+  @Test
+  public void testSizeUpdates() {
+    RegionSizeStore store = new RegionSizeStoreImpl();
+    assertTrue(store.isEmpty());
+    assertEquals(0, store.size());
+
+    store.put(INFOA, 1024L);
+
+    assertFalse(store.isEmpty());
+    assertEquals(1, store.size());
+    assertEquals(1024L, store.getRegionSize(INFOA).getSize());
+
+    store.put(INFOA, 2048L);
+    assertEquals(1, store.size());
+    assertEquals(2048L, store.getRegionSize(INFOA).getSize());
+
+    store.incrementRegionSize(INFOA, 512L);
+    assertEquals(1, store.size());
+    assertEquals(2048L + 512L, store.getRegionSize(INFOA).getSize());
+
+    store.remove(INFOA);
+    assertTrue(store.isEmpty());
+    assertEquals(0, store.size());
+
+    store.put(INFOA, 64L);
+    store.put(INFOB, 128L);
+
+    assertEquals(2, store.size());
+    Map<RegionInfo,RegionSize> records = new HashMap<>();
+    for (Entry<RegionInfo,RegionSize> entry : store) {
+      records.put(entry.getKey(), entry.getValue());
+    }
+
+    assertEquals(64L, records.remove(INFOA).getSize());
+    assertEquals(128L, records.remove(INFOB).getSize());
+    assertTrue(records.isEmpty());
+  }
+
+  @Test
+  public void testNegativeDeltaForMissingRegion() {
+    RegionSizeStore store = new RegionSizeStoreImpl();
+
+    assertNull(store.getRegionSize(INFOA));
+
+    // We shouldn't allow a negative size to enter the RegionSizeStore. Getting a negative size
+    // like this shouldn't be possible, but we can prevent the bad state from propagating and
+    // getting worse.
+    store.incrementRegionSize(INFOA, -5);
+    assertNotNull(store.getRegionSize(INFOA));
+    assertEquals(0, store.getRegionSize(INFOA).getSize());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
index ae71b31..9b6a1b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
@@ -23,9 +23,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.atomic.AtomicLong;
@@ -36,7 +34,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
@@ -52,17 +49,13 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -247,9 +240,9 @@ public class TestSpaceQuotas {
     TableName tableName = writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);
 
     // The table is now in violation. Try to do a bulk load
-    ClientServiceCallable<Void> callable = generateFileToLoad(tableName, 1, 50);
+    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tableName, 1, 50);
     RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
-    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
+    RpcRetryingCaller<Boolean> caller = factory.newCaller();
     try {
       caller.callWithRetries(callable, Integer.MAX_VALUE);
       fail("Expected the bulk load call to fail!");
@@ -298,7 +291,7 @@ public class TestSpaceQuotas {
         enforcement instanceof DefaultViolationPolicyEnforcement);
 
     // Should generate two files, each of which is over 25KB
-    ClientServiceCallable<Void> callable = generateFileToLoad(tn, 2, 500);
+    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tn, 2, 525);
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     FileStatus[] files = fs.listStatus(
         new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
@@ -311,7 +304,7 @@ public class TestSpaceQuotas {
     }
 
     RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
-    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
+    RpcRetryingCaller<Boolean> caller = factory.newCaller();
     try {
       caller.callWithRetries(callable, Integer.MAX_VALUE);
       fail("Expected the bulk load call to fail!");
@@ -432,39 +425,4 @@ public class TestSpaceQuotas {
     assertTrue(
         "Expected to see an exception writing data to a table exceeding its quota", sawError);
   }
-
-  private ClientServiceCallable<Void> generateFileToLoad(
-      TableName tn, int numFiles, int numRowsPerFile) throws Exception {
-    Connection conn = TEST_UTIL.getConnection();
-    FileSystem fs = TEST_UTIL.getTestFileSystem();
-    Configuration conf = TEST_UTIL.getConfiguration();
-    Path baseDir = new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files");
-    fs.mkdirs(baseDir);
-    final List<Pair<byte[], String>> famPaths = new ArrayList<Pair<byte[], String>>();
-    for (int i = 1; i <= numFiles; i++) {
-      Path hfile = new Path(baseDir, "file" + i);
-      TestHRegionServerBulkLoad.createHFile(
-          fs, hfile, Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
-          Bytes.toBytes("reject"), numRowsPerFile);
-      famPaths.add(new Pair<>(Bytes.toBytes(SpaceQuotaHelperForTests.F1), hfile.toString()));
-    }
-
-    // bulk load HFiles
-    Table table = conn.getTable(tn);
-    final String bulkToken = new SecureBulkLoadClient(conf, table).prepareBulkLoad(conn);
-    return new ClientServiceCallable<Void>(conn,
-        tn, Bytes.toBytes("row"), new RpcControllerFactory(conf).newController(), HConstants.PRIORITY_UNSET) {
-      @Override
-      public Void rpcCall() throws Exception {
-        SecureBulkLoadClient secureClient = null;
-        byte[] regionName = getLocation().getRegionInfo().getRegionName();
-        try (Table table = conn.getTable(getTableName())) {
-          secureClient = new SecureBulkLoadClient(conf, table);
-          secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
-                true, null, bulkToken);
-        }
-        return null;
-      }
-    };
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
index 3628738..4995de7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
@@ -78,7 +78,7 @@ public class TestBulkLoadCheckingViolationPolicyEnforcement {
 
     policy.initialize(rss, tableName, snapshot);
 
-    policy.checkBulkLoad(fs, paths);
+    policy.computeBulkLoadSize(fs, paths);
   }
 
   @Test(expected = IllegalArgumentException.class)
@@ -97,7 +97,7 @@ public class TestBulkLoadCheckingViolationPolicyEnforcement {
     policy.initialize(rss, tableName, snapshot);
 
     // If the file to bulk load isn't a file, this should throw an exception
-    policy.checkBulkLoad(fs, paths);
+    policy.computeBulkLoadSize(fs, paths);
   }
 
   @Test(expected = SpaceLimitingException.class)
@@ -120,7 +120,7 @@ public class TestBulkLoadCheckingViolationPolicyEnforcement {
 
     policy.initialize(rss, tableName, snapshot);
 
-    policy.checkBulkLoad(fs, paths);
+    policy.computeBulkLoadSize(fs, paths);
   }
 
   @Test(expected = SpaceLimitingException.class)
@@ -143,6 +143,6 @@ public class TestBulkLoadCheckingViolationPolicyEnforcement {
 
     policy.initialize(rss, tableName, snapshot);
 
-    policy.checkBulkLoad(fs, paths);
+    policy.computeBulkLoadSize(fs, paths);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 9479890..1b2009c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.quotas.RegionSizeStoreImpl;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
@@ -1658,6 +1659,56 @@ public class TestHStore {
     assertFalse(heap.equals(heap2));
   }
 
+  @Test
+  public void testSpaceQuotaChangeAfterReplacement() throws IOException {
+    final TableName tn = TableName.valueOf(name.getMethodName());
+    init(name.getMethodName());
+
+    RegionSizeStoreImpl sizeStore = new RegionSizeStoreImpl();
+
+    HStoreFile sf1 = mockStoreFileWithLength(1024L);
+    HStoreFile sf2 = mockStoreFileWithLength(2048L);
+    HStoreFile sf3 = mockStoreFileWithLength(4096L);
+    HStoreFile sf4 = mockStoreFileWithLength(8192L);
+
+    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).setStartKey(Bytes.toBytes("a"))
+        .setEndKey(Bytes.toBytes("b")).build();
+
+    // Compacting two files down to one, reducing size
+    sizeStore.put(regionInfo, 1024L + 4096L);
+    store.updateSpaceQuotaAfterFileReplacement(
+        sizeStore, regionInfo, Arrays.asList(sf1, sf3), Arrays.asList(sf2));
+
+    assertEquals(2048L, sizeStore.getRegionSize(regionInfo).getSize());
+
+    // The same file length in and out should have no change
+    store.updateSpaceQuotaAfterFileReplacement(
+        sizeStore, regionInfo, Arrays.asList(sf2), Arrays.asList(sf2));
+
+    assertEquals(2048L, sizeStore.getRegionSize(regionInfo).getSize());
+
+    // Increase the total size used
+    store.updateSpaceQuotaAfterFileReplacement(
+        sizeStore, regionInfo, Arrays.asList(sf2), Arrays.asList(sf3));
+
+    assertEquals(4096L, sizeStore.getRegionSize(regionInfo).getSize());
+
+    RegionInfo regionInfo2 = RegionInfoBuilder.newBuilder(tn).setStartKey(Bytes.toBytes("b"))
+        .setEndKey(Bytes.toBytes("c")).build();
+    store.updateSpaceQuotaAfterFileReplacement(sizeStore, regionInfo2, null, Arrays.asList(sf4));
+
+    assertEquals(8192L, sizeStore.getRegionSize(regionInfo2).getSize());
+  }
+
+  private HStoreFile mockStoreFileWithLength(long length) {
+    HStoreFile sf = mock(HStoreFile.class);
+    StoreFileReader sfr = mock(StoreFileReader.class);
+    when(sf.isHFile()).thenReturn(true);
+    when(sf.getReader()).thenReturn(sfr);
+    when(sfr.length()).thenReturn(length);
+    return sf;
+  }
+
   private static class MyThread extends Thread {
     private StoreScanner scanner;
     private KeyValueHeap heap;

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
index e17b87c..3cac439 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
@@ -25,12 +25,13 @@ import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.mock;
 
-import java.util.HashMap;
-import java.util.Map;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.quotas.RegionSize;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
+import org.apache.hadoop.hbase.quotas.RegionSizeStoreFactory;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.ClassRule;
@@ -68,52 +69,24 @@ public class TestRegionServerRegionSpaceUseReport {
         .setStartKey(Bytes.toBytes("c"))
         .setEndKey(Bytes.toBytes("d"))
         .build();
-    Map<RegionInfo,Long> sizes = new HashMap<>();
-    sizes.put(hri1, 1024L * 1024L);
-    sizes.put(hri2, 1024L * 1024L * 8L);
-    sizes.put(hri3, 1024L * 1024L * 32L);
+    RegionSizeStore store = RegionSizeStoreFactory.getInstance().createStore();
+    store.put(hri1, 1024L * 1024L);
+    store.put(hri2, 1024L * 1024L * 8L);
+    store.put(hri3, 1024L * 1024L * 32L);
 
     // Call the real method to convert the map into a protobuf
     HRegionServer rs = mock(HRegionServer.class);
-    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any());
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(RegionSizeStore.class));
     doCallRealMethod().when(rs).convertRegionSize(any(), anyLong());
 
-    RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(sizes);
-    assertEquals(sizes.size(), requests.getSpaceUseCount());
+    RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(store);
+    assertEquals(store.size(), requests.getSpaceUseCount());
     for (RegionSpaceUse spaceUse : requests.getSpaceUseList()) {
       RegionInfo hri = ProtobufUtil.toRegionInfo(spaceUse.getRegionInfo());
-      Long expectedSize = sizes.remove(hri);
+      RegionSize expectedSize = store.remove(hri);
       assertNotNull("Could not find size for HRI: " + hri, expectedSize);
-      assertEquals(expectedSize.longValue(), spaceUse.getRegionSize());
+      assertEquals(expectedSize.getSize(), spaceUse.getRegionSize());
     }
-    assertTrue("Should not have any space use entries left: " + sizes, sizes.isEmpty());
-  }
-
-  @Test(expected = NullPointerException.class)
-  public void testNullMap() {
-    // Call the real method to convert the map into a protobuf
-    HRegionServer rs = mock(HRegionServer.class);
-    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any());
-    doCallRealMethod().when(rs).convertRegionSize(any(), anyLong());
-
-    rs.buildRegionSpaceUseReportRequest(null);
-  }
-
-  @Test(expected = NullPointerException.class)
-  public void testMalformedMap() {
-    TableName tn = TableName.valueOf("table1");
-    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tn)
-        .setStartKey(Bytes.toBytes("a"))
-        .setEndKey(Bytes.toBytes("b"))
-        .build();
-    Map<RegionInfo,Long> sizes = new HashMap<>();
-    sizes.put(hri1, null);
-
-    // Call the real method to convert the map into a protobuf
-    HRegionServer rs = mock(HRegionServer.class);
-    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any());
-    doCallRealMethod().when(rs).convertRegionSize(any(), anyLong());
-
-    rs.buildRegionSpaceUseReportRequest(sizes);
+    assertTrue("Should not have any space use entries left: " + store, store.isEmpty());
   }
 }


[08/11] hbase git commit: HBASE-18133 Decrease quota reaction latency by HBase

Posted by bu...@apache.org.
HBASE-18133 Decrease quota reaction latency by HBase

Certain operations in HBase are known to directly affect
the utilization of tables on HDFS. When these actions
occur, we can circumvent the normal path and notify the
Master directly. This results in a much faster response to
changes in HDFS usage.

This requires that FS scanning by the RS be decoupled from
the reporting of sizes to the Master. An API inside each
RS lets any operation that changes a Region's size on disk
(e.g. compaction, flush, bulk load) hook in and update the
tracked size directly.
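
A minimal sketch of how such an operation might hook in, using the
RegionSizeStore API added below (the flush trigger and the
flushedBytes variable are hypothetical, for illustration only):

    // e.g. at the end of a flush that added flushedBytes to a region
    RegionSizeStore sizeStore =
        rs.getRegionServerSpaceQuotaManager().getRegionSizeStore();
    sizeStore.incrementRegionSize(region.getRegionInfo(), flushedBytes);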

Signed-off-by: Ted Yu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdedcc56
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdedcc56
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdedcc56

Branch: refs/heads/HBASE-15151
Commit: bdedcc5631fa9c8c400d4daa01b8a1947d4a12dd
Parents: 393ab30
Author: Josh Elser <el...@apache.org>
Authored: Wed Feb 21 18:27:51 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Wed Feb 28 14:11:20 2018 -0500

----------------------------------------------------------------------
 .../MetricsRegionServerQuotaSource.java         |  24 ++
 .../MetricsRegionServerQuotaSourceImpl.java     |  86 +++++++
 ....regionserver.MetricsRegionServerQuotaSource |  18 ++
 .../quotas/FileSystemUtilizationChore.java      |  39 +---
 .../hbase/quotas/NoOpRegionSizeStore.java       |  76 +++++++
 .../quotas/RegionServerSpaceQuotaManager.java   |  22 ++
 .../apache/hadoop/hbase/quotas/RegionSize.java  |  50 ++++
 .../hadoop/hbase/quotas/RegionSizeImpl.java     |  70 ++++++
 .../hbase/quotas/RegionSizeReportingChore.java  | 156 +++++++++++++
 .../hadoop/hbase/quotas/RegionSizeStore.java    |  82 +++++++
 .../hbase/quotas/RegionSizeStoreFactory.java    |  38 ++++
 .../hbase/quotas/RegionSizeStoreImpl.java       | 105 +++++++++
 .../quotas/SpaceViolationPolicyEnforcement.java |   7 +-
 .../AbstractViolationPolicyEnforcement.java     |  32 ++-
 .../DefaultViolationPolicyEnforcement.java      |  34 +--
 ...ssingSnapshotViolationPolicyEnforcement.java |   8 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  11 +
 .../hbase/regionserver/HRegionServer.java       |  36 ++-
 .../hadoop/hbase/regionserver/HStore.java       |  40 ++++
 .../hbase/regionserver/MetricsRegionServer.java |  17 ++
 .../hbase/regionserver/RSRpcServices.java       |  19 +-
 .../regionserver/RegionServerServices.java      |   9 +
 .../hadoop/hbase/MockRegionServerServices.java  |   6 +
 .../hadoop/hbase/master/MockRegionServer.java   |   6 +
 .../hbase/quotas/SpaceQuotaHelperForTests.java  |  70 +++++-
 .../quotas/TestFileSystemUtilizationChore.java  |  27 +--
 .../hbase/quotas/TestLowLatencySpaceQuotas.java | 228 +++++++++++++++++++
 .../TestQuotaObserverChoreRegionReports.java    |  17 +-
 .../hadoop/hbase/quotas/TestRegionSizeImpl.java |  49 ++++
 .../quotas/TestRegionSizeReportingChore.java    | 127 +++++++++++
 .../hbase/quotas/TestRegionSizeStoreImpl.java   | 101 ++++++++
 .../hadoop/hbase/quotas/TestSpaceQuotas.java    |  50 +---
 ...kLoadCheckingViolationPolicyEnforcement.java |   8 +-
 .../hadoop/hbase/regionserver/TestHStore.java   |  51 +++++
 .../TestRegionServerRegionSpaceUseReport.java   |  53 ++---
 35 files changed, 1575 insertions(+), 197 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java
index 3175aad..5463a6a 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java
@@ -28,6 +28,14 @@ public interface MetricsRegionServerQuotaSource extends BaseSource {
   String METRICS_DESCRIPTION = "Metrics about HBase RegionServer Quotas";
   String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
 
+  String NUM_TABLES_IN_VIOLATION_NAME = "numTablesInViolation";
+  String NUM_SPACE_SNAPSHOTS_RECEIVED_NAME = "numSpaceSnapshotsReceived";
+  String FILE_SYSTEM_UTILIZATION_CHORE_TIME = "fileSystemUtilizationChoreTime";
+  String SPACE_QUOTA_REFRESHER_CHORE_TIME = "spaceQuotaRefresherChoreTime";
+
+  String NUM_REGION_SIZE_REPORT_NAME = "numRegionSizeReports";
+  String REGION_SIZE_REPORTING_CHORE_TIME_NAME = "regionSizeReportingChoreTime";
+
   /**
    * Updates the metric tracking how many tables this RegionServer has marked as in violation
    * of their space quota.
@@ -57,4 +65,20 @@ public interface MetricsRegionServerQuotaSource extends BaseSource {
    * @param time The execution time of the chore in milliseconds.
    */
   void incrementSpaceQuotaRefresherChoreTime(long time);
+
+  /**
+   * Updates the metric tracking how many region size reports were sent from this RegionServer to
+   * the Master. These reports contain information on the size of each Region hosted locally.
+   *
+   * @param numReportsSent The number of region size reports sent
+   */
+  void incrementNumRegionSizeReportsSent(long numReportsSent);
+
+  /**
+   * Updates the metric tracking how much time was spent sending region size reports to the Master
+   * by the RegionSizeReportingChore.
+   *
+   * @param time The execution time in milliseconds.
+   */
+  void incrementRegionSizeReportingChoreTime(long time);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java
new file mode 100644
index 0000000..3a796dd
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Implementation of {@link MetricsRegionServerQuotaSource}.
+ */
+@InterfaceAudience.Private
+public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implements
+    MetricsRegionServerQuotaSource {
+
+  private final Meter tablesInViolationCounter;
+  private final Meter spaceQuotaSnapshotsReceived;
+  private final Timer fileSystemUtilizationChoreTimer;
+  private final Timer spaceQuotaRefresherChoreTimer;
+  private final Counter regionSizeReportCounter;
+  private final Timer regionSizeReportingChoreTimer;
+
+  public MetricsRegionServerQuotaSourceImpl() {
+    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+  }
+
+  public MetricsRegionServerQuotaSourceImpl(String metricsName, String metricsDescription,
+      String metricsContext, String metricsJmxContext) {
+    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+
+    tablesInViolationCounter = this.registry.meter(NUM_TABLES_IN_VIOLATION_NAME);
+    spaceQuotaSnapshotsReceived = this.registry.meter(NUM_SPACE_SNAPSHOTS_RECEIVED_NAME);
+    fileSystemUtilizationChoreTimer = this.registry.timer(FILE_SYSTEM_UTILIZATION_CHORE_TIME);
+    spaceQuotaRefresherChoreTimer = this.registry.timer(SPACE_QUOTA_REFRESHER_CHORE_TIME);
+    regionSizeReportCounter = this.registry.counter(NUM_REGION_SIZE_REPORT_NAME);
+    regionSizeReportingChoreTimer = registry.timer(REGION_SIZE_REPORTING_CHORE_TIME_NAME);
+  }
+
+  @Override
+  public void updateNumTablesInSpaceQuotaViolation(long tablesInViolation) {
+    this.tablesInViolationCounter.mark(tablesInViolation);
+  }
+
+  @Override
+  public void updateNumTableSpaceQuotaSnapshots(long numSnapshots) {
+    this.spaceQuotaSnapshotsReceived.mark(numSnapshots);
+  }
+
+  @Override
+  public void incrementSpaceQuotaFileSystemScannerChoreTime(long time) {
+    this.fileSystemUtilizationChoreTimer.updateMillis(time);
+  }
+
+  @Override
+  public void incrementSpaceQuotaRefresherChoreTime(long time) {
+    this.spaceQuotaRefresherChoreTimer.updateMillis(time);
+  }
+
+  @Override
+  public void incrementNumRegionSizeReportsSent(long numReportsSent) {
+    regionSizeReportCounter.increment(numReportsSent);
+  }
+
+  @Override
+  public void incrementRegionSizeReportingChoreTime(long time) {
+    regionSizeReportingChoreTimer.update(time, TimeUnit.MILLISECONDS);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSource
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSource
new file mode 100644
index 0000000..58fe4d9
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSource
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerQuotaSourceImpl
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
index eded076..edda4df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -16,10 +16,8 @@
  */
 package org.apache.hadoop.hbase.quotas;
 
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
@@ -36,7 +34,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A chore which computes the size of each {@link HRegion} on the FileSystem hosted by the given {@link HRegionServer}.
+ * A chore which computes the size of each {@link HRegion} on the FileSystem hosted by the given
+ * {@link HRegionServer}. The results of this computation are stored in the
+ * {@link RegionServerSpaceQuotaManager}'s {@link RegionSizeStore} object.
  */
 @InterfaceAudience.Private
 public class FileSystemUtilizationChore extends ScheduledChore {
@@ -53,9 +53,6 @@ public class FileSystemUtilizationChore extends ScheduledChore {
   static final String FS_UTILIZATION_MAX_ITERATION_DURATION_KEY = "hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis";
   static final long FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT = 5000L;
 
-  private int numberOfCyclesToSkip = 0, prevNumberOfCyclesToSkip = 0;
-  private static final int CYCLE_UPPER_BOUND = 32;
-
   private final HRegionServer rs;
   private final long maxIterationMillis;
   private Iterator<Region> leftoverRegions;
@@ -70,11 +67,7 @@ public class FileSystemUtilizationChore extends ScheduledChore {
 
   @Override
   protected void chore() {
-    if (numberOfCyclesToSkip > 0) {
-      numberOfCyclesToSkip--;
-      return;
-    }
-    final Map<RegionInfo, Long> onlineRegionSizes = new HashMap<>();
+    final RegionSizeStore regionSizeStore = getRegionSizeStore();
     final Set<Region> onlineRegions = new HashSet<>(rs.getRegions());
     // Process the regions from the last run if we have any. If we are somehow having difficulty
     // processing the Regions, we want to avoid creating a backlog in memory of Region objs.
@@ -100,7 +93,7 @@ public class FileSystemUtilizationChore extends ScheduledChore {
       long timeRunning = EnvironmentEdgeManager.currentTime() - start;
       if (timeRunning > maxIterationMillis) {
         LOG.debug("Preempting execution of FileSystemUtilizationChore because it exceeds the"
-            + " maximum iteration configuration value. Will process remaining iterators"
+            + " maximum iteration configuration value. Will process remaining Regions"
             + " on a subsequent invocation.");
         setLeftoverRegions(iterator);
         break;
@@ -124,7 +117,7 @@ public class FileSystemUtilizationChore extends ScheduledChore {
         continue;
       }
       final long sizeInBytes = computeSize(region);
-      onlineRegionSizes.put(region.getRegionInfo(), sizeInBytes);
+      regionSizeStore.put(region.getRegionInfo(), sizeInBytes);
       regionSizesCalculated++;
     }
     if (LOG.isTraceEnabled()) {
@@ -133,14 +126,6 @@ public class FileSystemUtilizationChore extends ScheduledChore {
           + skippedSplitParents + " regions due to being the parent of a split, and"
           + skippedRegionReplicas + " regions due to being region replicas.");
     }
-    if (!reportRegionSizesToMaster(onlineRegionSizes)) {
-      // backoff reporting
-      numberOfCyclesToSkip = prevNumberOfCyclesToSkip > 0 ? 2 * prevNumberOfCyclesToSkip : 1;
-      if (numberOfCyclesToSkip > CYCLE_UPPER_BOUND) {
-        numberOfCyclesToSkip = CYCLE_UPPER_BOUND;
-      }
-      prevNumberOfCyclesToSkip = numberOfCyclesToSkip;
-    }
   }
 
   /**
@@ -176,15 +161,9 @@ public class FileSystemUtilizationChore extends ScheduledChore {
     return regionSize;
   }
 
-  /**
-   * Reports the computed region sizes to the currently active Master.
-   *
-   * @param onlineRegionSizes The computed region sizes to report.
-   * @return {@code false} if FileSystemUtilizationChore should pause reporting to master,
-   *    {@code true} otherwise.
-   */
-  boolean reportRegionSizesToMaster(Map<RegionInfo,Long> onlineRegionSizes) {
-    return this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
+  // VisibleForTesting
+  RegionSizeStore getRegionSizeStore() {
+    return rs.getRegionServerSpaceQuotaManager().getRegionSizeStore();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoOpRegionSizeStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoOpRegionSizeStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoOpRegionSizeStore.java
new file mode 100644
index 0000000..df62d0a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoOpRegionSizeStore.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A {@link RegionSizeStore} implementation that stores nothing.
+ */
+@InterfaceAudience.Private
+public final class NoOpRegionSizeStore implements RegionSizeStore {
+  private static final NoOpRegionSizeStore INSTANCE = new NoOpRegionSizeStore();
+
+  private NoOpRegionSizeStore() {}
+
+  public static NoOpRegionSizeStore getInstance() {
+    return INSTANCE;
+  }
+
+  @Override
+  public Iterator<Entry<RegionInfo,RegionSize>> iterator() {
+    return null;
+  }
+
+  @Override
+  public long heapSize() {
+    return 0;
+  }
+
+  @Override
+  public RegionSize getRegionSize(RegionInfo regionInfo) {
+    return null;
+  }
+
+  @Override
+  public void put(RegionInfo regionInfo, long size) {}
+
+  @Override
+  public void incrementRegionSize(RegionInfo regionInfo, long delta) {}
+
+  @Override
+  public RegionSize remove(RegionInfo regionInfo) {
+    return null;
+  }
+
+  @Override
+  public int size() {
+    return 0;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return true;
+  }
+
+  @Override
+  public void clear() {}
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
index b0bdede..bbc6df8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
@@ -55,6 +55,8 @@ public class RegionServerSpaceQuotaManager {
   private boolean started = false;
   private final ConcurrentHashMap<TableName,SpaceViolationPolicyEnforcement> enforcedPolicies;
   private SpaceViolationPolicyEnforcementFactory factory;
+  private RegionSizeStore regionSizeStore;
+  private RegionSizeReportingChore regionSizeReporter;
 
   public RegionServerSpaceQuotaManager(RegionServerServices rsServices) {
     this(rsServices, SpaceViolationPolicyEnforcementFactory.getInstance());
@@ -67,6 +69,8 @@ public class RegionServerSpaceQuotaManager {
     this.factory = factory;
     this.enforcedPolicies = new ConcurrentHashMap<>();
     this.currentQuotaSnapshots = new AtomicReference<>(new HashMap<>());
+    // Initialize the size store to a no-op implementation; the real one is created in start()
+    this.regionSizeStore = NoOpRegionSizeStore.getInstance();
   }
 
   public synchronized void start() throws IOException {
@@ -79,8 +83,13 @@ public class RegionServerSpaceQuotaManager {
       LOG.warn("RegionServerSpaceQuotaManager has already been started!");
       return;
     }
+    // Start the chores
     this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getClusterConnection());
     rsServices.getChoreService().scheduleChore(spaceQuotaRefresher);
+    this.regionSizeReporter = new RegionSizeReportingChore(rsServices);
+    rsServices.getChoreService().scheduleChore(regionSizeReporter);
+    // Instantiate the real RegionSizeStore
+    this.regionSizeStore = RegionSizeStoreFactory.getInstance().createStore();
     started = true;
   }
 
@@ -89,6 +98,10 @@ public class RegionServerSpaceQuotaManager {
       spaceQuotaRefresher.cancel();
       spaceQuotaRefresher = null;
     }
+    if (regionSizeReporter != null) {
+      regionSizeReporter.cancel();
+      regionSizeReporter = null;
+    }
     started = false;
   }
 
@@ -212,6 +225,15 @@ public class RegionServerSpaceQuotaManager {
   }
 
   /**
+   * Returns the {@link RegionSizeStore} tracking filesystem utilization by each region.
+   *
+   * @return A {@link RegionSizeStore} implementation.
+   */
+  public RegionSizeStore getRegionSizeStore() {
+    return regionSizeStore;
+  }
+
+  /**
    * Returns the collection of tables which have quota violation policies enforced on
    * this RegionServer.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSize.java
new file mode 100644
index 0000000..c1d94d6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSize.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Interface encapsulating a Region's size, which is optionally reported to the master.
+ */
+@InterfaceAudience.Private
+public interface RegionSize extends HeapSize {
+
+  /**
+   * Updates the size of the Region.
+   *
+   * @param newSize the new size of the Region
+   * @return {@code this}
+   */
+  RegionSize setSize(long newSize);
+
+  /**
+   * Atomically adds the provided {@code delta} to the region size.
+   *
+   * @param delta The change in size in bytes of the region.
+   * @return {@code this}
+   */
+  RegionSize incrementSize(long delta);
+
+  /**
+   * Returns the size of the region.
+   *
+   * @return The size in bytes.
+   */
+  long getSize();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeImpl.java
new file mode 100644
index 0000000..2a433b4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeImpl.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An object encapsulating a Region's size and whether it's been reported to the master since
+ * the value last changed.
+ */
+@InterfaceAudience.Private
+public class RegionSizeImpl implements RegionSize {
+  private static final Logger LOG = LoggerFactory.getLogger(RegionSizeImpl.class);
+  private static final long HEAP_SIZE = ClassSize.OBJECT + ClassSize.ATOMIC_LONG +
+    ClassSize.REFERENCE;
+  private final AtomicLong size;
+
+  public RegionSizeImpl(long initialSize) {
+    // A region can never be negative in size. We clamp the value to zero to keep this from
+    // becoming a larger problem, but leave ourselves a trace log to figure out how we got here.
+    if (initialSize < 0L && LOG.isTraceEnabled()) {
+      LOG.trace("Nonsensical negative Region size being constructed, this is likely an error",
+          new Exception());
+    }
+    this.size = new AtomicLong(initialSize < 0L ? 0L : initialSize);
+  }
+
+  @Override
+  public long heapSize() {
+    return HEAP_SIZE;
+  }
+
+  @Override
+  public RegionSizeImpl setSize(long newSize) {
+    // Set the new size before advertising that we need to tell the master about it. Worst case
+    // we have to wait for the next period to report it.
+    size.set(newSize);
+    return this;
+  }
+
+  @Override
+  public RegionSizeImpl incrementSize(long delta) {
+    size.addAndGet(delta);
+    return this;
+  }
+
+  @Override
+  public long getSize() {
+    return size.get();
+  }
+}
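
A note on the clamping above, as a quick sketch: only the constructor guards against negative values, while incrementSize(long) is a plain AtomicLong.addAndGet(long) and can still drive the stored size below zero:

    RegionSize s = new RegionSizeImpl(-1024L);
    assert s.getSize() == 0L;        // a negative initial size is clamped to zero (and traced)
    s.incrementSize(-4096L);
    assert s.getSize() == -4096L;    // deltas, however, are not clamped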

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java
new file mode 100644
index 0000000..bf525e5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A Chore which sends the region size reports on this RegionServer to the Master.
+ */
+@InterfaceAudience.Private
+public class RegionSizeReportingChore extends ScheduledChore {
+  private static final Log LOG = LogFactory.getLog(RegionSizeReportingChore.class);
+
+  static final String REGION_SIZE_REPORTING_CHORE_PERIOD_KEY =
+      "hbase.regionserver.quotas.region.size.reporting.chore.period";
+  static final int REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT = 1000 * 60;
+
+  static final String REGION_SIZE_REPORTING_CHORE_DELAY_KEY =
+      "hbase.regionserver.quotas.region.size.reporting.chore.delay";
+  static final long REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT = 1000 * 30;
+
+  static final String REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY =
+      "hbase.regionserver.quotas.region.size.reporting.chore.timeunit";
+  static final String REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name();
+
+  private final RegionServerServices rsServices;
+  private final MetricsRegionServer metrics;
+
+  public RegionSizeReportingChore(RegionServerServices rsServices) {
+    super(
+        RegionSizeReportingChore.class.getSimpleName(), rsServices,
+        getPeriod(rsServices.getConfiguration()), getInitialDelay(rsServices.getConfiguration()),
+        getTimeUnit(rsServices.getConfiguration()));
+    this.rsServices = rsServices;
+    this.metrics = rsServices.getMetrics();
+  }
+
+  @Override
+  protected void chore() {
+    final long start = System.nanoTime();
+    try {
+      _chore();
+    } finally {
+      if (metrics != null) {
+        metrics.incrementRegionSizeReportingChoreTime(
+            TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS));
+      }
+    }
+  }
+
+  void _chore() {
+    final RegionServerSpaceQuotaManager quotaManager =
+        rsServices.getRegionServerSpaceQuotaManager();
+    // Get the RegionInfo for each online region
+    HashSet<RegionInfo> onlineRegionInfos = getOnlineRegionInfos(rsServices.getRegions());
+    RegionSizeStore store = quotaManager.getRegionSizeStore();
+    // Remove all sizes for non-online regions
+    removeNonOnlineRegions(store, onlineRegionInfos);
+    rsServices.reportRegionSizesForQuotas(store);
+  }
+
+  HashSet<RegionInfo> getOnlineRegionInfos(List<? extends Region> onlineRegions) {
+    HashSet<RegionInfo> regionInfos = new HashSet<>();
+    onlineRegions.forEach((region) -> regionInfos.add(region.getRegionInfo()));
+    return regionInfos;
+  }
+
+  void removeNonOnlineRegions(RegionSizeStore store, Set<RegionInfo> onlineRegions) {
+    // We have to remove regions which are no longer online from the store; otherwise they will
+    // continue to be sent to the Master, which would prevent size report expiration.
+    if (onlineRegions.isEmpty()) {
+      // Easy case: no online regions means no size reports
+      store.clear();
+      return;
+    }
+
+    Iterator<Entry<RegionInfo,RegionSize>> iter = store.iterator();
+    int numEntriesRemoved = 0;
+    while (iter.hasNext()) {
+      Entry<RegionInfo,RegionSize> entry = iter.next();
+      RegionInfo regionInfo = entry.getKey();
+      if (!onlineRegions.contains(regionInfo)) {
+        numEntriesRemoved++;
+        iter.remove();
+      }
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Removed " + numEntriesRemoved + " region sizes before reporting to Master "
+          + "because they are for non-online regions.");
+    }
+  }
+
+  /**
+   * Extracts the period for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore period or the default value.
+   */
+  static int getPeriod(Configuration conf) {
+    return conf.getInt(
+        REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT);
+  }
+
+  /**
+   * Extracts the initial delay for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore initial delay or the default value.
+   */
+  static long getInitialDelay(Configuration conf) {
+    return conf.getLong(
+        REGION_SIZE_REPORTING_CHORE_DELAY_KEY, REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT);
+  }
+
+  /**
+   * Extracts the time unit for the chore period and initial delay from the configuration. The
+   * configuration value for {@link #REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY} must correspond to a
+   * {@link TimeUnit} value.
+   *
+   * @param conf The configuration object.
+   * @return The configured time unit for the chore period and initial delay or the default value.
+   */
+  static TimeUnit getTimeUnit(Configuration conf) {
+    return TimeUnit.valueOf(conf.get(REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY,
+        REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT));
+  }
+}
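
For operators, the three keys above are ordinary Configuration settings. A sketch of tightening the reporting cadence follows; note that the timeunit value must be a valid java.util.concurrent.TimeUnit name, since getTimeUnit above resolves it with TimeUnit.valueOf:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Defaults are a 60 second period and a 30 second initial delay, in milliseconds.
    conf.setInt("hbase.regionserver.quotas.region.size.reporting.chore.period", 30 * 1000);
    conf.setLong("hbase.regionserver.quotas.region.size.reporting.chore.delay", 10 * 1000);
    conf.set("hbase.regionserver.quotas.region.size.reporting.chore.timeunit",
        TimeUnit.MILLISECONDS.name());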

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStore.java
new file mode 100644
index 0000000..bd5c5bb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStore.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * An interface for concurrently storing and updating the size of a Region.
+ */
+@InterfaceAudience.Private
+public interface RegionSizeStore extends Iterable<Entry<RegionInfo,RegionSize>>, HeapSize {
+
+  /**
+   * Returns the size for the given region if one exists. If no size exists, {@code null} is
+   * returned.
+   *
+   * @param regionInfo The region whose size is being fetched.
+   * @return The size in bytes of the region or null if no size is stored.
+   */
+  RegionSize getRegionSize(RegionInfo regionInfo);
+
+  /**
+   * Atomically sets the given {@code size} for a region.
+   *
+   * @param regionInfo An identifier for a region.
+   * @param size The size in bytes of the region.
+   */
+  void put(RegionInfo regionInfo, long size);
+
+  /**
+   * Atomically alters the size of a region.
+   *
+   * @param regionInfo The region to update.
+   * @param delta The change in size for the region, positive or negative.
+   */
+  void incrementRegionSize(RegionInfo regionInfo, long delta);
+
+  /**
+   * Removes the mapping for the given key, returning the value if one exists in the store.
+   *
+   * @param regionInfo The key to remove from the store
+   * @return The value removed from the store if one exists, otherwise null.
+   */
+  RegionSize remove(RegionInfo regionInfo);
+
+  /**
+   * Returns the number of entries in the store.
+   *
+   * @return The number of entries in the store.
+   */
+  int size();
+
+  /**
+   * Returns whether the store is empty.
+   *
+   * @return true if there are no entries in the store, otherwise false.
+   */
+  boolean isEmpty();
+
+  /**
+   * Removes all entries from the store.
+   */
+  void clear();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreFactory.java
new file mode 100644
index 0000000..2564ecb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A factory class for creating implementations of {@link RegionSizeStore}.
+ */
+@InterfaceAudience.Private
+public final class RegionSizeStoreFactory {
+  private static final RegionSizeStoreFactory INSTANCE = new RegionSizeStoreFactory();
+
+  private RegionSizeStoreFactory() {}
+
+  public static RegionSizeStoreFactory getInstance() {
+    return INSTANCE;
+  }
+
+  public RegionSizeStore createStore() {
+    // Presently, there is only one implementation.
+    return new RegionSizeStoreImpl();
+  }
+}
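
Tying the last two hunks together, a sketch of obtaining a store from the factory and exercising the RegionSizeStore interface; the table name and the RegionInfoBuilder usage are illustrative only:

    import java.util.Map.Entry;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.quotas.RegionSize;
    import org.apache.hadoop.hbase.quotas.RegionSizeStore;
    import org.apache.hadoop.hbase.quotas.RegionSizeStoreFactory;

    RegionSizeStore store = RegionSizeStoreFactory.getInstance().createStore();
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    store.put(region, 128L * 1024 * 1024);     // seed with a measured size
    store.incrementRegionSize(region, 4096L);  // fold in a delta, e.g. from a flush
    for (Entry<RegionInfo, RegionSize> entry : store) {
      System.out.println(entry.getKey().getEncodedName() + " -> " + entry.getValue().getSize());
    }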

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java
new file mode 100644
index 0000000..4b48869
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A {@link RegionSizeStore} implementation backed by a ConcurrentHashMap. We expect similar
+ * amounts of reads and writes to the "store", so using a RWLock is not going to provide any
+ * exceptional gains.
+ */
+@InterfaceAudience.Private
+public class RegionSizeStoreImpl implements RegionSizeStore {
+  private static final Log LOG = LogFactory.getLog(RegionSizeStoreImpl.class);
+  private static final long sizeOfEntry = ClassSize.align(
+      ClassSize.CONCURRENT_HASHMAP_ENTRY
+      + ClassSize.OBJECT + Bytes.SIZEOF_LONG
+      // TODO Have RegionInfo implement HeapSize. 100B is an approximation based on a heapdump.
+      + ClassSize.OBJECT + 100);
+  private final ConcurrentHashMap<RegionInfo,RegionSize> store;
+
+  public RegionSizeStoreImpl() {
+    store = new ConcurrentHashMap<>();
+  }
+
+  @Override
+  public Iterator<Entry<RegionInfo,RegionSize>> iterator() {
+    return store.entrySet().iterator();
+  }
+
+  @Override
+  public RegionSize getRegionSize(RegionInfo regionInfo) {
+    return store.get(regionInfo);
+  }
+
+  @Override
+  public void put(RegionInfo regionInfo, long size) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Setting space quota size for " + regionInfo + " to " + size);
+    }
+    // Atomic. Either sets the new size for the first time, or replaces the existing value.
+    store.compute(regionInfo,
+      (key,value) -> value == null ? new RegionSizeImpl(size) : value.setSize(size));
+  }
+
+  @Override
+  public void incrementRegionSize(RegionInfo regionInfo, long delta) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Updating space quota size for " + regionInfo + " with a delta of " + delta);
+    }
+    // Atomic. Adds the delta to the stored value if one exists, otherwise uses the delta as the
+    // initial size.
+    store.compute(regionInfo,
+      (key,value) -> value == null ? new RegionSizeImpl(delta) : value.incrementSize(delta));
+  }
+
+  @Override
+  public RegionSize remove(RegionInfo regionInfo) {
+    return store.remove(regionInfo);
+  }
+
+  @Override
+  public long heapSize() {
+    // We would have to iterate over each element if RegionInfo implemented HeapSize; for now
+    // it's just a simple calculation.
+    return sizeOfEntry * store.size();
+  }
+
+  @Override
+  public int size() {
+    return store.size();
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return store.isEmpty();
+  }
+
+  @Override
+  public void clear() {
+    store.clear();
+  }
+}
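
The compute()-based upserts above are what make racing writers safe. A sketch of the guarantee, assuming region is an in-scope RegionInfo and the surrounding method declares throws InterruptedException:

    // Two flusher threads each add 1 MB a thousand times. ConcurrentHashMap.compute()
    // serializes updates per key, so no increment is lost.
    RegionSizeStoreImpl store = new RegionSizeStoreImpl();
    Runnable writer = () -> {
      for (int i = 0; i < 1000; i++) {
        store.incrementRegionSize(region, 1024L * 1024);
      }
    };
    Thread t1 = new Thread(writer);
    Thread t2 = new Thread(writer);
    t1.start(); t2.start();
    t1.join(); t2.join();
    assert store.getRegionSize(region).getSize() == 2000L * 1024 * 1024;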

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java
index b1f3cd0..d9730a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java
@@ -80,12 +80,13 @@ public interface SpaceViolationPolicyEnforcement {
   boolean shouldCheckBulkLoads();
 
   /**
-   * Checks the file at the given path against <code>this</code> policy and the current
-   * {@link SpaceQuotaSnapshot}. If the file would violate the policy, a
+   * Computes the size of the file(s) at the given paths against <code>this</code> policy and the
+   * current {@link SpaceQuotaSnapshot}. If the files would violate the policy, a
    * {@link SpaceLimitingException} will be thrown.
    *
    * @param paths The paths in HDFS to files to be bulk loaded.
+   * @return The size, in bytes, of the files that would be loaded.
    */
-  void checkBulkLoad(FileSystem fs, List<String> paths) throws SpaceLimitingException;
+  long computeBulkLoadSize(FileSystem fs, List<String> paths) throws SpaceLimitingException;
 
 }
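
The reworked contract means an enforcement now validates and measures in a single pass: on success the caller receives the total bytes to be loaded, on violation a SpaceLimitingException. A sketch of the intended call pattern, mirroring the RSRpcServices change later in this commit (enforcement, fs, filePaths, region and regionSizeStore are assumed to be in scope):

    // Validate the batch against the quota and capture its size in one call. Throws
    // SpaceLimitingException if the files would push the table past its space quota.
    long sizeToBeLoaded = enforcement.computeBulkLoadSize(fs, filePaths);
    // ... perform the bulk load ...
    // On success, feed the measured size straight into the region size store.
    regionSizeStore.incrementRegionSize(region.getRegionInfo(), sizeToBeLoaded);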

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/AbstractViolationPolicyEnforcement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/AbstractViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/AbstractViolationPolicyEnforcement.java
index c919d7e..79c78bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/AbstractViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/AbstractViolationPolicyEnforcement.java
@@ -16,14 +16,19 @@
  */
 package org.apache.hadoop.hbase.quotas.policies;
 
+import java.io.IOException;
 import java.util.Objects;
 
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
+import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
 import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Abstract implementation for {@link SpaceViolationPolicyEnforcement}.
@@ -74,4 +79,27 @@ public abstract class AbstractViolationPolicyEnforcement
   public boolean areCompactionsDisabled() {
     return false;
   }
+
+  /**
+   * Computes the size of a single file on the filesystem. If the size cannot be computed for some
+   * reason, a {@link SpaceLimitingException} is thrown, as the file may violate a quota. If the
+   * provided path does not reference a file, an {@link IllegalArgumentException} is thrown.
+   *
+   * @param fs The FileSystem on which {@code path} refers to a file
+   * @param path The path on the {@code fs} to a file whose size is being checked
+   * @return The size in bytes of the file
+   */
+  long getFileSize(FileSystem fs, String path) throws SpaceLimitingException {
+    final FileStatus status;
+    try {
+      status = fs.getFileStatus(new Path(Objects.requireNonNull(path)));
+    } catch (IOException e) {
+      throw new SpaceLimitingException(
+          getPolicyName(), "Could not verify length of file to bulk load: " + path, e);
+    }
+    if (!status.isFile()) {
+      throw new IllegalArgumentException(path + " is not a file.");
+    }
+    return status.getLen();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java
index 28e7fd2..01217b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java
@@ -18,11 +18,8 @@ package org.apache.hadoop.hbase.quotas.policies;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Objects;
 
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
@@ -58,33 +55,22 @@ public class DefaultViolationPolicyEnforcement extends AbstractViolationPolicyEn
   }
 
   @Override
-  public void checkBulkLoad(FileSystem fs, List<String> paths) throws SpaceLimitingException {
+  public long computeBulkLoadSize(FileSystem fs, List<String> paths) throws SpaceLimitingException {
     // Compute the amount of space that could be used to save some arithmetic in the for-loop
     final long sizeAvailableForBulkLoads = quotaSnapshot.getLimit() - quotaSnapshot.getUsage();
     long size = 0L;
     for (String path : paths) {
-      size += addSingleFile(fs, path);
+      try {
+        size += getFileSize(fs, path);
+      } catch (IOException e) {
+        throw new SpaceLimitingException(
+            getPolicyName(), "Colud not verify length of file to bulk load: " + path, e);
+      }
       if (size > sizeAvailableForBulkLoads) {
-        break;
+        throw new SpaceLimitingException(getPolicyName(), "Bulk load of " + paths
+            + " is disallowed because the file(s) exceed the limits of a space quota.");
       }
     }
-    if (size > sizeAvailableForBulkLoads) {
-      throw new SpaceLimitingException(getPolicyName(), "Bulk load of " + paths
-          + " is disallowed because the file(s) exceed the limits of a space quota.");
-    }
-  }
-
-  private long addSingleFile(FileSystem fs, String path) throws SpaceLimitingException {
-    final FileStatus status;
-    try {
-      status = fs.getFileStatus(new Path(Objects.requireNonNull(path)));
-    } catch (IOException e) {
-      throw new SpaceLimitingException(
-          getPolicyName(), "Could not verify length of file to bulk load", e);
-    }
-    if (!status.isFile()) {
-      throw new IllegalArgumentException(path + " is not a file.");
-    }
-    return status.getLen();
+    return size;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
index d0e4b16..294954c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
@@ -45,7 +45,13 @@ public class MissingSnapshotViolationPolicyEnforcement extends AbstractViolation
   }
 
   @Override
-  public void checkBulkLoad(FileSystem fs, List<String> paths) {}
+  public long computeBulkLoadSize(FileSystem fs, List<String> paths) throws SpaceLimitingException {
+    long size = 0;
+    for (String path : paths) {
+      size += getFileSize(fs, path);
+    }
+    return size;
+  }
 
   @Override
   public void enable() throws IOException {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9f3d9bd..f071baf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -138,6 +138,7 @@ import org.apache.hadoop.hbase.ipc.RpcCall;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
@@ -2645,6 +2646,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // Set down the memstore size by amount of flush.
       this.decrMemStoreSize(prepareResult.totalFlushableSize);
 
+      // Increase the size of this Region for the purposes of quota. Noop if quotas are disabled.
+      // During startup, quota manager may not be initialized yet.
+      if (rsServices != null) {
+        RegionServerSpaceQuotaManager quotaManager = rsServices.getRegionServerSpaceQuotaManager();
+        if (quotaManager != null) {
+          quotaManager.getRegionSizeStore().incrementRegionSize(
+              this.getRegionInfo(), flushedOutputFileSize);
+        }
+      }
+
       if (wal != null) {
         // write flush marker to WAL. If fail, we should throw DroppedSnapshotException
         FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH,

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 81febc0..7415b77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -120,6 +120,8 @@ import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+import org.apache.hadoop.hbase.quotas.RegionSize;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -1209,10 +1211,10 @@ public class HRegionServer extends HasThread implements
   /**
    * Reports the given map of Regions and their size on the filesystem to the active Master.
    *
-   * @param onlineRegionSizes A map of region info to size in bytes
+   * @param regionSizeStore The store containing region sizes
    * @return false if FileSystemUtilizationChore should pause reporting to master. true otherwise
    */
-  public boolean reportRegionSizesForQuotas(final Map<RegionInfo, Long> onlineRegionSizes) {
+  public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
     RegionServerStatusService.BlockingInterface rss = rssStub;
     if (rss == null) {
       // the current server could be stopping.
@@ -1220,9 +1222,7 @@ public class HRegionServer extends HasThread implements
       return true;
     }
     try {
-      RegionSpaceUseReportRequest request = buildRegionSpaceUseReportRequest(
-          Objects.requireNonNull(onlineRegionSizes));
-      rss.reportRegionSpaceUse(null, request);
+      buildReportAndSend(rss, regionSizeStore);
     } catch (ServiceException se) {
       IOException ioe = ProtobufUtil.getRemoteException(se);
       if (ioe instanceof PleaseHoldException) {
@@ -1251,15 +1251,33 @@ public class HRegionServer extends HasThread implements
   }
 
   /**
+   * Builds the region size report and sends it to the master. Upon successful sending of the
+   * report, the number of region sizes sent is recorded in metrics.
+   *
+   * @param rss The stub to send to the Master
+   * @param regionSizeStore The store containing region sizes
+   */
+  void buildReportAndSend(RegionServerStatusService.BlockingInterface rss,
+      RegionSizeStore regionSizeStore) throws ServiceException {
+    RegionSpaceUseReportRequest request =
+        buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore));
+    rss.reportRegionSpaceUse(null, request);
+    // Record the number of size reports sent
+    if (metricsRegionServer != null) {
+      metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size());
+    }
+  }
+
+  /**
    * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
    *
-   * @param regionSizes Map of region info to size in bytes.
+   * @param regionSizes The store containing the size in bytes of each region
    * @return The corresponding protocol buffer message.
    */
-  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(Map<RegionInfo,Long> regionSizes) {
+  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
     RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
-    for (Entry<RegionInfo, Long> entry : Objects.requireNonNull(regionSizes).entrySet()) {
-      request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue()));
+    for (Entry<RegionInfo, RegionSize> entry : regionSizes) {
+      request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue().getSize()));
     }
     return request.build();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 68a057a..951f97c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -1455,12 +1456,51 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       synchronized (filesCompacting) {
         filesCompacting.removeAll(compactedFiles);
       }
+
+      // These may be null when the RS is shutting down. The space quota Chores will fix the Region
+      // sizes later so it's not super-critical if we miss these.
+      RegionServerServices rsServices = region.getRegionServerServices();
+      if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
+        updateSpaceQuotaAfterFileReplacement(
+            rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
+            compactedFiles, result);
+      }
     } finally {
       this.lock.writeLock().unlock();
     }
   }
 
   /**
+   * Updates the space quota usage for this region, removing the size for files compacted away
+   * and adding in the size for new files.
+   *
+   * @param sizeStore The object tracking changes in region size for space quotas.
+   * @param regionInfo The identifier for the region whose size is being updated.
+   * @param oldFiles Files removed from this store's region.
+   * @param newFiles Files added to this store's region.
+   */
+  void updateSpaceQuotaAfterFileReplacement(
+      RegionSizeStore sizeStore, RegionInfo regionInfo, Collection<HStoreFile> oldFiles,
+      Collection<HStoreFile> newFiles) {
+    long delta = 0;
+    if (oldFiles != null) {
+      for (HStoreFile compactedFile : oldFiles) {
+        if (compactedFile.isHFile()) {
+          delta -= compactedFile.getReader().length();
+        }
+      }
+    }
+    if (newFiles != null) {
+      for (HStoreFile newFile : newFiles) {
+        if (newFile.isHFile()) {
+          delta += newFile.getReader().length();
+        }
+      }
+    }
+    sizeStore.incrementRegionSize(regionInfo, delta);
+  }
+
+  /**
    * Log a very elaborate compaction completion message.
    * @param cr Request.
    * @param sfs Resulting files.
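
A worked example of the delta bookkeeping above: compacting away two HFiles of 10 MB and 6 MB contributes -16 MB, a single 14 MB output file contributes +14 MB, and the net incrementRegionSize call of -2 MB matches the space actually reclaimed on the filesystem.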

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
index df50fa8..3ff6131 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -46,6 +46,7 @@ public class MetricsRegionServer {
   private MetricsRegionServerSource serverSource;
   private MetricsRegionServerWrapper regionServerWrapper;
   private RegionServerTableMetrics tableMetrics;
+  private MetricsRegionServerQuotaSource quotaSource;
 
   private MetricRegistry metricRegistry;
   private Timer bulkLoadTimer;
@@ -62,6 +63,8 @@ public class MetricsRegionServer {
 
     // create and use metrics from the new hbase-metrics based registry.
     bulkLoadTimer = metricRegistry.timer("Bulkload");
+
+    quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class);
   }
 
   MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
@@ -211,4 +214,18 @@ public class MetricsRegionServer {
   public void updateBulkLoad(long millis) {
     this.bulkLoadTimer.updateMillis(millis);
   }
+
+  /**
+   * @see MetricsRegionServerQuotaSource#incrementNumRegionSizeReportsSent(long)
+   */
+  public void incrementNumRegionSizeReportsSent(long numReportsSent) {
+    quotaSource.incrementNumRegionSizeReportsSent(numReportsSent);
+  }
+
+  /**
+   * @see MetricsRegionServerQuotaSource#incrementRegionSizeReportingChoreTime(long)
+   */
+  public void incrementRegionSizeReportingChoreTime(long time) {
+    quotaSource.incrementRegionSizeReportingChoreTime(time);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4dd826f..1ff67e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2279,9 +2279,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       requestCount.increment();
       HRegion region = getRegion(request.getRegion());
       Map<byte[], List<Path>> map = null;
+      final boolean spaceQuotaEnabled = QuotaUtil.isQuotaEnabled(getConfiguration());
+      long sizeToBeLoaded = -1;
 
       // Check to see if this bulk load would exceed the space quota for this table
-      if (QuotaUtil.isQuotaEnabled(getConfiguration())) {
+      if (spaceQuotaEnabled) {
         ActivePolicyEnforcement activeSpaceQuotas = getSpaceQuotaManager().getActiveEnforcements();
         SpaceViolationPolicyEnforcement enforcement = activeSpaceQuotas.getPolicyEnforcement(
             region);
@@ -2292,7 +2294,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             filePaths.add(familyPath.getPath());
           }
           // Check if the batch of files exceeds the current quota
-          enforcement.checkBulkLoad(regionServer.getFileSystem(), filePaths);
+          sizeToBeLoaded = enforcement.computeBulkLoadSize(regionServer.getFileSystem(), filePaths);
         }
       }
 
@@ -2318,6 +2320,19 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       }
       BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder();
       builder.setLoaded(map != null);
+      if (map != null) {
+        // Treat any negative size as a flag to "ignore" updating the region size, as a negative
+        // size cannot occur in real life (you cannot bulk load a file with negative size)
+        if (spaceQuotaEnabled && sizeToBeLoaded > 0) {
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Incrementing space use of " + region.getRegionInfo() + " by "
+                + sizeToBeLoaded + " bytes");
+          }
+          // Inform space quotas of the new files for this region
+          getSpaceQuotaManager().getRegionSizeStore().incrementRegionSize(
+              region.getRegionInfo(), sizeToBeLoaded);
+        }
+      }
       return builder.build();
     } catch (IOException ie) {
       throw new ServiceException(ie);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 60bd215..da1f14f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -234,4 +235,12 @@ public interface RegionServerServices extends Server, MutableOnlineRegions, Favo
    * See HBASE-17712 for more details.
    */
   void unassign(byte[] regionName) throws IOException;
+
+  /**
+   * Reports the sizes of the Regions hosted by this RegionServer to the active Master.
+   *
+   * @param sizeStore The store of sizes for the locally hosted Regions.
+   * @return {@code false} if reporting should be temporarily paused, {@code true} otherwise.
+   */
+  boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index f1e020f..910d334 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
@@ -334,4 +335,9 @@ public class MockRegionServerServices implements RegionServerServices {
   public Connection createConnection(Configuration conf) throws IOException {
     return null;
   }
+
+  @Override
+  public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) {
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index cabd769..c87d723 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+import org.apache.hadoop.hbase.quotas.RegionSizeStore;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
@@ -678,4 +679,9 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   public Connection createConnection(Configuration conf) throws IOException {
     return null;
   }
+
+  @Override
+  public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) {
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 1b4f2d3..a10e3a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -29,21 +29,30 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClientServiceCallable;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
@@ -87,6 +96,8 @@ public class SpaceQuotaHelperForTests {
     conf.setInt(SpaceQuotaRefresherChore.POLICY_REFRESHER_CHORE_PERIOD_KEY, 1000);
     conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 1000);
     conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 1000);
+    conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, 1000);
+    conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_KEY, 1000);
     // The period at which we check for compacted files that should be deleted from HDFS
     conf.setInt("hbase.hfile.compaction.discharger.interval", 5 * 1000);
     conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
@@ -116,10 +127,7 @@ public class SpaceQuotaHelperForTests {
   void removeAllQuotas(Connection conn) throws IOException, InterruptedException {
     // Wait for the quota table to be created
     if (!conn.getAdmin().tableExists(QuotaUtil.QUOTA_TABLE_NAME)) {
-      do {
-        LOG.debug("Quota table does not yet exist");
-        Thread.sleep(1000);
-      } while (!conn.getAdmin().tableExists(QuotaUtil.QUOTA_TABLE_NAME));
+      waitForQuotaTable(conn);
     } else {
       // Or, clean up any quotas from previous test runs.
       QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration());
@@ -159,14 +167,14 @@ public class SpaceQuotaHelperForTests {
   /**
   * Waits 30 seconds for the HBase quota table to exist.
    */
-  void waitForQuotaTable(Connection conn) throws IOException {
+  public void waitForQuotaTable(Connection conn) throws IOException {
     waitForQuotaTable(conn, 30_000);
   }
 
   /**
    * Waits {@code timeout} milliseconds for the HBase quota table to exist.
    */
-  void waitForQuotaTable(Connection conn, long timeout) throws IOException {
+  public void waitForQuotaTable(Connection conn, long timeout) throws IOException {
     testUtil.waitFor(timeout, 1000, new Predicate<IOException>() {
       @Override
       public boolean evaluate() throws IOException {
@@ -316,8 +324,8 @@ public class SpaceQuotaHelperForTests {
     }
 
     // Create the table
-    HTableDescriptor tableDesc = new HTableDescriptor(tn);
-    tableDesc.addFamily(new HColumnDescriptor(F1));
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(F1)).build();
     if (numRegions == 1) {
       admin.createTable(tableDesc);
     } else {
@@ -338,8 +346,8 @@ public class SpaceQuotaHelperForTests {
     }
 
     // Create the table
-    HTableDescriptor tableDesc = new HTableDescriptor(tn);
-    tableDesc.addFamily(new HColumnDescriptor(F1));
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn)
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(F1)).build();
 
     admin.createTable(tableDesc);
     return tn;
@@ -365,6 +373,44 @@ public class SpaceQuotaHelperForTests {
   }
 
   /**
+   * Generates a number of HFiles, each with the given number of rows, and returns a callable
+   * that bulk-loads them into the given table.
+   */
+  ClientServiceCallable<Boolean> generateFileToLoad(
+      TableName tn, int numFiles, int numRowsPerFile) throws Exception {
+    Connection conn = testUtil.getConnection();
+    FileSystem fs = testUtil.getTestFileSystem();
+    Configuration conf = testUtil.getConfiguration();
+    Path baseDir = new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files");
+    fs.mkdirs(baseDir);
+    final List<Pair<byte[], String>> famPaths = new ArrayList<>();
+    for (int i = 1; i <= numFiles; i++) {
+      Path hfile = new Path(baseDir, "file" + i);
+      TestHRegionServerBulkLoad.createHFile(
+          fs, hfile, Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("my"),
+          Bytes.toBytes("file"), numRowsPerFile);
+      famPaths.add(new Pair<>(Bytes.toBytes(SpaceQuotaHelperForTests.F1), hfile.toString()));
+    }
+
+    // bulk load HFiles
+    Table table = conn.getTable(tn);
+    final String bulkToken = new SecureBulkLoadClient(conf, table).prepareBulkLoad(conn);
+    return new ClientServiceCallable<Boolean>(
+        conn, tn, Bytes.toBytes("row"), new RpcControllerFactory(conf).newController(),
+        HConstants.PRIORITY_UNSET) {
+      @Override
+      public Boolean rpcCall() throws Exception {
+        SecureBulkLoadClient secureClient = null;
+        byte[] regionName = getLocation().getRegion().getRegionName();
+        try (Table table = conn.getTable(getTableName())) {
+          secureClient = new SecureBulkLoadClient(conf, table);
+          return secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
+                true, null, bulkToken);
+        }
+      }
+    };
+  }
+
+  /**
    * Abstraction to simplify the case where a test needs to verify a certain state
    * on a {@code SpaceQuotaSnapshot}. This class fails-fast when there is no such
    * snapshot obtained from the Master. As such, it is not useful to verify the

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdedcc56/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
index bc2ac78..38d98e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
@@ -57,7 +57,6 @@ public class TestFileSystemUtilizationChore {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestFileSystemUtilizationChore.class);
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testNoOnlineRegions() {
     // One region with a store size of one.
@@ -67,14 +66,13 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region region = mockRegionWithSize(regionSizes);
     Mockito.doReturn(Arrays.asList(region)).when(rs).getRegions();
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testRegionSizes() {
     // One region with a store size of one.
@@ -84,14 +82,13 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region region = mockRegionWithSize(regionSizes);
     Mockito.doReturn(Arrays.asList(region)).when(rs).getRegions();
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testMultipleRegionSizes() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -108,7 +105,7 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum))))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region r1 = mockRegionWithSize(r1Sizes);
     final Region r2 = mockRegionWithSize(r2Sizes);
@@ -151,7 +148,6 @@ public class TestFileSystemUtilizationChore {
     assertEquals(timeUnit, chore.getTimeUnit());
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testProcessingLeftoverRegions() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -173,7 +169,7 @@ public class TestFileSystemUtilizationChore {
     };
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum, leftover2Sum))))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     // We shouldn't compute all of these region sizes, just the leftovers
     final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
@@ -184,7 +180,6 @@ public class TestFileSystemUtilizationChore {
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testProcessingNowOfflineLeftoversAreIgnored() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -205,7 +200,7 @@ public class TestFileSystemUtilizationChore {
     };
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum))))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     // We shouldn't compute all of these region sizes, just the leftovers
     final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
@@ -217,7 +212,6 @@ public class TestFileSystemUtilizationChore {
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testIgnoreSplitParents() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -231,7 +225,7 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum))))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region r1 = mockRegionWithSize(r1Sizes);
     final Region r2 = mockSplitParentRegionWithSize(r2Sizes);
@@ -239,7 +233,6 @@ public class TestFileSystemUtilizationChore {
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testIgnoreRegionReplicas() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -253,7 +246,7 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(r1Sum))
         .when(rs)
-        .reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region r1 = mockRegionWithSize(r1Sizes);
     final Region r2 = mockRegionReplicaWithSize(r2Sizes);
@@ -261,7 +254,6 @@ public class TestFileSystemUtilizationChore {
     chore.chore();
   }
 
-  @SuppressWarnings("unchecked")
   @Test
   public void testNonHFilesAreIgnored() {
     final Configuration conf = getDefaultHBaseConfiguration();
@@ -280,7 +272,7 @@ public class TestFileSystemUtilizationChore {
     final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
     doAnswer(new ExpectedRegionSizeSummationAnswer(
         sum(Arrays.asList(r1HFileSizeSum, r2HFileSizeSum))))
-        .when(rs).reportRegionSizesForQuotas((Map<RegionInfo,Long>) any());
+        .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class));
 
     final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes);
     final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes);
@@ -302,7 +294,10 @@ public class TestFileSystemUtilizationChore {
    */
   private HRegionServer mockRegionServer(Configuration conf) {
     final HRegionServer rs = mock(HRegionServer.class);
+    final RegionServerSpaceQuotaManager quotaManager = mock(RegionServerSpaceQuotaManager.class);
     when(rs.getConfiguration()).thenReturn(conf);
+    when(rs.getRegionServerSpaceQuotaManager()).thenReturn(quotaManager);
+    when(quotaManager.getRegionSizeStore()).thenReturn(new RegionSizeStoreImpl());
     return rs;
   }
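
The switch from the raw "(Map<RegionInfo,Long>) any()" matcher to the typed
"any(RegionSizeStore.class)" is what lets each test above drop its
@SuppressWarnings("unchecked") annotation. A minimal standalone sketch of the
same doAnswer/typed-matcher pattern (the ReportSink interface and its report()
method are hypothetical, used only for illustration; assumes Mockito 2.x):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.Collections;
    import java.util.Map;

    public class StubbingSketch {
      interface ReportSink {  // hypothetical collaborator
        void report(Map<String, Long> sizes);
      }

      public static void main(String[] args) {
        ReportSink sink = mock(ReportSink.class);
        doAnswer(invocation -> {
          Map<String, Long> sizes = invocation.getArgument(0);
          System.out.println("reported " + sizes.size() + " region sizes");
          return null;  // void method
        }).when(sink).report(any());  // typed matcher, no unchecked cast needed
        sink.report(Collections.singletonMap("r1", 1024L));
      }
    }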
 


[04/11] hbase git commit: HBASE-17825: Backup further optimizations

Posted by bu...@apache.org.
HBASE-17825: Backup further optimizations

Signed-off-by: Josh Elser <el...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6cfa208a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6cfa208a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6cfa208a

Branch: refs/heads/HBASE-15151
Commit: 6cfa208add6ea424e17cae00114ebd3e7d7967f1
Parents: 62ee7d9
Author: Vladimir Rodionov <vr...@hortonworks.com>
Authored: Tue Feb 27 14:08:14 2018 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Wed Feb 28 09:14:15 2018 -0500

----------------------------------------------------------------------
 .../impl/IncrementalTableBackupClient.java      |  14 ++-
 .../mapreduce/MapReduceBackupMergeJob.java      |  60 ++++++++--
 .../TestIncrementalBackupMergeWithFailures.java |   4 +-
 .../hbase/mapreduce/HFileOutputFormat2.java     |  18 ++-
 .../hadoop/hbase/mapreduce/WALPlayer.java       | 114 ++++++++++---------
 .../hadoop/hbase/util/FSTableDescriptors.java   |   6 +-
 6 files changed, 142 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index c897ae2..8fd6573 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -374,14 +374,17 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     Set<TableName> tableSet = backupManager.getIncrementalBackupTableSet();
     // filter missing files out (they have been copied by previous backups)
     incrBackupFileList = filterMissingFiles(incrBackupFileList);
+    List<String> tableList = new ArrayList<String>();
     for (TableName table : tableSet) {
       // Check if table exists
       if (tableExists(table, conn)) {
-        walToHFiles(incrBackupFileList, table);
+        tableList.add(table.getNameAsString());
       } else {
         LOG.warn("Table " + table + " does not exists. Skipping in WAL converter");
       }
     }
+    walToHFiles(incrBackupFileList, tableList);
+
   }
 
   protected boolean tableExists(TableName table, Connection conn) throws IOException {
@@ -390,20 +393,21 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     }
   }
 
-  protected void walToHFiles(List<String> dirPaths, TableName tableName) throws IOException {
+  protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException {
     Tool player = new WALPlayer();
 
     // Player reads all files in arbitrary directory structure and creates
     // a Map task for each file. We use ';' as separator
     // because WAL file names contain ','
     String dirs = StringUtils.join(dirPaths, ';');
-    String jobname = "Incremental_Backup-" + backupId + "-" + tableName.getNameAsString();
+    String jobname = "Incremental_Backup-" + backupId;
 
-    Path bulkOutputPath = getBulkOutputDirForTable(tableName);
+    Path bulkOutputPath = getBulkOutputDir();
     conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
     conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
+    conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true);
     conf.set(JOB_NAME_CONF_KEY, jobname);
-    String[] playerArgs = { dirs, tableName.getNameAsString() };
+    String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };
 
     try {
       player.setConf(conf);
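
Taken together, the changes above turn walToHFiles() into a single
multi-table WALPlayer invocation instead of one job per table. A minimal
sketch of driving it the same way (directory and table names below are
hypothetical; ';' joins the input dirs because WAL file names contain ','):

    Configuration conf = HBaseConfiguration.create();
    conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, "/backup/bulk-output");
    conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
    conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true);
    String dirs = String.join(";", "/wals/dir1", "/wals/dir2");
    String tables = String.join(",", "table1", "ns:table2");
    int rc = ToolRunner.run(conf, new WALPlayer(), new String[] { dirs, tables });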

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index 6f2c44c..3fcf692 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.backup.mapreduce;
 
 import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -29,7 +30,9 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupMergeJob;
@@ -40,6 +43,8 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -113,6 +118,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         // Find input directories for table
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
+
         Path bulkOutputPath =
             BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]),
               getConf(), false);
@@ -243,19 +249,59 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath,
           TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
     Path dest =
-        new Path(HBackupFileSystem.getTableBackupDataDir(backupRoot, mergedBackupId, tableName));
+        new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName));
 
-    // Delete all in dest
-    if (!fs.delete(dest, true)) {
+    // Delete all *data* files in dest
+    if (!deleteData(fs, dest)) {
       throw new IOException("Could not delete " + dest);
     }
 
     FileStatus[] fsts = fs.listStatus(bulkOutputPath);
     for (FileStatus fst : fsts) {
       if (fst.isDirectory()) {
-        fs.rename(fst.getPath().getParent(), dest);
+        String family = fst.getPath().getName();
+        Path newDst = new Path(dest, family);
+        if (fs.exists(newDst)) {
+          if (!fs.delete(newDst, true)) {
+            throw new IOException("Failed to delete: " + newDst);
+          }
+        }
+        fs.rename(fst.getPath(), dest);
+      }
+    }
+  }
+
+  /**
+   * Deletes only data files and keeps all table/region metadata.
+   * @param fs file system instance
+   * @param dest destination location
+   * @return true if all data files were deleted, false otherwise
+   * @throws FileNotFoundException if the destination path does not exist
+   * @throws IOException if listing or deleting files fails
+   */
+  private boolean deleteData(FileSystem fs, Path dest) throws FileNotFoundException, IOException {
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dest, true);
+    List<Path> toDelete = new ArrayList<Path>();
+    while (it.hasNext()) {
+      Path p = it.next().getPath();
+      if (fs.isDirectory(p)) {
+        continue;
+      }
+      // Keep meta
+      String fileName = p.toString();
+      if (fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0 ||
+          fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0) {
+        continue;
+      }
+      toDelete.add(p);
+    }
+    for (Path p : toDelete) {
+      boolean result = fs.delete(p, false);
+      if (!result) {
+        return false;
       }
     }
+    return true;
   }
 
   protected String findMostRecentBackupId(String[] backupIds) {
@@ -291,12 +337,12 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
 
     for (String backupId : backupIds) {
       Path fileBackupDirPath =
-          new Path(HBackupFileSystem.getTableBackupDataDir(backupRoot, backupId, tableName));
+          new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName));
       if (fs.exists(fileBackupDirPath)) {
         dirs.add(fileBackupDirPath);
       } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("File: " + fileBackupDirPath + " does not exist.");
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("File: " + fileBackupDirPath + " does not exist.");
         }
       }
     }
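
The key behavioral change in this file is that merge now deletes only data
files under the table backup dir while preserving .tabledesc and .regioninfo
metadata. A minimal standalone sketch of that walk against a Hadoop
FileSystem (fs and dest are assumed to exist; the keep-list mirrors the
TABLEINFO_DIR and REGION_INFO_FILE constants used above):

    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dest, true); // recursive
    List<Path> toDelete = new ArrayList<>();
    while (it.hasNext()) {
      Path p = it.next().getPath();
      String name = p.toString();
      if (name.contains(".tabledesc") || name.contains(".regioninfo")) {
        continue; // keep table and region metadata
      }
      toDelete.add(p);
    }
    for (Path p : toDelete) {
      if (!fs.delete(p, false)) { // non-recursive; listFiles only returns files
        throw new IOException("Could not delete " + p);
      }
    }

Since FileSystem.listFiles() already returns only files, the per-path
isDirectory() check in the patch is effectively redundant and costs an extra
namenode round trip per entry; the sketch drops it.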

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 83fb29e..7ce5050 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -254,8 +254,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
     String backupIdIncMultiple2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple2));
-
-    // #4 Merge backup images with failures
+    // #4 Merge backup images with failures
 
     for (FailurePhase phase : FailurePhase.values()) {
       Configuration conf = conn.getConfiguration();
@@ -294,7 +293,6 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
         LOG.debug("Expected :"+ e.getMessage());
       }
     }
-
     // Now merge w/o failures
     Configuration conf = conn.getConfiguration();
     conf.unset(FAILURE_PHASE_KEY);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 60352ea..3b04c0b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -258,15 +258,19 @@ public class HFileOutputFormat2
         } else {
           tableNameBytes = Bytes.toBytes(writeTableNames);
         }
+        String tableName = Bytes.toString(tableNameBytes);
+        Path tableRelPath = getTableRelativePath(tableNameBytes);
         byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family);
+
         WriterLength wl = this.writers.get(tableAndFamily);
 
         // If this is a new column family, verify that the directory exists
         if (wl == null) {
           Path writerPath = null;
           if (writeMultipleTables) {
-            writerPath = new Path(outputDir, new Path(Bytes.toString(tableNameBytes), Bytes
+            writerPath = new Path(outputDir, new Path(tableRelPath,
+                    Bytes.toString(family)));
+
           }
           else {
             writerPath = new Path(outputDir, Bytes.toString(family));
@@ -289,7 +293,6 @@ public class HFileOutputFormat2
           if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
             HRegionLocation loc = null;
 
-            String tableName = Bytes.toString(tableNameBytes);
             if (tableName != null) {
               try (Connection connection = ConnectionFactory.createConnection(conf);
                      RegionLocator locator =
@@ -341,6 +344,15 @@ public class HFileOutputFormat2
         this.previousRow = rowKey;
       }
 
+      private Path getTableRelativePath(byte[] tableNameBytes) {
+        String tableName = Bytes.toString(tableNameBytes);
+        String[] tableNameParts = tableName.split(":");
+        Path tableRelPath = new Path(tableNameParts[0]);
+        if (tableNameParts.length > 1) {
+          tableRelPath = new Path(tableRelPath, tableNameParts[1]);
+        }
+        return tableRelPath;
+      }
       private void rollWriters(WriterLength writerLength) throws IOException {
         if (writerLength != null) {
           closeWriter(writerLength);
@@ -376,7 +388,7 @@ public class HFileOutputFormat2
         Path familydir = new Path(outputDir, Bytes.toString(family));
         if (writeMultipleTables) {
           familydir = new Path(outputDir,
-                  new Path(Bytes.toString(tableName), Bytes.toString(family)));
+                  new Path(getTableRelativePath(tableName), Bytes.toString(family)));
         }
         WriterLength wl = new WriterLength();
         Algorithm compression = compressionMap.get(tableAndFamily);
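
getTableRelativePath() exists so namespace-qualified table names produce
nested output directories instead of a literal "ns:table" path component. A
minimal sketch of the mapping (plain strings instead of the byte[] the
method above takes):

    // "default:usertable" -> default/usertable
    // "usertable"         -> usertable
    static Path tableRelativePath(String tableName) {
      String[] parts = tableName.split(":");
      Path rel = new Path(parts[0]);
      return parts.length > 1 ? new Path(rel, parts[1]) : rel;
    }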

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 97ae81c..057127c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -20,7 +20,11 @@ package org.apache.hadoop.hbase.mapreduce;
 import java.io.IOException;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -38,6 +42,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.TableInfo;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
@@ -72,7 +77,9 @@ public class WALPlayer extends Configured implements Tool {
   public final static String TABLE_MAP_KEY = "wal.input.tablesmap";
   public final static String INPUT_FILES_SEPARATOR_KEY = "wal.input.separator";
   public final static String IGNORE_MISSING_FILES = "wal.input.ignore.missing.files";
+  public final static String MULTI_TABLES_SUPPORT = "wal.multi.tables.support";
 
+  protected static final String tableSeparator = ";";
 
   // This relies on Hadoop Configuration to handle warning about deprecated configs and
   // to set the correct non-deprecated configs when an old one shows up.
@@ -84,7 +91,7 @@ public class WALPlayer extends Configured implements Tool {
 
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
-  public WALPlayer(){
+  public WALPlayer() {
   }
 
   protected WALPlayer(final Configuration c) {
@@ -92,26 +99,27 @@ public class WALPlayer extends Configured implements Tool {
   }
 
   /**
-   * A mapper that just writes out KeyValues.
-   * This one can be used together with {@link CellSortReducer}
+   * A mapper that just writes out KeyValues. This one can be used together with
+   * {@link CellSortReducer}
    */
-  static class WALKeyValueMapper
-    extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Cell> {
-    private byte[] table;
+  static class WALKeyValueMapper extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Cell> {
+    private Set<String> tableSet = new HashSet<String>();
+    private boolean multiTableSupport = false;
 
     @Override
-    public void map(WALKey key, WALEdit value,
-      Context context)
-    throws IOException {
+    public void map(WALKey key, WALEdit value, Context context) throws IOException {
       try {
         // skip all other tables
-        if (Bytes.equals(table, key.getTableName().getName())) {
+        TableName table = key.getTableName();
+        if (tableSet.contains(table.getNameAsString())) {
           for (Cell cell : value.getCells()) {
             if (WALEdit.isMetaEditFamily(cell)) {
               continue;
             }
-            context.write(new ImmutableBytesWritable(CellUtil.cloneRow(cell)),
-              new MapReduceExtendedCell(cell));
+            byte[] outKey = multiTableSupport
+                ? Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell))
+                : CellUtil.cloneRow(cell);
+            context.write(new ImmutableBytesWritable(outKey), new MapReduceExtendedCell(cell));
           }
         }
       } catch (InterruptedException e) {
@@ -121,34 +129,28 @@ public class WALPlayer extends Configured implements Tool {
 
     @Override
     public void setup(Context context) throws IOException {
-      // only a single table is supported when HFiles are generated with HFileOutputFormat
-      String[] tables = context.getConfiguration().getStrings(TABLES_KEY);
-      if (tables == null || tables.length != 1) {
-        // this can only happen when WALMapper is used directly by a class other than WALPlayer
-        throw new IOException("Exactly one table must be specified for bulk HFile case.");
+      Configuration conf = context.getConfiguration();
+      String[] tables = conf.getStrings(TABLES_KEY);
+      this.multiTableSupport = conf.getBoolean(MULTI_TABLES_SUPPORT, false);
+      for (String table : tables) {
+        tableSet.add(table);
       }
-      table = Bytes.toBytes(tables[0]);
-
     }
-
   }
 
   /**
-   * A mapper that writes out {@link Mutation} to be directly applied to
-   * a running HBase instance.
+   * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance.
    */
   protected static class WALMapper
-  extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
+      extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
     private Map<TableName, TableName> tables = new TreeMap<>();
 
     @Override
-    public void map(WALKey key, WALEdit value, Context context)
-    throws IOException {
+    public void map(WALKey key, WALEdit value, Context context) throws IOException {
       try {
         if (tables.isEmpty() || tables.containsKey(key.getTableName())) {
-          TableName targetTable = tables.isEmpty() ?
-                key.getTableName() :
-                tables.get(key.getTableName());
+          TableName targetTable =
+              tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName());
           ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName());
           Put put = null;
           Delete del = null;
@@ -228,8 +230,7 @@ public class WALPlayer extends Configured implements Tool {
       int i = 0;
       if (tablesToUse != null) {
         for (String table : tablesToUse) {
-          tables.put(TableName.valueOf(table),
-            TableName.valueOf(tableMap[i++]));
+          tables.put(TableName.valueOf(table), TableName.valueOf(tableMap[i++]));
         }
       }
     }
@@ -249,9 +250,9 @@ public class WALPlayer extends Configured implements Tool {
         // then see if just a number of ms's was specified
         ms = Long.parseLong(val);
       } catch (NumberFormatException nfe) {
-        throw new IOException(option
-            + " must be specified either in the form 2001-02-20T16:35:06.99 "
-            + "or as number of milliseconds");
+        throw new IOException(
+            option + " must be specified either in the form 2001-02-20T16:35:06.99 "
+                + "or as number of milliseconds");
       }
     }
     conf.setLong(option, ms);
@@ -259,8 +260,7 @@ public class WALPlayer extends Configured implements Tool {
 
   /**
    * Sets up the actual job.
-   *
-   * @param args  The command line parameters.
+   * @param args The command line parameters.
    * @return The newly created job.
    * @throws IOException When setting up the job fails.
    */
@@ -283,7 +283,8 @@ public class WALPlayer extends Configured implements Tool {
     conf.setStrings(TABLES_KEY, tables);
     conf.setStrings(TABLE_MAP_KEY, tableMap);
     conf.set(FileInputFormat.INPUT_DIR, inputDirs);
-    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
+    Job job =
+        Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
     job.setJarByClass(WALPlayer.class);
 
     job.setInputFormatClass(WALInputFormat.class);
@@ -294,22 +295,24 @@ public class WALPlayer extends Configured implements Tool {
       LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs);
 
       // the bulk HFile case
-      if (tables.length != 1) {
-        throw new IOException("Exactly one table must be specified for the bulk export option");
-      }
-      TableName tableName = TableName.valueOf(tables[0]);
+      List<TableName> tableNames = getTableNameList(tables);
+
       job.setMapperClass(WALKeyValueMapper.class);
       job.setReducerClass(CellSortReducer.class);
       Path outputDir = new Path(hfileOutPath);
       FileOutputFormat.setOutputPath(job, outputDir);
       job.setMapOutputValueClass(MapReduceExtendedCell.class);
-      try (Connection conn = ConnectionFactory.createConnection(conf);
+      try (Connection conn = ConnectionFactory.createConnection(conf)) {
+        List<TableInfo> tableInfoList = new ArrayList<TableInfo>();
+        for (TableName tableName : tableNames) {
           Table table = conn.getTable(tableName);
-          RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-        HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
+          RegionLocator regionLocator = conn.getRegionLocator(tableName);
+          tableInfoList.add(new TableInfo(table.getDescriptor(), regionLocator));
+        }
+        MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfoList);
       }
       TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
-          org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class);
+        org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class);
     } else {
       // output to live cluster
       job.setMapperClass(WALMapper.class);
@@ -321,17 +324,25 @@ public class WALPlayer extends Configured implements Tool {
     }
     String codecCls = WALCellCodec.getWALCellCodecClass(conf);
     try {
-      TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls));
+      TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
+        Class.forName(codecCls));
     } catch (Exception e) {
       throw new IOException("Cannot determine wal codec class " + codecCls, e);
     }
     return job;
   }
 
+  private List<TableName> getTableNameList(String[] tables) {
+    List<TableName> list = new ArrayList<TableName>();
+    for (String name : tables) {
+      list.add(TableName.valueOf(name));
+    }
+    return list;
+  }
 
   /**
    * Print usage
-   * @param errorMsg Error message.  Can be null.
+   * @param errorMsg Error message. Can be null.
    */
   private void usage(final String errorMsg) {
     if (errorMsg != null && errorMsg.length() > 0) {
@@ -340,8 +351,7 @@ public class WALPlayer extends Configured implements Tool {
     System.err.println("Usage: " + NAME + " [options] <wal inputdir> <tables> [<tableMappings>]");
     System.err.println("Read all WAL entries for <tables>.");
     System.err.println("If no tables (\"\") are specific, all tables are imported.");
-    System.err.println("(Careful, even hbase:meta entries will be imported"+
-      " in that case.)");
+    System.err.println("(Careful, even hbase:meta entries will be imported" + " in that case.)");
     System.err.println("Otherwise <tables> is a comma separated list of tables.\n");
     System.err.println("The WAL entries can be mapped to new set of tables via <tableMapping>.");
     System.err.println("<tableMapping> is a command separated list of targettables.");
@@ -354,16 +364,14 @@ public class WALPlayer extends Configured implements Tool {
     System.err.println("  -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
     System.err.println("  -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
     System.err.println("   -D " + JOB_NAME_CONF_KEY
-      + "=jobName - use the specified mapreduce job name for the wal player");
+        + "=jobName - use the specified mapreduce job name for the wal player");
     System.err.println("For performance also consider the following options:\n"
-      + "  -Dmapreduce.map.speculative=false\n"
-      + "  -Dmapreduce.reduce.speculative=false");
+        + "  -Dmapreduce.map.speculative=false\n" + "  -Dmapreduce.reduce.speculative=false");
   }
 
   /**
    * Main entry point.
-   *
-   * @param args  The command line parameters.
+   * @param args The command line parameters.
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
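
With MULTI_TABLES_SUPPORT on, WALKeyValueMapper prefixes every emitted row
key with the table name and the ';' separator so that
MultiTableHFileOutputFormat can route cells to per-table output directories.
A minimal sketch of the key composition (values are hypothetical):

    byte[] table = Bytes.toBytes("ns:usertable");
    byte[] row = Bytes.toBytes("row-0001");
    byte[] outKey = Bytes.add(table, Bytes.toBytes(";"), row);
    // outKey = "ns:usertable;row-0001": everything up to the first ';'
    // names the table, the remainder is the original row key.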

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cfa208a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index b4b0be0..ca64172 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -88,9 +88,9 @@ public class FSTableDescriptors implements TableDescriptors {
   /**
    * The file name prefix used to store HTD in HDFS
    */
-  static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
-  static final String TABLEINFO_DIR = ".tabledesc";
-  static final String TMP_DIR = ".tmp";
+  public static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
+  public static final String TABLEINFO_DIR = ".tabledesc";
+  public static final String TMP_DIR = ".tmp";
 
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.


[09/11] hbase git commit: HBASE-18467 report nightly results to devs via jira

Posted by bu...@apache.org.
HBASE-18467 report nightly results to devs via jira

- rely on parallel pipeline to ensure all stages always run
- define non-CPS jira commenting function
- comment on jiras in the changeset with summary and links


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa345614
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa345614
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa345614

Branch: refs/heads/HBASE-15151
Commit: aa345614a66f40daf4914b5604c06ff0a689f267
Parents: bdedcc5
Author: Sean Busbey <bu...@apache.org>
Authored: Wed Aug 9 00:48:46 2017 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Wed Feb 28 14:30:34 2018 -0600

----------------------------------------------------------------------
 dev-support/Jenkinsfile                      | 592 ++++++++++++++--------
 dev-support/hbase_nightly_source-artifact.sh |   1 -
 2 files changed, 367 insertions(+), 226 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa345614/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 201783b..fe3676d 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -15,11 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 pipeline {
-  agent {
-    node {
-      label 'Hadoop'
-    }
-  }
+  agent any
   triggers {
     cron('@daily')
   }
@@ -34,6 +30,12 @@ pipeline {
     // where we check out to across stages
     BASEDIR = "${env.WORKSPACE}/component"
     YETUS_RELEASE = '0.7.0'
+    // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure.
+    OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
+    OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
+    OUTPUT_DIR_RELATIVE_HADOOP2 = 'output-jdk8-hadoop2'
+    OUTPUT_DIR_RELATIVE_HADOOP3 = 'output-jdk8-hadoop3'
+
     PROJECT = 'hbase'
     PROJECT_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
     // This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
@@ -62,6 +64,7 @@ pipeline {
         dir('component') {
           checkout scm
         }
+        stash name: 'component', includes: "component/*,component/**/*"
       }
     }
     stage ('yetus install') {
@@ -111,252 +114,391 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
         stash name: 'yetus', includes: "yetus-*/*,yetus-*/**/*,tools/personality.sh"
       }
     }
-    stage ('yetus general check') {
-      environment {
-        // TODO does hadoopcheck need to be jdk specific?
-        // Should be things that work with multijdk
-        TESTS = 'all,-unit,-findbugs'
-        // on branches that don't support jdk7, this will already be JAVA_HOME, so we'll end up not
-        // doing multijdk there.
-        MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
-        OUTPUT_DIR_RELATIVE = "output-general"
-        OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-      }
-      steps {
-        unstash 'yetus'
-        sh '''#!/usr/bin/env bash
-          rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-          rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-          "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+    stage ('health checks') {
+      parallel {
+        stage ('yetus general check') {
+  agent {
+    node {
+      label 'Hadoop'
+    }
+  }
+          environment {
+            // TODO does hadoopcheck need to be jdk specific?
+            // Should be things that work with multijdk
+            TESTS = 'all,-unit,-findbugs'
+            // on branches that don't support jdk7, this will already be JAVA_HOME, so we'll end up not
+            // doing multijdk there.
+            MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
+            OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+            OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+          }
+          steps {
+            unstash 'yetus'
+            unstash 'component'
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+              rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+              "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
 '''
-        // TODO should this be a download from master, similar to how the personality is?
-        sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-      }
-      post {
-        always {
-          // Has to be relative to WORKSPACE.
-          archive "${env.OUTPUT_DIR_RELATIVE}/*"
-          archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
-          publishHTML target: [
-            allowMissing: true,
-            keepAll: true,
-            alwaysLinkToLastBuild: true,
-            // Has to be relative to WORKSPACE
-            reportDir: "${env.OUTPUT_DIR_RELATIVE}",
-            reportFiles: 'console-report.html',
-            reportName: 'General Nightly Build Report'
-          ]
+            // TODO roll this into the hbase_nightly_yetus script
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}/commentfile"
+              declare -i status=0
+              if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+                echo '(/) {color:green}+1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+              else
+                echo '(x) {color:red}-1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+                status=1
+              fi
+              echo "-- For more information [see general report|${BUILD_URL}/General_Nightly_Build_Report/]" >> "${OUTPUT_DIR}/commentfile"
+              exit "${status}"
+            '''
+          }
+          post {
+            always {
+              // Has to be relative to WORKSPACE.
+              archive "${env.OUTPUT_DIR_RELATIVE}/*"
+              archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+              publishHTML target: [
+                allowMissing: true,
+                keepAll: true,
+                alwaysLinkToLastBuild: true,
+                // Has to be relative to WORKSPACE
+                reportDir: "${env.OUTPUT_DIR_RELATIVE}",
+                reportFiles: 'console-report.html',
+                reportName: 'General Nightly Build Report'
+              ]
+            }
+          }
         }
-      }
+        stage ('yetus jdk7 checks') {
+  agent {
+    node {
+      label 'Hadoop'
     }
-    stage ('yetus jdk7 checks') {
-      when {
-        branch 'branch-1*'
-      }
-      environment {
-        TESTS = 'mvninstall,compile,javac,unit,htmlout'
-        OUTPUT_DIR_RELATIVE = "output-jdk7"
-        OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-        // On branches where we do jdk7 checks, jdk7 will be JAVA_HOME already.
-      }
-      steps {
-        unstash 'yetus'
-        sh '''#!/usr/bin/env bash
-          rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-          rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-          "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+  }
+          when {
+            branch 'branch-1*'
+          }
+          environment {
+            TESTS = 'mvninstall,compile,javac,unit,htmlout'
+            OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK7}"
+            OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK7}"
+            // On branches where we do jdk7 checks, jdk7 will be JAVA_HOME already.
+          }
+          steps {
+            unstash 'yetus'
+            unstash 'component'
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+              rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+              "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
 '''
-        sh """#!/usr/bin/env bash
-          # for branch-1.1 we don't do jdk8 findbugs, so do it here
-          if [ "${env.BRANCH_NAME}" == "branch-1.1" ]; then
-            TESTS+=",findbugs"
-          fi
-          "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-        """
-      }
-      post {
-        always {
-          junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
-          // zip surefire reports.
-          sh '''#!/bin/bash -e
-            if [ -d "${OUTPUT_DIR}/archiver" ]; then
-              count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
-              if [[ 0 -ne ${count} ]]; then
-                echo "zipping ${count} archived files"
-                zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+            sh '''#!/usr/bin/env bash
+              # for branch-1.1 we don't do jdk8 findbugs, so do it here
+              if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
+                TESTS+=",findbugs"
+              fi
+              rm -rf "${OUTPUT_DIR}/commentfile"
+              declare -i status=0
+              if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+                echo '(/) {color:green}+1 jdk7 checks{color}' >> "${OUTPUT_DIR}/commentfile"
               else
-                echo "No archived files, skipping compressing."
+                echo '(x) {color:red}-1 jdk7 checks{color}' >> "${OUTPUT_DIR}/commentfile"
+                status=1
               fi
-            else
-              echo "No archiver directory, skipping compressing."
-            fi
+              echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_Nightly_Build_Report/]" >> "${OUTPUT_DIR}/commentfile"
+              exit "${status}"
+            '''
+          }
+          post {
+            always {
+              junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+              // zip surefire reports.
+              sh '''#!/bin/bash -e
+                if [ -d "${OUTPUT_DIR}/archiver" ]; then
+                  count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+                  if [[ 0 -ne ${count} ]]; then
+                    echo "zipping ${count} archived files"
+                    zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+                  else
+                    echo "No archived files, skipping compressing."
+                  fi
+                else
+                  echo "No archiver directory, skipping compressing."
+                fi
 '''
-          // Has to be relative to WORKSPACE.
-          archive "${env.OUTPUT_DIR_RELATIVE}/*"
-          archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
-          publishHTML target: [
-            allowMissing         : true,
-            keepAll              : true,
-            alwaysLinkToLastBuild: true,
-            // Has to be relative to WORKSPACE.
-            reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
-            reportFiles          : 'console-report.html',
-            reportName           : 'JDK7 Nightly Build Report'
-          ]
+              // Has to be relative to WORKSPACE.
+              archive "${env.OUTPUT_DIR_RELATIVE}/*"
+              archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+              publishHTML target: [
+                allowMissing         : true,
+                keepAll              : true,
+                alwaysLinkToLastBuild: true,
+                // Has to be relative to WORKSPACE.
+                reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
+                reportFiles          : 'console-report.html',
+                reportName           : 'JDK7 Nightly Build Report'
+              ]
+            }
+          }
         }
-      }
+        stage ('yetus jdk8 hadoop2 checks') {
+  agent {
+    node {
+      label 'Hadoop'
     }
-    stage ('yetus jdk8 hadoop2 checks') {
-      when {
-        not {
-          branch 'branch-1.1*'
-        }
-      }
-      environment {
-        TESTS = 'mvninstall,compile,javac,unit,htmlout'
-        OUTPUT_DIR_RELATIVE = "output-jdk8-hadoop2"
-        OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-        // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
-        // and is needed on branches that do both jdk7 and jdk8
-        SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
-      }
-      steps {
-        unstash 'yetus'
-        sh '''#!/usr/bin/env bash
-          rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-          rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-          "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+  }
+          when {
+            not {
+              branch 'branch-1.1*'
+            }
+          }
+          environment {
+            TESTS = 'mvninstall,compile,javac,unit,htmlout'
+            OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
+            OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
+            // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
+            // and is needed on branches that do both jdk7 and jdk8
+            SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
+          }
+          steps {
+            unstash 'yetus'
+            unstash 'component'
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+              rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+              "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
 '''
-        sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-      }
-      post {
-        always {
-          junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
-          // zip surefire reports.
-          sh '''#!/bin/bash -e
-            if [ -d "${OUTPUT_DIR}/archiver" ]; then
-              count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
-              if [[ 0 -ne ${count} ]]; then
-                echo "zipping ${count} archived files"
-                zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}/commentfile"
+              declare -i status=0
+              if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+                echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' >> "${OUTPUT_DIR}/commentfile"
               else
-                echo "No archived files, skipping compressing."
+                echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' >> "${OUTPUT_DIR}/commentfile"
+                status=1
               fi
-            else
-              echo "No archiver directory, skipping compressing."
-            fi
+              echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop2)/]" >> "${OUTPUT_DIR}/commentfile"
+              exit "${status}"
+            '''
+          }
+          post {
+            always {
+              junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+              // zip surefire reports.
+              sh '''#!/bin/bash -e
+                if [ -d "${OUTPUT_DIR}/archiver" ]; then
+                  count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+                  if [[ 0 -ne ${count} ]]; then
+                    echo "zipping ${count} archived files"
+                    zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+                  else
+                    echo "No archived files, skipping compressing."
+                  fi
+                else
+                  echo "No archiver directory, skipping compressing."
+                fi
 '''
-          // Has to be relative to WORKSPACE.
-          archive "${env.OUTPUT_DIR_RELATIVE}/*"
-          archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
-          publishHTML target: [
-            allowMissing         : true,
-            keepAll              : true,
-            alwaysLinkToLastBuild: true,
-            // Has to be relative to WORKSPACE.
-            reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
-            reportFiles          : 'console-report.html',
-            reportName           : 'JDK8 Nightly Build Report (Hadoop2)'
-          ]
+              // Has to be relative to WORKSPACE.
+              archive "${env.OUTPUT_DIR_RELATIVE}/*"
+              archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+              publishHTML target: [
+                allowMissing         : true,
+                keepAll              : true,
+                alwaysLinkToLastBuild: true,
+                // Has to be relative to WORKSPACE.
+                reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
+                reportFiles          : 'console-report.html',
+                reportName           : 'JDK8 Nightly Build Report (Hadoop2)'
+              ]
+            }
+          }
         }
-      }
+        stage ('yetus jdk8 hadoop3 checks') {
+  agent {
+    node {
+      label 'Hadoop'
     }
-    stage ('yetus jdk8 hadoop3 checks') {
-      when {
-        not {
-          branch 'branch-1*'
-        }
-      }
-      environment {
-        // Failure in any stage fails the build and consecutive stages are not built.
-        // Findbugs is part of this last yetus stage to prevent findbugs precluding hadoop3
-        // tests.
-        TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
-        OUTPUT_DIR_RELATIVE = "output-jdk8-hadoop3"
-        OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-        // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
-        // and is needed on branches that do both jdk7 and jdk8
-        SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
-        // Activates hadoop 3.0 profile in maven runs.
-        HADOOP_PROFILE = '3.0'
-      }
-      steps {
-        unstash 'yetus'
-        sh '''#!/usr/bin/env bash
-          rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-          rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-          "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+  }
+          when {
+            not {
+              branch 'branch-1*'
+            }
+          }
+          environment {
+            // Failure in any stage fails the build and consecutive stages are not built.
+            // Findbugs is part of this last yetus stage to prevent findbugs precluding hadoop3
+            // tests.
+            TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+            OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
+            OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
+            // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
+            // and is needed on branches that do both jdk7 and jdk8
+            SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
+            // Activates hadoop 3.0 profile in maven runs.
+            HADOOP_PROFILE = '3.0'
+          }
+          steps {
+            unstash 'yetus'
+            unstash 'component'
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+              rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+              "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
 '''
-        sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-      }
-      post {
-        always {
-          // Not sure how two junit test reports will work. Disabling this for now.
-          // junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
-          // zip surefire reports.
-          sh '''#!/bin/bash -e
-            if [ -d "${OUTPUT_DIR}/archiver" ]; then
-              count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
-              if [[ 0 -ne ${count} ]]; then
-                echo "zipping ${count} archived files"
-                zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+            sh '''#!/usr/bin/env bash
+              rm -rf "${OUTPUT_DIR}/commentfile"
+              declare -i status=0
+              if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+                echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' >> "${OUTPUT_DIR}/commentfile"
               else
-                echo "No archived files, skipping compressing."
+                echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' >> "${OUTPUT_DIR}/commentfile"
+                status=1
               fi
-            else
-              echo "No archiver directory, skipping compressing."
-            fi
+              echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop3)/]" >> "${OUTPUT_DIR}/commentfile"
+              exit "${status}"
+            '''
+          }
+          post {
+            always {
+              // Not sure how two junit test reports will work. Disabling this for now.
+              // junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+              // zip surefire reports.
+              sh '''#!/bin/bash -e
+                if [ -d "${OUTPUT_DIR}/archiver" ]; then
+                  count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+                  if [[ 0 -ne ${count} ]]; then
+                    echo "zipping ${count} archived files"
+                    zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+                  else
+                    echo "No archived files, skipping compressing."
+                  fi
+                else
+                  echo "No archiver directory, skipping compressing."
+                fi
 '''
-          // Has to be relative to WORKSPACE.
-          archive "${env.OUTPUT_DIR_RELATIVE}/*"
-          archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
-          publishHTML target: [
-            allowMissing         : true,
-            keepAll              : true,
-            alwaysLinkToLastBuild: true,
-            // Has to be relative to WORKSPACE.
-            reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
-            reportFiles          : 'console-report.html',
-            reportName           : 'JDK8 Nightly Build Report (Hadoop3)'
-          ]
+              // Has to be relative to WORKSPACE.
+              archive "${env.OUTPUT_DIR_RELATIVE}/*"
+              archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+              publishHTML target: [
+                allowMissing         : true,
+                keepAll              : true,
+                alwaysLinkToLastBuild: true,
+                // Has to be relative to WORKSPACE.
+                reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
+                reportFiles          : 'console-report.html',
+                reportName           : 'JDK8 Nightly Build Report (Hadoop3)'
+              ]
+            }
+          }
         }
-      }
-    }
-    // This is meant to mimic what a release manager will do to create RCs.
-    // See http://hbase.apache.org/book.html#maven.release
-    stage ('create source tarball') {
-      tools {
-        maven 'Maven (latest)'
-        // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in.
-        jdk "JDK 1.8 (latest)"
-      }
-      steps {
-        sh '''#!/bin/bash -e
-          echo "Setting up directories"
-          rm -rf "output-srctarball" && mkdir "output-srctarball"
-          rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
-          rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
-          rm -rf ".m2-for-src" && mkdir ".m2-for-src"
+        // This is meant to mimic what a release manager will do to create RCs.
+        // See http://hbase.apache.org/book.html#maven.release
+        stage ('create source tarball') {
+          tools {
+            maven 'Maven (latest)'
+            // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in.
+            jdk "JDK 1.8 (latest)"
+          }
+          steps {
+            sh '''#!/bin/bash -e
+              echo "Setting up directories"
+              rm -rf "output-srctarball" && mkdir "output-srctarball"
+              rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
+              rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
+              rm -rf ".m2-for-src" && mkdir ".m2-for-src"
 '''
-        sh '''#!/usr/bin/env bash
-          rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine"
-          "${BASEDIR}/dev-support/gather_machine_environment.sh" "output-srctarball/machine"
+            sh '''#!/usr/bin/env bash
+              rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine"
+              "${BASEDIR}/dev-support/gather_machine_environment.sh" "output-srctarball/machine"
 '''
-        sh """#!/bin/bash -e
-          ${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh \
-              --intermediate-file-dir output-srctarball \
-              --unpack-temp-dir unpacked_src_tarball \
-              --maven-m2-initial .m2-for-repo \
-              --maven-m2-src-build .m2-for-src \
-              --clean-source-checkout \
-              ${env.BASEDIR}
+            sh """#!/bin/bash -e
+              if "${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh" \
+                  --intermediate-file-dir output-srctarball \
+                  --unpack-temp-dir unpacked_src_tarball \
+                  --maven-m2-initial .m2-for-repo \
+                  --maven-m2-src-build .m2-for-src \
+                  --clean-source-checkout \
+                  "${env.BASEDIR}" ; then
+                echo '(/) {color:green}+1 source release artifact{color}\n-- See build output for details.' >output-srctarball/commentfile
+              else
+                echo '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.' >output-srctarball/commentfile
+              fi
 """
+          }
+          post {
+            always {
+              archive 'output-srctarball/*'
+            }
+          }
+        }
       }
-      post {
-        always {
-          archive 'output-srctarball/*'
+    }
+  }
+  post {
+    always {
+      script {
+         try {
+           sh "printenv"
+           def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile",
+                          "${env.OUTPUT_DIR_RELATIVE_JDK7}/commentfile",
+                          "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/commentfile",
+                          "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/commentfile",
+                          'output-srctarball/commentfile']
+           echo env.BRANCH_NAME
+           echo env.BUILD_URL
+           echo currentBuild.result
+           echo currentBuild.durationString
+           def comment = "Results for branch ${env.BRANCH_NAME}, done in ${currentBuild.durationString}\n"
+           comment += "\t[build ${currentBuild.displayName} on builds.a.o|${env.BUILD_URL}]:\n----\ndetails (if available):\n\n"
+           if (currentBuild.result == "SUCCESS") {
+              comment += "(/) *{color:green}+1 overall{color}*\n\n"
+           } else {
+              comment += "(x) *{color:red}-1 overall{color}*\n"
+              // Ideally get the committer out of the change and @-mention them in the per-jira comment
+              comment += "    Committer, please check your recent inclusion of a patch for this issue.\n\n"
+           }
+           echo ""
+           echo "[DEBUG] trying to aggregate step-wise results"
+           comment += results.collect { fileExists(file: it) ? readFile(file: it) : "" }.join("\n\n")
+           echo "[INFO] Comment:"
+           echo comment
+           echo ""
+           echo "[INFO] There are ${currentBuild.changeSets.size()} change sets."
+           getJirasToComment(currentBuild).each { currentIssue ->
+             jiraComment issueKey: currentIssue, body: comment
+           }
+        } catch (Exception exception) {
+          echo "Got exception: ${exception}"
+          echo "    ${exception.getStackTrace()}"
+        }
+      }
+    }
+  }
+}
+import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
+@NonCPS
+List<String> getJirasToComment(RunWrapper thisBuild) {
+  def seenJiras = []
+  thisBuild.changeSets.each { cs ->
+    cs.getItems().each { change ->
+      CharSequence msg = change.msg
+      echo "change: ${change}"
+      echo "     ${msg}"
+      echo "     ${change.commitId}"
+      echo "     ${change.author}"
+      echo ""
+      msg.eachMatch("HBASE-[0-9]+") { currentIssue ->
+        echo "[DEBUG] found jira key: ${currentIssue}"
+        if (currentIssue in seenJiras) {
+          echo "[DEBUG] already commented on ${currentIssue}."
+        } else {
+          echo "[INFO] commenting on ${currentIssue}."
+          seenJiras << currentIssue
         }
       }
     }
   }
+  return seenJiras
 }
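
Side note on the comment-aggregation logic above: getJirasToComment walks every
change set in the build, scans each commit message for issue keys with the
regex "HBASE-[0-9]+", and de-duplicates them so each JIRA gets at most one
comment. A minimal, standalone Java sketch of that extraction step (the class
name and sample messages are hypothetical, shown only to illustrate the
matching and de-duplication):

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.LinkedHashSet;
  import java.util.List;
  import java.util.Set;
  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  public class JiraKeyExtractor {
    // Same pattern the pipeline feeds to msg.eachMatch above.
    private static final Pattern JIRA_KEY = Pattern.compile("HBASE-[0-9]+");

    // Returns each distinct key once, in first-seen order, mirroring the
    // seenJiras de-duplication in getJirasToComment.
    static List<String> jirasToComment(List<String> commitMessages) {
      Set<String> seen = new LinkedHashSet<>();
      for (String msg : commitMessages) {
        Matcher m = JIRA_KEY.matcher(msg);
        while (m.find()) {
          seen.add(m.group());
        }
      }
      return new ArrayList<>(seen);
    }

    public static void main(String[] args) {
      List<String> msgs = Arrays.asList(
          "HBASE-20084 Refactor the RSRpcServices#doBatchOp",
          "HBASE-15151 ensure findbugs check runs on all branches.",
          "HBASE-20084 addendum"); // duplicate key, commented only once
      System.out.println(jirasToComment(msgs)); // [HBASE-20084, HBASE-15151]
    }
  }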

http://git-wip-us.apache.org/repos/asf/hbase/blob/aa345614/dev-support/hbase_nightly_source-artifact.sh
----------------------------------------------------------------------
diff --git a/dev-support/hbase_nightly_source-artifact.sh b/dev-support/hbase_nightly_source-artifact.sh
index 375d121..b334fa5 100755
--- a/dev-support/hbase_nightly_source-artifact.sh
+++ b/dev-support/hbase_nightly_source-artifact.sh
@@ -180,5 +180,4 @@ else
   echo "Building a binary tarball from the source tarball failed. see srtarball_install.log for details."
   exit 1
 fi
-
 # TODO check the layout of the binary artifact we just made.


[02/11] hbase git commit: HBASE-20084 Refactor the RSRpcServices#doBatchOp

Posted by bu...@apache.org.
HBASE-20084 Refactor the RSRpcServices#doBatchOp

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/197bd790
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/197bd790
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/197bd790

Branch: refs/heads/HBASE-15151
Commit: 197bd790701553bd5c7de8b6af47500e0e028920
Parents: 7f6e971
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Mon Feb 26 20:49:05 2018 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Wed Feb 28 15:15:34 2018 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/RSRpcServices.java       | 115 ++++++++++---------
 1 file changed, 58 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/197bd790/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7e01c9a..4dd826f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -121,6 +121,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.DNS;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -763,7 +764,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     // Gather up CONTIGUOUS Puts and Deletes in this mutations List.  Idea is that rather than do
     // one at a time, we instead pass them in batch.  Be aware that the corresponding
     // ResultOrException instance that matches each Put or Delete is then added down in the
-    // doBatchOp call.  We should be staying aligned though the Put and Delete are deferred/batched
+    // doNonAtomicBatchOp call.  We should stay aligned even though the Put and Delete are
+    // deferred/batched
     List<ClientProtos.Action> mutations = null;
     long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
     IOException sizeIOE = null;
@@ -802,7 +804,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           // use it for the response.
           //
           // This will create a copy in the builder.
-          hasResultOrException = true;
           NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
           resultOrExceptionBuilder.setException(pair);
           context.incrementResponseExceptionSize(pair.getSerializedSize());
@@ -829,29 +830,23 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           }
         } else if (action.hasServiceCall()) {
           hasResultOrException = true;
-          try {
-            com.google.protobuf.Message result =
-                execServiceOnRegion(region, action.getServiceCall());
-            ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder =
-                ClientProtos.CoprocessorServiceResult.newBuilder();
-            resultOrExceptionBuilder.setServiceResult(
-                serviceResultBuilder.setValue(
-                  serviceResultBuilder.getValueBuilder()
-                    .setName(result.getClass().getName())
-                    // TODO: Copy!!!
-                    .setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
-          } catch (IOException ioe) {
-            rpcServer.getMetrics().exception(ioe);
-            NameBytesPair pair = ResponseConverter.buildException(ioe);
-            resultOrExceptionBuilder.setException(pair);
-            context.incrementResponseExceptionSize(pair.getSerializedSize());
-          }
+          com.google.protobuf.Message result =
+            execServiceOnRegion(region, action.getServiceCall());
+          ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder =
+            ClientProtos.CoprocessorServiceResult.newBuilder();
+          resultOrExceptionBuilder.setServiceResult(
+            serviceResultBuilder.setValue(
+              serviceResultBuilder.getValueBuilder()
+                .setName(result.getClass().getName())
+                // TODO: Copy!!!
+                .setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
         } else if (action.hasMutation()) {
           MutationType type = action.getMutation().getMutateType();
           if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null &&
               !mutations.isEmpty()) {
             // Flush out any Puts or Deletes already collected.
-            doBatchOp(builder, region, quota, mutations, cellScanner, spaceQuotaEnforcement, false);
+            doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner,
+              spaceQuotaEnforcement);
             mutations.clear();
           }
           switch (type) {
@@ -896,7 +891,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         // Could get to here and there was no result and no exception.  Presumes we added
         // a Put or Delete to the collecting Mutations List for adding later.  In this
         // case the corresponding ResultOrException instance for the Put or Delete will be added
-        // down in the doBatchOp method call rather than up here.
+        // down in the doNonAtomicBatchOp method call rather than up here.
       } catch (IOException ie) {
         rpcServer.getMetrics().exception(ie);
         hasResultOrException = true;
@@ -911,18 +906,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       }
     }
     // Finish up any outstanding mutations
-    if (mutations != null && !mutations.isEmpty()) {
-      try {
-        doBatchOp(builder, region, quota, mutations, cellScanner, spaceQuotaEnforcement, false);
-      } catch (IOException ioe) {
-        // TODO do the refactor to avoid this catch as it is useless
-        // doBatchOp has handled the IOE for all non-atomic operations.
-        rpcServer.getMetrics().exception(ioe);
-        NameBytesPair pair = ResponseConverter.buildException(ioe);
-        resultOrExceptionBuilder.setException(pair);
-        context.incrementResponseExceptionSize(pair.getSerializedSize());
-        builder.addResultOrException(resultOrExceptionBuilder.build());
-      }
+    if (!CollectionUtils.isEmpty(mutations)) {
+      doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner, spaceQuotaEnforcement);
     }
     return cellsToReturn;
   }
@@ -943,6 +928,33 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
+  private void doAtomicBatchOp(final RegionActionResult.Builder builder, final HRegion region,
+    final OperationQuota quota, final List<ClientProtos.Action> mutations,
+    final CellScanner cells, ActivePolicyEnforcement spaceQuotaEnforcement)
+    throws IOException {
+    // Just throw the exception. The exception will be caught and then added to the region-level
+    // exception for the RegionAction. Leaving the action result null is ok since a null
+    // result is viewed as a failure by the hbase client, and the region-level exception will
+    // replace the null result. See AsyncRequestFutureImpl#receiveMultiAction and
+    // AsyncBatchRpcRetryingCaller#onComplete for more details.
+    doBatchOp(builder, region, quota, mutations, cells, spaceQuotaEnforcement, true);
+  }
+
+  private void doNonAtomicBatchOp(final RegionActionResult.Builder builder, final HRegion region,
+    final OperationQuota quota, final List<ClientProtos.Action> mutations,
+    final CellScanner cells, ActivePolicyEnforcement spaceQuotaEnforcement) {
+    try {
+      doBatchOp(builder, region, quota, mutations, cells, spaceQuotaEnforcement, false);
+    } catch (IOException e) {
+      // Set the exception for each action. The mutations in the same RegionAction are grouped
+      // into different batches and then processed individually. Hence, we don't set the
+      // region-level exception here for the whole RegionAction.
+      for (Action mutation : mutations) {
+        builder.addResultOrException(getResultOrException(e, mutation.getIndex()));
+      }
+    }
+  }
+
   /**
    * Execute a list of Put/Delete mutations.
    *
@@ -1029,30 +1041,29 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             break;
         }
       }
-    } catch (IOException ie) {
+    } finally {
       int processedMutationIndex = 0;
       for (Action mutation : mutations) {
         // The non-null mArray[i] means the cell scanner has been read.
         if (mArray[processedMutationIndex++] == null) {
           skipCellsForMutation(mutation, cells);
         }
-        if (!atomic) {
-          builder.addResultOrException(getResultOrException(ie, mutation.getIndex()));
-        }
-      }
-      if (atomic) {
-        throw ie;
       }
+      updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
     }
+  }
+
+  private void updateMutationMetrics(HRegion region, long starttime, boolean batchContainsPuts,
+    boolean batchContainsDelete) {
     if (regionServer.metricsRegionServer != null) {
       long after = EnvironmentEdgeManager.currentTime();
       if (batchContainsPuts) {
-        regionServer.metricsRegionServer.updatePutBatch(
-            region.getTableDescriptor().getTableName(), after - before);
+        regionServer.metricsRegionServer
+          .updatePutBatch(region.getTableDescriptor().getTableName(), after - starttime);
       }
       if (batchContainsDelete) {
-        regionServer.metricsRegionServer.updateDeleteBatch(
-            region.getTableDescriptor().getTableName(), after - before);
+        regionServer.metricsRegionServer
+          .updateDeleteBatch(region.getTableDescriptor().getTableName(), after - starttime);
       }
     }
   }
@@ -1121,17 +1132,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       return region.batchReplay(mutations.toArray(
         new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
     } finally {
-      if (regionServer.metricsRegionServer != null) {
-        long after = EnvironmentEdgeManager.currentTime();
-        if (batchContainsPuts) {
-          regionServer.metricsRegionServer.updatePutBatch(
-              region.getTableDescriptor().getTableName(), after - before);
-        }
-        if (batchContainsDelete) {
-          regionServer.metricsRegionServer.updateDeleteBatch(
-              region.getTableDescriptor().getTableName(), after - before);
-        }
-      }
+      updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
     }
   }
 
@@ -2614,8 +2615,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
                   cellScanner, row, family, qualifier, op,
                   comparator, regionActionResultBuilder, spaceQuotaEnforcement);
           } else {
-            doBatchOp(regionActionResultBuilder, region, quota, regionAction.getActionList(),
-              cellScanner, spaceQuotaEnforcement, true);
+            doAtomicBatchOp(regionActionResultBuilder, region, quota, regionAction.getActionList(),
+              cellScanner, spaceQuotaEnforcement);
             processed = Boolean.TRUE;
           }
         } catch (IOException e) {

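Stepping back from the diff: the net effect of HBASE-20084 is that doBatchOp
keeps a single atomic flag while two thin wrappers encode the differing error
contracts (an atomic failure surfaces as one region-level exception; a
non-atomic failure is recorded per action), and the duplicated metrics update
moves behind a finally block so it runs on success and failure alike. A
condensed, hedged sketch of that shape, with Action and ResultCollector as
simplified stand-ins rather than real HBase types:

  import java.io.IOException;
  import java.util.List;

  public class BatchOpShape {
    static class Action {
      final int index;
      Action(int index) { this.index = index; }
    }

    interface ResultCollector {
      void addFailure(int actionIndex, IOException e);
    }

    // Atomic path: let the IOException propagate; the caller records it as a
    // region-level exception and the per-action results stay null.
    void doAtomicBatchOp(ResultCollector results, List<Action> mutations)
        throws IOException {
      doBatchOp(results, mutations, true);
    }

    // Non-atomic path: translate a single IOException into one failure per
    // mutation, so no region-level exception is ever set.
    void doNonAtomicBatchOp(ResultCollector results, List<Action> mutations) {
      try {
        doBatchOp(results, mutations, false);
      } catch (IOException e) {
        for (Action mutation : mutations) {
          results.addFailure(mutation.index, e);
        }
      }
    }

    void doBatchOp(ResultCollector results, List<Action> mutations,
        boolean atomic) throws IOException {
      long before = System.currentTimeMillis();
      try {
        // ... apply the Put/Delete batch here; may throw IOException ...
      } finally {
        // After the refactor, metrics are updated on success and failure alike.
        updateMutationMetrics(before);
      }
    }

    void updateMutationMetrics(long startTime) {
      long elapsedMs = System.currentTimeMillis() - startTime;
      // ... report put/delete batch latency, e.g. elapsedMs ...
    }
  }

The real methods also carry the RegionActionResult.Builder, quota, cell
scanner, and space-quota enforcement arguments elided here.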

[11/11] hbase git commit: HBASE-15151 ensure findbugs check runs on all branches.

Posted by bu...@apache.org.
HBASE-15151 ensure findbugs check runs on all branches.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7a3f227
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7a3f227
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7a3f227

Branch: refs/heads/HBASE-15151
Commit: d7a3f2276b528307318f5bfa2308cfcc6043583b
Parents: dd18ab6
Author: Sean Busbey <bu...@apache.org>
Authored: Sun Feb 25 00:35:45 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Wed Feb 28 14:30:34 2018 -0600

----------------------------------------------------------------------
 dev-support/Jenkinsfile | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d7a3f227/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 00d0403..57d677a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -255,7 +255,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
             }
           }
           environment {
-            TESTS = 'mvninstall,compile,javac,unit,htmlout'
+            TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
             OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
             OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
             // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
@@ -327,10 +327,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
             }
           }
           environment {
-            // Failure in any stage fails the build and consecutive stages are not built.
-            // Findbugs is part of this last yetus stage to prevent findbugs precluding hadoop3
-            // tests.
-            TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+            TESTS = 'mvninstall,compile,javac,unit,htmlout'
             OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
             OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
             // This isn't strictly needed on branches that only support jdk8, but doesn't hurt