Posted to commits@hbase.apache.org by xu...@apache.org on 2019/09/16 18:31:31 UTC

[hbase] branch branch-1.3 updated: HBASE-22804 Provide an API to get list of successful regions and total expected regions in Canary

This is an automated email from the ASF dual-hosted git repository.

xucang pushed a commit to branch branch-1.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.3 by this push:
     new c6b0809  HBASE-22804 Provide an API to get list of successful regions and total expected regions in Canary
c6b0809 is described below

commit c6b0809d9be9825cf0d0c70cf9f06c0cdd19b41b
Author: Caroline Zhou <ca...@salesforce.com>
AuthorDate: Mon Sep 16 11:24:53 2019 -0700

    HBASE-22804 Provide an API to get list of successful regions and total expected regions in Canary
    
    Signed-off-by: Xu Cang <xu...@apache.org>
---
 .../java/org/apache/hadoop/hbase/tool/Canary.java  | 138 ++++++++++++++++++++-
 .../apache/hadoop/hbase/tool/TestCanaryTool.java   |  55 +++++++-
 2 files changed, 191 insertions(+), 2 deletions(-)

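For context, a minimal, hypothetical sketch (not part of this commit) of how a caller might drive the Canary programmatically and read the new per-region results exposed by this change. It assumes the Canary(ExecutorService, Sink) constructor and RegionStdOutSink are publicly accessible, as they are from the new test below; the table name "my_table" and the 10000 ms timeout are placeholder values.

    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.ScheduledThreadPoolExecutor;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.tool.Canary;
    import org.apache.hadoop.util.ToolRunner;

    public class CanaryResultExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        ExecutorService executor = new ScheduledThreadPoolExecutor(1);
        // Region-mode sink that now tracks per-region results and success counts.
        Canary.RegionStdOutSink sink = new Canary.RegionStdOutSink();
        Canary canary = new Canary(executor, sink);

        // "my_table" is a placeholder; -t sets the run timeout in milliseconds.
        int exitCode = ToolRunner.run(conf, canary, new String[] {"-t", "10000", "my_table"});

        // Aggregate counters added by this change.
        System.out.println("exit=" + exitCode
            + " expectedRegions=" + sink.getTotalExpectedRegions()
            + " readSuccesses=" + sink.getReadSuccessCount()
            + " writeSuccesses=" + sink.getWriteSuccessCount());

        // Per-region results, keyed by region name.
        Map<String, Canary.RegionTaskResult> regionMap = sink.getRegionMap();
        for (Map.Entry<String, Canary.RegionTaskResult> e : regionMap.entrySet()) {
          Canary.RegionTaskResult res = e.getValue();
          System.out.println(e.getKey() + " on " + res.getServerNameAsString()
              + " readSuccess=" + res.isReadSuccess()
              + " readLatencyMs=" + res.getReadLatency());
        }
        executor.shutdown();
      }
    }

The new TestCanaryTool#testCanaryRegionTaskResult in the diff below exercises the same API from within the org.apache.hadoop.hbase.tool package.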
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 7c17db9..d66d1d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -121,13 +121,19 @@ public final class Canary implements Tool {
     public long incWriteFailureCount();
     public Map<String,String> getWriteFailures();
     public void updateWriteFailures(String regionName, String serverName);
+    public long getReadSuccessCount();
+    public long incReadSuccessCount();
+    public long getWriteSuccessCount();
+    public long incWriteSuccessCount();
   }
 
   // Simple implementation of canary sink that allows to plot on
   // file or standard output timings or failures.
   public static class StdOutSink implements Sink {
     private AtomicLong readFailureCount = new AtomicLong(0),
-        writeFailureCount = new AtomicLong(0);
+        writeFailureCount = new AtomicLong(0),
+        readSuccessCount = new AtomicLong(0),
+        writeSuccessCount = new AtomicLong(0);
 
     private Map<String, String> readFailures = new ConcurrentHashMap<String, String>();
     private Map<String, String> writeFailures = new ConcurrentHashMap<String, String>();
@@ -171,6 +177,26 @@ public final class Canary implements Tool {
     public void updateWriteFailures(String regionName, String serverName) {
       writeFailures.put(regionName, serverName);
     }
+
+    @Override
+    public long getReadSuccessCount() {
+      return readSuccessCount.get();
+    }
+
+    @Override
+    public long incReadSuccessCount() {
+      return readSuccessCount.incrementAndGet();
+    }
+
+    @Override
+    public long getWriteSuccessCount() {
+      return writeSuccessCount.get();
+    }
+
+    @Override
+    public long incWriteSuccessCount() {
+      return writeSuccessCount.incrementAndGet();
+    }
   }
 
   public static class RegionServerStdOutSink extends StdOutSink {
@@ -203,6 +229,7 @@ public final class Canary implements Tool {
 
     private Map<String, AtomicLong> perTableReadLatency = new HashMap<>();
     private AtomicLong writeLatency = new AtomicLong();
+    private Map<String, RegionTaskResult> regionMap = new ConcurrentHashMap<>();
 
     public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e) {
       incReadFailureCount();
@@ -216,6 +243,10 @@ public final class Canary implements Tool {
     }
 
     public void publishReadTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime) {
+      incReadSuccessCount();
+      RegionTaskResult res = this.regionMap.get(region.getRegionNameAsString());
+      res.setReadSuccess();
+      res.setReadLatency(msTime);
       LOG.info(String.format("read from region %s on regionserver %s column family %s in %dms",
         region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime));
     }
@@ -232,6 +263,10 @@ public final class Canary implements Tool {
     }
 
     public void publishWriteTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime) {
+      incWriteSuccessCount();
+      RegionTaskResult res = this.regionMap.get(region.getRegionNameAsString());
+      res.setWriteSuccess();
+      res.setWriteLatency(msTime);
       LOG.info(String.format("write to region %s on regionserver %s column family %s in %dms",
         region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime));
     }
@@ -253,6 +288,14 @@ public final class Canary implements Tool {
     public AtomicLong getWriteLatency() {
       return this.writeLatency;
     }
+
+    public Map<String, RegionTaskResult> getRegionMap() {
+      return this.regionMap;
+    }
+
+    public int getTotalExpectedRegions() {
+      return this.regionMap.size();
+    }
   }
 
   static class ZookeeperTask implements Callable<Void> {
@@ -885,6 +928,96 @@ public final class Canary implements Tool {
   }
 
   /**
+   * Canary region mode-specific data structure which stores information about each region
+   * to be scanned
+   */
+  public static class RegionTaskResult {
+    private HRegionInfo region;
+    private TableName tableName;
+    private ServerName serverName;
+    private AtomicLong readLatency = null;
+    private AtomicLong writeLatency = null;
+    private boolean readSuccess = false;
+    private boolean writeSuccess = false;
+
+    public RegionTaskResult(HRegionInfo region, TableName tableName, ServerName serverName) {
+      this.region = region;
+      this.tableName = tableName;
+      this.serverName = serverName;
+    }
+
+    public HRegionInfo getRegionInfo() {
+      return this.region;
+    }
+
+    public String getRegionNameAsString() {
+      return this.region.getRegionNameAsString();
+    }
+
+    public TableName getTableName() {
+      return this.tableName;
+    }
+
+    public String getTableNameAsString() {
+      return this.tableName.getNameAsString();
+    }
+
+    public ServerName getServerName() {
+      return this.serverName;
+    }
+
+    public String getServerNameAsString() {
+      return this.serverName.getServerName();
+    }
+
+    public long getReadLatency() {
+      if (this.readLatency == null) {
+        return -1;
+      }
+      return this.readLatency.get();
+    }
+
+    public void setReadLatency(long readLatency) {
+      if (this.readLatency != null) {
+        this.readLatency.set(readLatency);
+      } else {
+        this.readLatency = new AtomicLong(readLatency);
+      }
+    }
+
+    public long getWriteLatency() {
+      if (this.writeLatency == null) {
+        return -1;
+      }
+      return this.writeLatency.get();
+    }
+
+    public void setWriteLatency(long writeLatency) {
+      if (this.writeLatency != null) {
+        this.writeLatency.set(writeLatency);
+      } else {
+        this.writeLatency = new AtomicLong(writeLatency);
+      }
+    }
+
+    public boolean isReadSuccess() {
+      return this.readSuccess;
+    }
+
+    public void setReadSuccess() {
+      this.readSuccess = true;
+    }
+
+    public boolean isWriteSuccess() {
+      return this.writeSuccess;
+    }
+
+    public void setWriteSuccess() {
+      this.writeSuccess = true;
+    }
+  }
+
+  /**
    * A Factory method for {@link Monitor}.
    * Can be overridden by user.
    * @param index a start index for monitor target
@@ -1296,6 +1429,9 @@ public final class Canary implements Tool {
         HRegionInfo region = location.getRegionInfo();
         tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, taskType, rawScanEnabled,
           rwLatency));
+        Map<String, RegionTaskResult> regionMap = ((RegionStdOutSink) sink).getRegionMap();
+        regionMap.put(region.getRegionNameAsString(), new RegionTaskResult(region,
+          region.getTable(), rs));
       }
     } finally {
       if (regionLocator != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
index aa76c85..d2049a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
@@ -47,15 +47,19 @@ import org.mockito.ArgumentMatcher;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Matchers.argThat;
-import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.*;
 
 @RunWith(MockitoJUnitRunner.class)
@@ -116,6 +120,55 @@ public class TestCanaryTool {
   }
 
   @Test
+  public void testCanaryRegionTaskResult() throws Exception {
+    TableName tableName = TableName.valueOf("testCanaryRegionTaskResult");
+    HTable table = testingUtility.createTable(tableName, new byte[][]{FAMILY});
+    // insert some test rows
+    for (int i = 0; i < 1000; i++) {
+      byte[] iBytes = Bytes.toBytes(i);
+      Put p = new Put(iBytes);
+      p.addColumn(FAMILY, COLUMN, iBytes);
+      table.put(p);
+    }
+    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
+    Canary.RegionStdOutSink sink = spy(new Canary.RegionStdOutSink());
+    Canary canary = new Canary(executor, sink);
+    String[] args = {"-writeSniffing", "-t", "10000", "testCanaryRegionTaskResult"};
+    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
+
+    assertTrue("verify read success count > 0", sink.getReadSuccessCount() > 0);
+    assertTrue("verify write success count > 0", sink.getWriteSuccessCount() > 0);
+    verify(sink, atLeastOnce()).publishReadTiming(isA(ServerName.class), isA(HRegionInfo.class),
+      isA(HColumnDescriptor.class), anyLong());
+    verify(sink, atLeastOnce()).publishWriteTiming(isA(ServerName.class), isA(HRegionInfo.class),
+      isA(HColumnDescriptor.class), anyLong());
+
+    assertTrue("canary should expect to scan at least 1 region",
+      sink.getTotalExpectedRegions() > 0);
+    Map<String, Canary.RegionTaskResult> regionMap = sink.getRegionMap();
+    assertFalse("verify region map has size > 0", regionMap.isEmpty());
+
+    for (String regionName : regionMap.keySet()) {
+      Canary.RegionTaskResult res = regionMap.get(regionName);
+      assertNotNull("verify each expected region has a RegionTaskResult object in the map", res);
+      assertNotNull("verify getRegionNameAsString()", regionName);
+      assertNotNull("verify getRegionInfo()", res.getRegionInfo());
+      assertNotNull("verify getTableName()", res.getTableName());
+      assertNotNull("verify getTableNameAsString()", res.getTableNameAsString());
+      assertNotNull("verify getServerName()", res.getServerName());
+      assertNotNull("verify getServerNameAsString()", res.getServerNameAsString());
+
+      if (regionName.contains(Canary.DEFAULT_WRITE_TABLE_NAME.getNameAsString())) {
+        assertTrue("write to region " + regionName + " succeeded", res.isWriteSuccess());
+        assertTrue("write took some time", res.getWriteLatency() > -1);
+      } else {
+        assertTrue("read from region " + regionName + " succeeded", res.isReadSuccess());
+        assertTrue("read took some time", res.getReadLatency() > -1);
+      }
+    }
+  }
+
+  @Test
   @Ignore("Intermittent argument matching failures, see HBASE-18813")
   public void testReadTableTimeouts() throws Exception {
     final TableName [] tableNames = new TableName[2];