Posted to commits@hbase.apache.org by ch...@apache.org on 2017/12/22 11:03:23 UTC
[2/4] hbase git commit: HBASE-19496 Reusing the ByteBuffer in the rpc layer corrupts the ServerLoad and RegionLoad
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index cbf4b1c..2988ddf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -20,7 +20,6 @@
package org.apache.hadoop.hbase.master;
-
import java.io.Closeable;
import java.io.IOException;
import java.net.Inet6Address;
@@ -35,8 +34,8 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
@@ -66,7 +65,6 @@ import org.apache.hadoop.hbase.shaded.io.netty.channel.socket.InternetProtocolFa
import org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioDatagramChannel;
import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.MessageToMessageEncoder;
import org.apache.hadoop.hbase.shaded.io.netty.util.internal.StringUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
@@ -161,12 +159,12 @@ public class ClusterStatusPublisher extends ScheduledChore {
// We're reusing an existing protobuf message, but we don't send everything.
// This could be extended in the future, for example if we want to send stuff like the
// hbase:meta server name.
- ClusterStatus.Builder csBuilder = ClusterStatus.newBuilder();
- csBuilder.setHBaseVersion(VersionInfo.getVersion())
- .setClusterId(master.getMasterFileSystem().getClusterId().toString())
- .setMaster(master.getServerName())
- .setDeadServers(sns);
- publisher.publish(csBuilder.build());
+ publisher.publish(new ClusterStatus(ClusterMetricsBuilder.newBuilder()
+ .setHBaseVersion(VersionInfo.getVersion())
+ .setClusterId(master.getMasterFileSystem().getClusterId().toString())
+ .setMasterName(master.getServerName())
+ .setDeadServerNames(sns)
+ .build()));
}
protected void cleanup() {
@@ -340,7 +338,8 @@ public class ClusterStatusPublisher extends ScheduledChore {
@Override
protected void encode(ChannelHandlerContext channelHandlerContext,
ClusterStatus clusterStatus, List<Object> objects) {
- ClusterStatusProtos.ClusterStatus csp = ProtobufUtil.convert(clusterStatus);
+ ClusterStatusProtos.ClusterStatus csp
+ = ClusterMetricsBuilder.toClusterStatus(clusterStatus);
objects.add(new DatagramPacket(Unpooled.wrappedBuffer(csp.toByteArray()), isa));
}
}
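The change above replaces the deprecated ClusterStatus.Builder with ClusterMetricsBuilder and routes pb conversion through ClusterMetricsBuilder.toClusterStatus instead of ProtobufUtil.convert. A minimal sketch of the new construction and serialization path, using placeholder version, cluster id, and server names:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.ClusterMetricsBuilder;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

    public class ClusterStatusBuildSketch {
      public static void main(String[] args) {
        // Build the immutable metrics object first, then wrap it in the legacy
        // ClusterStatus facade for callers that still expect the old type.
        ClusterStatus status = new ClusterStatus(ClusterMetricsBuilder.newBuilder()
            .setHBaseVersion("2.0.0")                                  // placeholder
            .setClusterId("sample-cluster-id")                         // placeholder
            .setMasterName(ServerName.valueOf("master.example.org,16000,1"))
            .setDeadServerNames(Arrays.asList(
                ServerName.valueOf("rs1.example.org,16020,1")))
            .build());
        // Serialization for the multicast packet now goes through the builder
        // utility rather than ProtobufUtil.convert(...).
        ClusterStatusProtos.ClusterStatus pb = ClusterMetricsBuilder.toClusterStatus(status);
        System.out.println("payload bytes: " + pb.toByteArray().length);
      }
    }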
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e31db82..cf95030 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -18,10 +18,8 @@
*/
package org.apache.hadoop.hbase.master;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@@ -30,6 +28,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
@@ -50,11 +49,16 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.regex.Pattern;
-
+import java.util.stream.Collectors;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
@@ -66,6 +70,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@@ -180,8 +185,8 @@ import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.eclipse.jetty.server.Server;
@@ -190,6 +195,7 @@ import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.webapp.WebAppContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
@@ -201,9 +207,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolat
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
-
/**
* HMaster is the "master server" for HBase. An HBase cluster has one active
* master. If many masters are started, all compete. Whichever wins goes on to
@@ -1021,7 +1024,7 @@ public class HMaster extends HRegionServer implements MasterServices {
for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
// The isServerOnline check is opportunistic, correctness is handled inside
if (!this.serverManager.isServerOnline(sn) &&
- serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
+ serverManager.checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) {
LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
}
}
@@ -2412,7 +2415,7 @@ public class HMaster extends HRegionServer implements MasterServices {
public ClusterStatus getClusterStatusWithoutCoprocessor(EnumSet<Option> options)
throws InterruptedIOException {
- ClusterStatus.Builder builder = ClusterStatus.newBuilder();
+ ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
// given that hbase1 can't submit the request with Option,
// we return all information to client if the list of Option is empty.
if (options.isEmpty()) {
@@ -2423,30 +2426,32 @@ public class HMaster extends HRegionServer implements MasterServices {
switch (opt) {
case HBASE_VERSION: builder.setHBaseVersion(VersionInfo.getVersion()); break;
case CLUSTER_ID: builder.setClusterId(getClusterId()); break;
- case MASTER: builder.setMaster(getServerName()); break;
- case BACKUP_MASTERS: builder.setBackupMasters(getBackupMasters()); break;
+ case MASTER: builder.setMasterName(getServerName()); break;
+ case BACKUP_MASTERS: builder.setBackerMasterNames(getBackupMasters()); break;
case LIVE_SERVERS: {
if (serverManager != null) {
- builder.setLiveServers(serverManager.getOnlineServers());
+ builder.setLiveServerMetrics(serverManager.getOnlineServers().entrySet().stream()
+ .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())));
}
break;
}
case DEAD_SERVERS: {
if (serverManager != null) {
- builder.setDeadServers(new ArrayList<>(
+ builder.setDeadServerNames(new ArrayList<>(
serverManager.getDeadServers().copyServerNames()));
}
break;
}
case MASTER_COPROCESSORS: {
if (cpHost != null) {
- builder.setMasterCoprocessors(getMasterCoprocessors());
+ builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
}
break;
}
case REGIONS_IN_TRANSITION: {
if (assignmentManager != null) {
- builder.setRegionState(assignmentManager.getRegionStates().getRegionsStateInTransition());
+ builder.setRegionsInTransition(assignmentManager.getRegionStates()
+ .getRegionsStateInTransition());
}
break;
}
@@ -2464,7 +2469,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
}
- return builder.build();
+ return new ClusterStatus(builder.build());
}
/**
@@ -2496,7 +2501,7 @@ public class HMaster extends HRegionServer implements MasterServices {
backupMasterStrings = null;
}
- List<ServerName> backupMasters = null;
+ List<ServerName> backupMasters = Collections.emptyList();
if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
backupMasters = new ArrayList<>(backupMasterStrings.size());
for (String s: backupMasterStrings) {
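On the server side, getClusterStatusWithoutCoprocessor now fills a ClusterMetricsBuilder per requested Option and wraps the result in a ClusterStatus only at the end. From the client, the same Option enum (now hosted on ClusterMetrics rather than ClusterStatus) selects which slices the master computes. A short usage sketch, assuming an existing Admin handle:

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.client.Admin;

    public class OptionUsageSketch {
      // An empty EnumSet means "return everything", which keeps HBase 1.x
      // clients (which cannot send Options at all) working unchanged.
      static void printLiveServers(Admin admin) throws Exception {
        ClusterStatus status = admin.getClusterStatus(
            EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
        status.getLiveServerMetrics().forEach((name, metrics) ->
            System.out.println(name + " serves "
                + metrics.getRegionMetrics().size() + " regions"));
      }
    }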
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 4a4bbe1..f0f2e10 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -32,6 +32,7 @@ import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -446,7 +447,7 @@ public class MasterRpcServices extends RSRpcServices
ClusterStatusProtos.ServerLoad sl = request.getLoad();
ServerName serverName = ProtobufUtil.toServerName(request.getServer());
ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
- ServerLoad newLoad = new ServerLoad(sl);
+ ServerLoad newLoad = new ServerLoad(serverName, sl);
master.getServerManager().regionServerReport(serverName, newLoad);
int version = VersionInfoUtil.getCurrentClientVersionNumber();
master.getAssignmentManager().reportOnlineRegions(serverName,
@@ -902,8 +903,8 @@ public class MasterRpcServices extends RSRpcServices
GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
try {
master.checkInitialized();
- response.setClusterStatus(ProtobufUtil.convert(
- master.getClusterStatus(ProtobufUtil.toOptions(req.getOptionsList()))));
+ response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
+ master.getClusterStatus(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
} catch (IOException e) {
throw new ServiceException(e);
}
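ServerLoad can no longer be built from the pb message alone: after this refactor it carries the server's identity as well, so the ServerName is supplied alongside the pb payload. A hedged sketch with a placeholder name and a default-instance payload standing in for a real regionserver report:

    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

    public class ServerLoadSketch {
      public static void main(String[] args) {
        // The report payload no longer identifies the server by itself, so the
        // name is passed alongside the pb message when the wrapper is created.
        ServerName sn = ServerName.valueOf("rs1.example.org,16020,1");  // placeholder
        ClusterStatusProtos.ServerLoad sl =
            ClusterStatusProtos.ServerLoad.getDefaultInstance();        // stand-in payload
        ServerLoad newLoad = new ServerLoad(sn, sl);
        System.out.println(newLoad.getRegionsLoad().size());
      }
    }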
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 370f1f2..96b31c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -31,7 +31,6 @@ import java.util.Random;
import java.util.Scanner;
import java.util.Set;
import java.util.TreeMap;
-
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
@@ -40,7 +39,7 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
@@ -58,6 +57,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 923a0a7..f84391f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -244,7 +245,7 @@ public class ServerManager {
request.getServerStartCode());
checkClockSkew(sn, request.getServerCurrentTime());
checkIsDead(sn, "STARTUP");
- if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
+ if (!checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) {
LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
+ " could not record the server: " + sn);
}
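ServerLoad.EMPTY_SERVERLOAD is likewise gone; an "empty" load for a just-registered server is now derived from that server's own name. A minimal sketch:

    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.ServerMetricsBuilder;
    import org.apache.hadoop.hbase.ServerName;

    public class EmptyLoadSketch {
      public static void main(String[] args) {
        // ServerMetricsBuilder.of(sn) yields a metrics object carrying only the
        // server name, with every counter at its default; it stands in for the
        // removed EMPTY_SERVERLOAD singleton.
        ServerName sn = ServerName.valueOf("rs2.example.org,16020,1");  // placeholder
        ServerLoad emptyLoad = new ServerLoad(ServerMetricsBuilder.of(sn));
        System.out.println(emptyLoad.getNumberOfRegions());
      }
    }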
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9bb8013..8ead08c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1614,7 +1614,7 @@ public class HRegionServer extends HasThread implements
int storefileSizeMB = 0;
int memstoreSizeMB = (int) (r.getMemStoreSize() / 1024 / 1024);
long storefileIndexSizeKB = 0;
- int rootIndexSizeKB = 0;
+ int rootLevelIndexSizeKB = 0;
int totalStaticIndexSizeKB = 0;
int totalStaticBloomSizeKB = 0;
long totalCompactingKVs = 0;
@@ -1625,13 +1625,14 @@ public class HRegionServer extends HasThread implements
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
- storefileIndexSizeKB += store.getStorefilesIndexSize() / 1024;
+ //TODO: is storefileIndexSizeKB the same as rootLevelIndexSizeKB?
+ storefileIndexSizeKB += store.getStorefilesRootLevelIndexSize() / 1024;
CompactionProgress progress = store.getCompactionProgress();
if (progress != null) {
totalCompactingKVs += progress.totalCompactingKVs;
currentCompactedKVs += progress.currentCompactedKVs;
}
- rootIndexSizeKB += (int) (store.getStorefilesIndexSize() / 1024);
+ rootLevelIndexSizeKB += (int) (store.getStorefilesRootLevelIndexSize() / 1024);
totalStaticIndexSizeKB += (int) (store.getTotalStaticIndexSize() / 1024);
totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024);
}
@@ -1653,7 +1654,7 @@ public class HRegionServer extends HasThread implements
.setStorefileSizeMB(storefileSizeMB)
.setMemStoreSizeMB(memstoreSizeMB)
.setStorefileIndexSizeKB(storefileIndexSizeKB)
- .setRootIndexSizeKB(rootIndexSizeKB)
+ .setRootIndexSizeKB(rootLevelIndexSizeKB)
.setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
.setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
.setReadRequestsCount(r.getReadRequestsCount())
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index a5d4b4d..95ca9dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -2084,7 +2084,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
}
@Override
- public long getStorefilesIndexSize() {
+ public long getStorefilesRootLevelIndexSize() {
return getStoreFileFieldSize(StoreFileReader::indexSize);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index b643ecf..eaaa4ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -791,7 +791,7 @@ class MetricsRegionServerWrapperImpl
avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles;
}
- tempStorefileIndexSize += store.getStorefilesIndexSize();
+ tempStorefileIndexSize += store.getStorefilesRootLevelIndexSize();
tempTotalStaticBloomSize += store.getTotalStaticBloomSize();
tempTotalStaticIndexSize += store.getTotalStaticIndexSize();
tempFlushedCellsCount += store.getFlushedCellsCount();
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index d60de6b..042129f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -172,9 +172,9 @@ public interface Store {
long getHFilesSize();
/**
- * @return The size of the store file indexes, in bytes.
+ * @return The size of the store file root-level indexes, in bytes.
*/
- long getStorefilesIndexSize();
+ long getStorefilesRootLevelIndexSize();
/**
* Returns the total size of all index blocks in the data block indexes, including the root level,
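The Store accessor rename (getStorefilesIndexSize to getStorefilesRootLevelIndexSize) makes explicit that the figure covers only the root level of the block index; the total across all index blocks stays with getTotalStaticIndexSize. A sketch of aggregating both across stores, assuming an Iterable<Store> is available:

    import org.apache.hadoop.hbase.regionserver.Store;

    public class IndexSizeSketch {
      // Root-level index size vs. total static index size across stores.
      static long[] indexSizes(Iterable<Store> stores) {
        long rootLevelIndexBytes = 0;
        long totalStaticIndexBytes = 0;
        for (Store store : stores) {
          // Root-level index blocks only (the renamed accessor).
          rootLevelIndexBytes += store.getStorefilesRootLevelIndexSize();
          // All index blocks, including the root level.
          totalStaticIndexBytes += store.getTotalStaticIndexSize();
        }
        return new long[] { rootLevelIndexBytes, totalStaticIndexBytes };
      }
    }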
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 969a757..5222019 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -47,13 +47,12 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import org.apache.commons.lang3.time.StopWatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -98,6 +97,7 @@ import org.apache.zookeeper.client.ConnectStringParser;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 5e7d728..c96e3e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -58,7 +58,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
@@ -73,8 +72,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
@@ -569,7 +568,7 @@ public class HBaseFsck extends Configured implements Closeable {
errors.print("Number of requests: " + status.getRequestCount());
errors.print("Number of regions: " + status.getRegionsCount());
- List<RegionState> rits = status.getRegionsInTransition();
+ List<RegionState> rits = status.getRegionStatesInTransition();
errors.print("Number of regions in transition: " + rits.size());
if (details) {
for (RegionState state: rits) {
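HBaseFsck, and HBaseFsckRepair below, pick up the getRegionsInTransition to getRegionStatesInTransition rename. A short client-side sketch of the renamed accessor, again assuming an Admin handle:

    import java.util.EnumSet;
    import java.util.List;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.master.RegionState;

    public class RitSketch {
      static void printRegionsInTransition(Admin admin) throws Exception {
        // Request only the regions-in-transition slice of the cluster status.
        List<RegionState> rits = admin
            .getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
            .getRegionStatesInTransition();
        for (RegionState state : rits) {
          System.out.println(state);
        }
      }
    }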
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index b8811c7..64f2766 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -23,10 +23,9 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -120,7 +119,7 @@ public class HBaseFsckRepair {
try {
boolean inTransition = false;
for (RegionState rs : admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
- .getRegionsInTransition()) {
+ .getRegionStatesInTransition()) {
if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) {
inTransition = true;
break;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 711507b..75c7dd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -41,10 +41,9 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 5f480a5..ecc87fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.math.BigInteger;
-
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
@@ -42,10 +41,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -53,6 +50,8 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -434,7 +433,7 @@ public class RegionSplitter {
* Alternative getCurrentNrHRS which is no longer available.
* @param connection
* @return Rough count of regionservers out on cluster.
- * @throws IOException
+ * @throws IOException if a remote or network exception occurs
*/
private static int getRegionServerCount(final Connection connection) throws IOException {
try (Admin admin = connection.getAdmin()) {
@@ -785,7 +784,7 @@ public class RegionSplitter {
* @param conf
* @param tableName
* @return A Pair where first item is table dir and second is the split file.
- * @throws IOException
+ * @throws IOException if a remote or network exception occurs
*/
private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
final TableName tableName)
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index e5cd13a..9e17a79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -60,7 +60,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
new file mode 100644
index 0000000..ac116d8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestClientClusterMetrics {
+ private static HBaseTestingUtility UTIL;
+ private static Admin ADMIN;
+ private final static int SLAVES = 5;
+ private final static int MASTERS = 3;
+ private static MiniHBaseCluster CLUSTER;
+ private static HRegionServer DEAD;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
+ UTIL = new HBaseTestingUtility(conf);
+ UTIL.startMiniCluster(MASTERS, SLAVES);
+ CLUSTER = UTIL.getHBaseCluster();
+ CLUSTER.waitForActiveAndReadyMaster();
+ ADMIN = UTIL.getAdmin();
+ // Kill one region server
+ List<RegionServerThread> rsts = CLUSTER.getLiveRegionServerThreads();
+ RegionServerThread rst = rsts.get(rsts.size() - 1);
+ DEAD = rst.getRegionServer();
+ DEAD.stop("Test dead servers metrics");
+ while (rst.isAlive()) {
+ Thread.sleep(500);
+ }
+ }
+
+ @Test
+ public void testDefaults() throws Exception {
+ ClusterMetrics origin = ADMIN.getClusterStatus();
+ ClusterMetrics defaults = ADMIN.getClusterStatus(EnumSet.allOf(Option.class));
+ Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
+ Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
+ Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
+ Assert.assertEquals(origin.getBackupMasterNames().size(),
+ defaults.getBackupMasterNames().size());
+ Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
+ Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
+ Assert.assertEquals(origin.getLiveServerMetrics().size(),
+ defaults.getLiveServerMetrics().size());
+ Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
+ }
+
+ @Test
+ public void testAsyncClient() throws Exception {
+ try (AsyncConnection asyncConnect = ConnectionFactory.createAsyncConnection(
+ UTIL.getConfiguration()).get()) {
+ AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
+ CompletableFuture<ClusterStatus> originFuture =
+ asyncAdmin.getClusterStatus();
+ CompletableFuture<ClusterStatus> defaultsFuture =
+ asyncAdmin.getClusterStatus(EnumSet.allOf(Option.class));
+ ClusterMetrics origin = originFuture.get();
+ ClusterMetrics defaults = defaultsFuture.get();
+ Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
+ Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
+ Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0);
+ Assert.assertEquals(origin.getBackupMasterNames().size(),
+ defaults.getBackupMasterNames().size());
+ Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size());
+ Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount());
+ Assert.assertEquals(origin.getLiveServerMetrics().size(),
+ defaults.getLiveServerMetrics().size());
+ Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
+ }
+ }
+
+ @Test
+ public void testLiveAndDeadServersStatus() throws Exception {
+ // Count the number of live regionservers
+ List<RegionServerThread> regionserverThreads = CLUSTER.getLiveRegionServerThreads();
+ int numRs = 0;
+ int len = regionserverThreads.size();
+ for (int i = 0; i < len; i++) {
+ if (regionserverThreads.get(i).isAlive()) {
+ numRs++;
+ }
+ }
+ // Depending on the (random) order of unit execution we may run this unit before the
+ // minicluster is fully up and recovered from the RS shutdown done during test init.
+ Waiter.waitFor(CLUSTER.getConfiguration(), 10 * 1000, 100, new Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ ClusterMetrics metrics = ADMIN.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ Assert.assertNotNull(metrics);
+ return metrics.getRegionCount() > 0;
+ }
+ });
+ // Retrieve live servers and dead servers info.
+ EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
+ ClusterMetrics metrics = ADMIN.getClusterStatus(options);
+ Assert.assertNotNull(metrics);
+ // Exclude the dead region server.
+ Assert.assertEquals(SLAVES - 1, numRs);
+ // Live servers == number of regionservers. By default, HMaster doesn't
+ // carry any regions, so it won't report its load and won't appear in the
+ // server list.
+ Assert.assertEquals(numRs, metrics.getLiveServerMetrics().size());
+ Assert.assertTrue(metrics.getRegionCount() > 0);
+ Assert.assertNotNull(metrics.getDeadServerNames());
+ Assert.assertEquals(1, metrics.getDeadServerNames().size());
+ ServerName deadServerName = metrics.getDeadServerNames().iterator().next();
+ Assert.assertEquals(DEAD.getServerName(), deadServerName);
+ }
+
+ @Test
+ public void testMasterAndBackupMastersStatus() throws Exception {
+ // get all the master threads
+ List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
+ int numActive = 0;
+ int activeIndex = 0;
+ ServerName activeName = null;
+ HMaster active = null;
+ for (int i = 0; i < masterThreads.size(); i++) {
+ if (masterThreads.get(i).getMaster().isActiveMaster()) {
+ numActive++;
+ activeIndex = i;
+ active = masterThreads.get(activeIndex).getMaster();
+ activeName = active.getServerName();
+ }
+ }
+ Assert.assertNotNull(active);
+ Assert.assertEquals(1, numActive);
+ Assert.assertEquals(MASTERS, masterThreads.size());
+ // Retrieve master and backup masters infos only.
+ EnumSet<Option> options = EnumSet.of(Option.MASTER, Option.BACKUP_MASTERS);
+ ClusterMetrics metrics = ADMIN.getClusterStatus(options);
+ Assert.assertTrue(metrics.getMasterName().equals(activeName));
+ Assert.assertEquals(MASTERS - 1, metrics.getBackupMasterNames().size());
+ }
+
+ @Test
+ public void testOtherStatusInfos() throws Exception {
+ EnumSet<Option> options =
+ EnumSet.of(Option.MASTER_COPROCESSORS, Option.HBASE_VERSION,
+ Option.CLUSTER_ID, Option.BALANCER_ON);
+ ClusterMetrics metrics = ADMIN.getClusterStatus(options);
+ Assert.assertEquals(1, metrics.getMasterCoprocessorNames().size());
+ Assert.assertNotNull(metrics.getHBaseVersion());
+ Assert.assertNotNull(metrics.getClusterId());
+ Assert.assertTrue(metrics.getAverageLoad() == 0.0);
+ Assert.assertNotNull(metrics.getBalancerOn());
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (ADMIN != null) {
+ ADMIN.close();
+ }
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testObserver() throws IOException {
+ int preCount = MyObserver.PRE_COUNT.get();
+ int postCount = MyObserver.POST_COUNT.get();
+ Assert.assertTrue(Stream.of(ADMIN.getClusterStatus().getMasterCoprocessors())
+ .anyMatch(s -> s.equals(MyObserver.class.getSimpleName())));
+ Assert.assertEquals(preCount + 1, MyObserver.PRE_COUNT.get());
+ Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
+ }
+
+ public static class MyObserver implements MasterCoprocessor, MasterObserver {
+ private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
+ private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
+
+ @Override public Optional<MasterObserver> getMasterObserver() {
+ return Optional.of(this);
+ }
+
+ @Override public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ PRE_COUNT.incrementAndGet();
+ }
+
+ @Override public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ ClusterStatus metrics) throws IOException {
+ POST_COUNT.incrementAndGet();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterStatus.java
new file mode 100644
index 0000000..245f3c9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterStatus.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the ClusterStatus.
+ */
+@Category(SmallTests.class)
+public class TestClientClusterStatus {
+ private static HBaseTestingUtility UTIL;
+ private static Admin ADMIN;
+ private final static int SLAVES = 5;
+ private final static int MASTERS = 3;
+ private static MiniHBaseCluster CLUSTER;
+ private static HRegionServer DEAD;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
+ UTIL = new HBaseTestingUtility(conf);
+ UTIL.startMiniCluster(MASTERS, SLAVES);
+ CLUSTER = UTIL.getHBaseCluster();
+ CLUSTER.waitForActiveAndReadyMaster();
+ ADMIN = UTIL.getAdmin();
+ // Kill one region server
+ List<RegionServerThread> rsts = CLUSTER.getLiveRegionServerThreads();
+ RegionServerThread rst = rsts.get(rsts.size() - 1);
+ DEAD = rst.getRegionServer();
+ DEAD.stop("Test dead servers status");
+ while (rst.isAlive()) {
+ Thread.sleep(500);
+ }
+ }
+
+ @Test
+ public void testDefaults() throws Exception {
+ ClusterStatus origin = ADMIN.getClusterStatus();
+ ClusterStatus defaults = ADMIN.getClusterStatus(EnumSet.allOf(Option.class));
+ checkPbObjectNotNull(origin);
+ checkPbObjectNotNull(defaults);
+ Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
+ Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
+ Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
+ Assert.assertTrue(origin.getBackupMastersSize() == defaults.getBackupMastersSize());
+ Assert.assertTrue(origin.getDeadServersSize() == defaults.getDeadServersSize());
+ Assert.assertTrue(origin.getRegionsCount() == defaults.getRegionsCount());
+ Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
+ Assert.assertTrue(origin.getMasterInfoPort() == defaults.getMasterInfoPort());
+ Assert.assertTrue(origin.equals(defaults));
+ }
+
+ @Test
+ public void testNone() throws Exception {
+ ClusterStatus status0 = ADMIN.getClusterStatus(EnumSet.allOf(Option.class));
+ ClusterStatus status1 = ADMIN.getClusterStatus(EnumSet.noneOf(Option.class));
+ Assert.assertEquals(status0, status1);
+ checkPbObjectNotNull(status0);
+ checkPbObjectNotNull(status1);
+ }
+
+ @Test
+ public void testAsyncClient() throws Exception {
+ try (AsyncConnection asyncConnect = ConnectionFactory.createAsyncConnection(
+ UTIL.getConfiguration()).get()) {
+ AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
+ CompletableFuture<ClusterStatus> originFuture =
+ asyncAdmin.getClusterStatus();
+ CompletableFuture<ClusterStatus> defaultsFuture =
+ asyncAdmin.getClusterStatus(EnumSet.allOf(Option.class));
+ ClusterStatus origin = originFuture.get();
+ ClusterStatus defaults = defaultsFuture.get();
+ checkPbObjectNotNull(origin);
+ checkPbObjectNotNull(defaults);
+ Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
+ Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
+ Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
+ Assert.assertTrue(origin.getBackupMastersSize() == defaults.getBackupMastersSize());
+ Assert.assertTrue(origin.getDeadServersSize() == defaults.getDeadServersSize());
+ Assert.assertTrue(origin.getRegionsCount() == defaults.getRegionsCount());
+ Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
+ }
+ }
+
+ @Test
+ public void testLiveAndDeadServersStatus() throws Exception {
+ // Count the number of live regionservers
+ List<RegionServerThread> regionserverThreads = CLUSTER.getLiveRegionServerThreads();
+ int numRs = 0;
+ int len = regionserverThreads.size();
+ for (int i = 0; i < len; i++) {
+ if (regionserverThreads.get(i).isAlive()) {
+ numRs++;
+ }
+ }
+ // Depending on the (random) order of unit execution we may run this unit before the
+ // minicluster is fully up and recovered from the RS shutdown done during test init.
+ Waiter.waitFor(CLUSTER.getConfiguration(), 10 * 1000, 100, new Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ ClusterStatus status = ADMIN.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ Assert.assertNotNull(status);
+ return status.getRegionsCount() > 0;
+ }
+ });
+ // Retrieve live servers and dead servers info.
+ EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
+ ClusterStatus status = ADMIN.getClusterStatus(options);
+ checkPbObjectNotNull(status);
+ Assert.assertNotNull(status);
+ Assert.assertNotNull(status.getServers());
+ // Exclude the dead region server.
+ Assert.assertEquals(SLAVES - 1, numRs);
+ // Live servers == number of regionservers. By default, HMaster doesn't
+ // carry any regions, so it won't report its load and won't appear in the
+ // server list.
+ Assert.assertEquals(status.getServers().size(), numRs);
+ Assert.assertTrue(status.getRegionsCount() > 0);
+ Assert.assertNotNull(status.getDeadServerNames());
+ Assert.assertEquals(1, status.getDeadServersSize());
+ ServerName deadServerName = status.getDeadServerNames().iterator().next();
+ Assert.assertEquals(DEAD.getServerName(), deadServerName);
+ }
+
+ @Test
+ public void testMasterAndBackupMastersStatus() throws Exception {
+ // get all the master threads
+ List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
+ int numActive = 0;
+ int activeIndex = 0;
+ ServerName activeName = null;
+ HMaster active = null;
+ for (int i = 0; i < masterThreads.size(); i++) {
+ if (masterThreads.get(i).getMaster().isActiveMaster()) {
+ numActive++;
+ activeIndex = i;
+ active = masterThreads.get(activeIndex).getMaster();
+ activeName = active.getServerName();
+ }
+ }
+ Assert.assertNotNull(active);
+ Assert.assertEquals(1, numActive);
+ Assert.assertEquals(MASTERS, masterThreads.size());
+ // Retrieve master and backup masters infos only.
+ EnumSet<Option> options = EnumSet.of(Option.MASTER, Option.BACKUP_MASTERS);
+ ClusterStatus status = ADMIN.getClusterStatus(options);
+ Assert.assertTrue(status.getMaster().equals(activeName));
+ Assert.assertEquals(MASTERS - 1, status.getBackupMastersSize());
+ }
+
+ @Test
+ public void testOtherStatusInfos() throws Exception {
+ EnumSet<Option> options =
+ EnumSet.of(Option.MASTER_COPROCESSORS, Option.HBASE_VERSION,
+ Option.CLUSTER_ID, Option.BALANCER_ON);
+ ClusterStatus status = ADMIN.getClusterStatus(options);
+ Assert.assertTrue(status.getMasterCoprocessors().length == 1);
+ Assert.assertNotNull(status.getHBaseVersion());
+ Assert.assertNotNull(status.getClusterId());
+ Assert.assertTrue(status.getAverageLoad() == 0.0);
+ Assert.assertNotNull(status.getBalancerOn());
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (ADMIN != null) ADMIN.close();
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testObserver() throws IOException {
+ int preCount = MyObserver.PRE_COUNT.get();
+ int postCount = MyObserver.POST_COUNT.get();
+ Assert.assertTrue(Stream.of(ADMIN.getClusterStatus().getMasterCoprocessors())
+ .anyMatch(s -> s.equals(MyObserver.class.getSimpleName())));
+ Assert.assertEquals(preCount + 1, MyObserver.PRE_COUNT.get());
+ Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
+ }
+
+ /**
+ * HBASE-19496 refactored ServerLoad and RegionLoad so that the inner pb object is no longer
+ * needed. However, they are Public classes, and consequently we must make sure that all pb
+ * objects are initialized.
+ */
+ private static void checkPbObjectNotNull(ClusterStatus status) {
+ for (ServerName name : status.getLiveServerMetrics().keySet()) {
+ ServerLoad load = status.getLoad(name);
+ Assert.assertNotNull(load.obtainServerLoadPB());
+ for (RegionLoad rl : load.getRegionsLoad().values()) {
+ Assert.assertNotNull(rl.regionLoadPB);
+ }
+ }
+ }
+
+ public static class MyObserver implements MasterCoprocessor, MasterObserver {
+ private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
+ private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
+
+ @Override public Optional<MasterObserver> getMasterObserver() {
+ return Optional.of(this);
+ }
+
+ @Override public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ PRE_COUNT.incrementAndGet();
+ }
+
+ @Override public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ ClusterStatus status) throws IOException {
+ POST_COUNT.incrementAndGet();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
index d482484..d454c50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
@@ -19,11 +19,16 @@
*/
package org.apache.hadoop.hbase;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -34,11 +39,8 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
@Category({MiscTests.class, MediumTests.class})
public class TestRegionLoad {
@@ -124,6 +126,10 @@ public class TestRegionLoad {
private void checkRegionsAndRegionLoads(Collection<HRegionInfo> regions,
Collection<RegionLoad> regionLoads) {
+ for (RegionLoad load : regionLoads) {
+ assertNotNull(load.regionLoadPB);
+ }
+
assertEquals("No of regions and regionloads doesn't match",
regions.size(), regionLoads.size());
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
new file mode 100644
index 0000000..e731807
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
@@ -0,0 +1,130 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
+
+@Category({ MiscTests.class, MediumTests.class })
+public class TestRegionMetrics {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static Admin admin;
+
+ private static final TableName TABLE_1 = TableName.valueOf("table_1");
+ private static final TableName TABLE_2 = TableName.valueOf("table_2");
+ private static final TableName TABLE_3 = TableName.valueOf("table_3");
+ private static final TableName[] tables = new TableName[] { TABLE_1, TABLE_2, TABLE_3 };
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ UTIL.startMiniCluster(4);
+ admin = UTIL.getAdmin();
+ admin.balancerSwitch(false, true);
+ byte[] FAMILY = Bytes.toBytes("f");
+ for (TableName tableName : tables) {
+ Table table = UTIL.createMultiRegionTable(tableName, FAMILY, 16);
+ UTIL.waitTableAvailable(tableName);
+ UTIL.loadTable(table, FAMILY);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws Exception {
+ for (TableName table : tables) {
+ UTIL.deleteTableIfAny(table);
+ }
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testRegionMetrics() throws Exception {
+ // Check that the regions on each server match that server's RegionMetrics
+ for (ServerName serverName : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+ .getLiveServerMetrics().keySet()) {
+ List<RegionInfo> regions = admin.getRegions(serverName);
+ Collection<RegionMetrics> regionMetricsList =
+ admin.getRegionLoads(serverName).stream().collect(Collectors.toList());
+ checkRegionsAndRegionMetrics(regions, regionMetricsList);
+ }
+
+ // Check that the RegionMetrics collected across all servers cover each table's regions exactly
+ for (TableName table : tables) {
+ List<RegionInfo> tableRegions = admin.getRegions(table);
+
+ List<RegionMetrics> regionMetrics = new ArrayList<>();
+ for (ServerName serverName : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+ .getLiveServerMetrics().keySet()) {
+ regionMetrics.addAll(admin.getRegionLoads(serverName, table));
+ }
+ checkRegionsAndRegionMetrics(tableRegions, regionMetrics);
+ }
+
+ // Check that each server's RegionMetrics count matches what ClusterStatus reports
+ ClusterMetrics clusterStatus = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ for (Map.Entry<ServerName, ServerMetrics> entry : clusterStatus.getLiveServerMetrics()
+ .entrySet()) {
+ ServerName serverName = entry.getKey();
+ ServerMetrics serverMetrics = entry.getValue();
+ List<RegionMetrics> regionMetrics =
+ admin.getRegionLoads(serverName).stream().collect(Collectors.toList());
+ assertEquals(serverMetrics.getRegionMetrics().size(), regionMetrics.size());
+ }
+ }
+
+ private void checkRegionsAndRegionMetrics(Collection<RegionInfo> regions,
+ Collection<RegionMetrics> regionMetrics) {
+
+ assertEquals("No of regions and regionMetrics doesn't match", regions.size(),
+ regionMetrics.size());
+
+ Map<byte[], RegionMetrics> regionMetricsMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+ for (RegionMetrics r : regionMetrics) {
+ regionMetricsMap.put(r.getRegionName(), r);
+ }
+ for (RegionInfo info : regions) {
+ assertTrue("Region not in RegionMetricsMap region:"
+ + info.getRegionNameAsString() + " regionMap: "
+ + regionMetricsMap, regionMetricsMap.containsKey(info.getRegionName()));
+ }
+ }
+}
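A note on checkRegionsAndRegionMetrics above: it keys the lookup map by the raw region name bytes, and byte[] only has identity-based equals/hashCode, so a plain HashMap would never find those keys. That is why the test builds a TreeMap over Bytes.BYTES_COMPARATOR. A minimal sketch of the difference (needs only hbase-common on the classpath):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteArrayKeySketch {
      public static void main(String[] args) {
        byte[] k1 = Bytes.toBytes("region-a");
        byte[] k2 = Bytes.toBytes("region-a");  // same content, different identity

        Map<byte[], String> hash = new HashMap<>();
        hash.put(k1, "metrics");
        System.out.println(hash.containsKey(k2));  // false: identity-based lookup

        Map<byte[], String> tree = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        tree.put(k1, "metrics");
        System.out.println(tree.containsKey(k2));  // true: comparator-based lookup
      }
    }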
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
index 84d509b..b81a64c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
@@ -20,42 +20,44 @@
package org.apache.hadoop.hbase;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-@Category({MiscTests.class, SmallTests.class})
+@Category({ MiscTests.class, SmallTests.class })
public class TestServerLoad {
@Test
public void testRegionLoadAggregation() {
- ServerLoad sl = new ServerLoad(createServerLoadProto());
+ ServerLoad sl = new ServerLoad(ServerName.valueOf("localhost,1,1"), createServerLoadProto());
assertEquals(13, sl.getStores());
assertEquals(114, sl.getStorefiles());
assertEquals(129, sl.getStoreUncompressedSizeMB());
assertEquals(504, sl.getRootIndexSizeKB());
assertEquals(820, sl.getStorefileSizeMB());
assertEquals(82, sl.getStorefileIndexSizeKB());
- assertEquals(((long)Integer.MAX_VALUE)*2, sl.getReadRequestsCount());
+ assertEquals(((long) Integer.MAX_VALUE) * 2, sl.getReadRequestsCount());
assertEquals(300, sl.getFilteredReadRequestsCount());
-
}
-
+
@Test
public void testToString() {
- ServerLoad sl = new ServerLoad(createServerLoadProto());
+ ServerLoad sl = new ServerLoad(ServerName.valueOf("localhost,1,1"), createServerLoadProto());
String slToString = sl.toString();
+ assertNotNull(sl.obtainServerLoadPB());
assertTrue(slToString.contains("numberOfStores=13"));
assertTrue(slToString.contains("numberOfStorefiles=114"));
assertTrue(slToString.contains("storefileUncompressedSizeMB=129"));
- assertTrue(slToString.contains("storefileSizeMB=820"));
+ assertTrue(slToString.contains("storefileSizeMB=820"));
assertTrue(slToString.contains("rootIndexSizeKB=504"));
assertTrue(slToString.contains("coprocessors=[]"));
assertTrue(slToString.contains("filteredReadRequestsCount=300"));
@@ -63,36 +65,35 @@ public class TestServerLoad {
@Test
public void testRegionLoadWrapAroundAggregation() {
- ServerLoad sl = new ServerLoad(createServerLoadProto());
- long totalCount = ((long)Integer.MAX_VALUE)*2;
- assertEquals(totalCount, sl.getReadRequestsCount());
- assertEquals(totalCount, sl.getWriteRequestsCount());
+ ServerLoad sl = new ServerLoad(ServerName.valueOf("localhost,1,1"), createServerLoadProto());
+ assertNotNull(sl.obtainServerLoadPB());
+ long totalCount = ((long) Integer.MAX_VALUE) * 2;
+ assertEquals(totalCount, sl.getReadRequestsCount());
+ assertEquals(totalCount, sl.getWriteRequestsCount());
}
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
- HBaseProtos.RegionSpecifier rSpecOne =
- HBaseProtos.RegionSpecifier.newBuilder()
- .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
- .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
- HBaseProtos.RegionSpecifier rSpecTwo =
- HBaseProtos.RegionSpecifier.newBuilder()
- .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
- .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
+ HBaseProtos.RegionSpecifier rSpecOne = HBaseProtos.RegionSpecifier.newBuilder()
+ .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+ .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
+ HBaseProtos.RegionSpecifier rSpecTwo = HBaseProtos.RegionSpecifier.newBuilder()
+ .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+ .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
ClusterStatusProtos.RegionLoad rlOne =
- ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
- .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
- .setFilteredReadRequestsCount(100)
- .setStorefileIndexSizeKB(42).setRootIndexSizeKB(201).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
+ .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
+ .setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201)
+ .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
ClusterStatusProtos.RegionLoad rlTwo =
- ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
- .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
- .setFilteredReadRequestsCount(200)
- .setStorefileIndexSizeKB(40).setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+ .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+ .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
+ .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
ClusterStatusProtos.ServerLoad sl =
- ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
- addRegionLoads(rlTwo).build();
+ ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne)
+ .addRegionLoads(rlTwo).build();
return sl;
}
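The expected values in this test are sums over the two RegionLoad protos from createServerLoadProto(): 13 stores (10+3), 114 storefiles (101+13), 129 uncompressed MB (106+23), 820 storefile MB (520+300), 82 index KB (42+40), 504 root index KB (201+303), and Integer.MAX_VALUE twice for each request counter. The request counters are the interesting case: summed in int arithmetic they wrap around, which is exactly what testRegionLoadWrapAroundAggregation guards. A standalone illustration:

    public class WrapAroundSketch {
      public static void main(String[] args) {
        // Summing two int counters in int arithmetic wraps around:
        int wrapped = Integer.MAX_VALUE + Integer.MAX_VALUE;        // -2
        // Widening to long before adding preserves the true total:
        long total = (long) Integer.MAX_VALUE + Integer.MAX_VALUE;  // 4294967294
        System.out.println(wrapped + " vs " + total);
      }
    }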
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
new file mode 100644
index 0000000..68a4360
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
@@ -0,0 +1,114 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestServerMetrics {
+
+ @Test
+ public void testRegionLoadAggregation() {
+ ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics(
+ ServerName.valueOf("localhost,1,1"), createServerLoadProto());
+ assertEquals(13,
+ metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreCount()).sum());
+ assertEquals(114,
+ metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreFileCount()).sum());
+ assertEquals(129, metrics.getRegionMetrics().values().stream()
+ .mapToDouble(v -> v.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), 0);
+ assertEquals(504, metrics.getRegionMetrics().values().stream()
+ .mapToDouble(v -> v.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)).sum(), 0);
+ assertEquals(820, metrics.getRegionMetrics().values().stream()
+ .mapToDouble(v -> v.getStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), 0);
+ assertEquals(82, metrics.getRegionMetrics().values().stream()
+ .mapToDouble(v -> v.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)).sum(), 0);
+ assertEquals(((long) Integer.MAX_VALUE) * 2,
+ metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum());
+ assertEquals(300,
+ metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getFilteredReadRequestCount())
+ .sum());
+ }
+
+ @Test
+ public void testToString() {
+ ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics(
+ ServerName.valueOf("localhost,1,1"), createServerLoadProto());
+ String metricsString = metrics.toString();
+ assertTrue(metricsString.contains("numberOfStores=13"));
+ assertTrue(metricsString.contains("numberOfStorefiles=114"));
+ assertTrue(metricsString.contains("storefileUncompressedSizeMB=129"));
+ assertTrue(metricsString.contains("storefileSizeMB=820"));
+ assertTrue(metricsString.contains("rootIndexSizeKB=504"));
+ assertTrue(metricsString.contains("coprocessors=[]"));
+ assertTrue(metricsString.contains("filteredReadRequestsCount=300"));
+ }
+
+ @Test
+ public void testRegionLoadWrapAroundAggregation() {
+ ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics(
+ ServerName.valueOf("localhost,1,1"), createServerLoadProto());
+ long totalCount = ((long) Integer.MAX_VALUE) * 2;
+ assertEquals(totalCount,
+ metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum());
+ assertEquals(totalCount,
+ metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getWriteRequestCount())
+ .sum());
+ }
+
+ private ClusterStatusProtos.ServerLoad createServerLoadProto() {
+ HBaseProtos.RegionSpecifier rSpecOne = HBaseProtos.RegionSpecifier.newBuilder()
+ .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+ .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
+ HBaseProtos.RegionSpecifier rSpecTwo = HBaseProtos.RegionSpecifier.newBuilder()
+ .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+ .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
+
+ ClusterStatusProtos.RegionLoad rlOne =
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
+ .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
+ .setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201)
+ .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+ .build();
+ ClusterStatusProtos.RegionLoad rlTwo =
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+ .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+ .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
+ .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+ .build();
+
+ ClusterStatusProtos.ServerLoad sl =
+ ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne)
+ .addRegionLoads(rlTwo).build();
+ return sl;
+ }
+
+}
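TestServerMetrics exercises the same aggregates through the new interface, where per-region values come back as RegionMetrics and sizes as Size objects rather than raw MB/KB ints. The stream sums used above can also be written with method references; a compile-only sketch against the new API, assuming an already-populated ServerMetrics instance:

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    public class MetricsAggregationSketch {
      /** Sums store counts across all regions of a server, as the test does. */
      static int totalStores(ServerMetrics metrics) {
        return metrics.getRegionMetrics().values().stream()
            .mapToInt(RegionMetrics::getStoreCount).sum();
      }

      /** Sums store file sizes, converted to megabytes via the new Size type. */
      static double totalStoreFileMB(ServerMetrics metrics) {
        return metrics.getRegionMetrics().values().stream()
            .mapToDouble(r -> r.getStoreFileSize().get(Size.Unit.MEGABYTE)).sum();
      }
    }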
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java
new file mode 100644
index 0000000..30d88bd
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java
@@ -0,0 +1,84 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Set;
+import java.util.TreeSet;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestSize {
+
+ @Test
+ public void testConversion() {
+ Size mbSize = new Size(1024D, Size.Unit.MEGABYTE);
+ assertEquals(1D, mbSize.get(Size.Unit.GIGABYTE), 0);
+ assertEquals(1024D, mbSize.get(), 0);
+ assertEquals(1024D * 1024D, mbSize.get(Size.Unit.KILOBYTE), 0);
+ assertEquals(1024D * 1024D * 1024D, mbSize.get(Size.Unit.BYTE), 0);
+ }
+
+ @Test
+ public void testCompare() {
+ Size size00 = new Size(100D, Size.Unit.GIGABYTE);
+ Size size01 = new Size(100D, Size.Unit.MEGABYTE);
+ Size size02 = new Size(100D, Size.Unit.BYTE);
+ Set<Size> sizes = new TreeSet<>();
+ sizes.add(size00);
+ sizes.add(size01);
+ sizes.add(size02);
+ int count = 0;
+ for (Size s : sizes) {
+ switch (count++) {
+ case 0:
+ assertEquals(size02, s);
+ break;
+ case 1:
+ assertEquals(size01, s);
+ break;
+ default:
+ assertEquals(size00, s);
+ break;
+ }
+ }
+ assertEquals(3, count);
+ }
+
+ @Test
+ public void testEqual() {
+ assertEquals(new Size(1024D, Size.Unit.TERABYTE),
+ new Size(1D, Size.Unit.PETABYTE));
+ assertEquals(new Size(1024D, Size.Unit.GIGABYTE),
+ new Size(1D, Size.Unit.TERABYTE));
+ assertEquals(new Size(1024D, Size.Unit.MEGABYTE),
+ new Size(1D, Size.Unit.GIGABYTE));
+ assertEquals(new Size(1024D, Size.Unit.KILOBYTE),
+ new Size(1D, Size.Unit.MEGABYTE));
+ assertEquals(new Size(1024D, Size.Unit.BYTE),
+ new Size(1D, Size.Unit.KILOBYTE));
+ }
+
+}
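TestSize pins down the Size contract: adjacent units differ by a factor of 1024, and equality and ordering go by normalized magnitude, which is why the TreeSet in testCompare iterates bytes before megabytes before gigabytes. A small runnable sketch of both properties:

    import org.apache.hadoop.hbase.Size;

    public class SizeSketch {
      public static void main(String[] args) {
        Size hundredMB = new Size(100D, Size.Unit.MEGABYTE);
        Size oneGB = new Size(1D, Size.Unit.GIGABYTE);
        // Ordering normalizes units first, so 100 MB < 1 GB
        // even though 100 > 1 numerically:
        System.out.println(hundredMB.compareTo(oneGB) < 0);  // true
        // Conversions step by 1024 between adjacent units:
        System.out.println(oneGB.get(Size.Unit.MEGABYTE));   // 1024.0
      }
    }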
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index fb5febc..d701a81 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -25,20 +25,14 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
-
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -58,7 +52,6 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -68,13 +61,13 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index da77f29..26f45bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -21,6 +21,8 @@ import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
@@ -31,10 +33,9 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.RegionLoad;
@@ -53,9 +54,6 @@ import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
@RunWith(Parameterized.class)
@Category({ ClientTests.class, MediumTests.class })
public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4af099e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
index 856a31a..a3d7616 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
@@ -25,9 +25,8 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.stream.Collectors;
-
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.Test;