You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2018/01/23 10:24:46 UTC
[11/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and
error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 65c4d08..ab282d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -117,6 +117,7 @@ public class RSProcedureDispatcher
}
}
+ @Override
protected void abortPendingOperations(final ServerName serverName,
final Set<RemoteProcedure> operations) {
// TODO: Replace with a ServerNotOnlineException()
@@ -126,10 +127,12 @@ public class RSProcedureDispatcher
}
}
+ @Override
public void serverAdded(final ServerName serverName) {
addNode(serverName);
}
+ @Override
public void serverRemoved(final ServerName serverName) {
removeNode(serverName);
}
@@ -138,6 +141,7 @@ public class RSProcedureDispatcher
* Base remote call
*/
protected abstract class AbstractRSRemoteCall implements Callable<Void> {
+ @Override
public abstract Void call();
private final ServerName serverName;
@@ -269,6 +273,7 @@ public class RSProcedureDispatcher
this.remoteProcedures = remoteProcedures;
}
+ @Override
public Void call() {
request = ExecuteProceduresRequest.newBuilder();
if (LOG.isTraceEnabled()) {
@@ -290,11 +295,13 @@ public class RSProcedureDispatcher
return null;
}
+ @Override
public void dispatchOpenRequests(final MasterProcedureEnv env,
final List<RegionOpenOperation> operations) {
request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations));
}
+ @Override
public void dispatchCloseRequests(final MasterProcedureEnv env,
final List<RegionCloseOperation> operations) {
for (RegionCloseOperation op: operations) {
@@ -471,11 +478,13 @@ public class RSProcedureDispatcher
return null;
}
+ @Override
public void dispatchOpenRequests(final MasterProcedureEnv env,
final List<RegionOpenOperation> operations) {
submitTask(new OpenRegionRemoteCall(serverName, operations));
}
+ @Override
public void dispatchCloseRequests(final MasterProcedureEnv env,
final List<RegionCloseOperation> operations) {
for (RegionCloseOperation op: operations) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
index a8475f0..559863e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
@@ -86,6 +86,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
return false;
}
+ @Override
public void setConf(final Configuration conf) {
super.setConf(conf);
try {
@@ -95,6 +96,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
Path rootDir = FSUtils.getRootDir(conf);
cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
"snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
+ @Override
public Collection<String> filesUnderSnapshot(final Path snapshotDir)
throws IOException {
return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
index 397570c..7436d9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
@@ -54,6 +54,7 @@ public class CachedMobFile extends MobFile implements Comparable<CachedMobFile>
this.accessCount = accessCount;
}
+ @Override
public int compareTo(CachedMobFile that) {
if (this.accessCount == that.accessCount) return 0;
return this.accessCount < that.accessCount ? 1 : -1;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index 053cba6..120f11e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -86,6 +86,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
justification="Intentional")
+ @Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
printUsage();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
index aaf545b..ab917a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
@@ -294,6 +294,7 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest {
this.endKey = endKey;
}
+ @Override
public int compareTo(CompactionDelPartitionId o) {
/*
* 1). Compare the start key, if the k1 < k2, then k1 is less
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
index 6b90e6b..1b6ad91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -74,6 +74,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* monitored Handler.
* @return the queue timestamp or -1 if there is no RPC currently running.
*/
+ @Override
public long getRPCQueueTime() {
if (getState() != State.RUNNING) {
return -1;
@@ -86,6 +87,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* monitored Handler.
* @return the start timestamp or -1 if there is no RPC currently running.
*/
+ @Override
public long getRPCStartTime() {
if (getState() != State.RUNNING) {
return -1;
@@ -98,6 +100,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* by this Handler.
* @return a string representing the method call without parameters
*/
+ @Override
public synchronized String getRPC() {
return getRPC(false);
}
@@ -108,6 +111,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* @param withParams toggle inclusion of parameters in the RPC String
* @return A human-readable string representation of the method call.
*/
+ @Override
public synchronized String getRPC(boolean withParams) {
if (getState() != State.RUNNING) {
// no RPC is currently running
@@ -132,6 +136,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* by this Handler.
* @return A human-readable string representation of the method call.
*/
+ @Override
public long getRPCPacketLength() {
if (getState() != State.RUNNING || packet == null) {
// no RPC is currently running, or we don't have an RPC's packet info
@@ -146,6 +151,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* @return A human-readable string representation of the address and port
* of the client.
*/
+ @Override
public String getClient() {
return clientAddress + ":" + remotePort;
}
@@ -155,6 +161,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* RPC call.
* @return true if the monitored handler is currently servicing an RPC call.
*/
+ @Override
public boolean isRPCRunning() {
return getState() == State.RUNNING;
}
@@ -166,6 +173,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* @return true if the monitored handler is currently servicing an RPC call
* to a database command.
*/
+ @Override
public synchronized boolean isOperationRunning() {
if(!isRPCRunning()) {
return false;
@@ -183,6 +191,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* @param methodName The name of the method that will be called by the RPC.
* @param params The parameters that will be passed to the indicated method.
*/
+ @Override
public synchronized void setRPC(String methodName, Object [] params,
long queueTime) {
this.methodName = methodName;
@@ -197,6 +206,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* that it can later compute its size if asked for it.
* @param param The protobuf received by the RPC for this call
*/
+ @Override
public void setRPCPacket(Message param) {
this.packet = param;
}
@@ -206,6 +216,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
* @param clientAddress the address of the current client
* @param remotePort the port from which the client connected
*/
+ @Override
public void setConnection(String clientAddress, int remotePort) {
this.clientAddress = clientAddress;
this.remotePort = remotePort;
@@ -218,6 +229,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
this.packet = null;
}
+ @Override
public synchronized Map<String, Object> toMap() {
// only include RPC info if the Handler is actively servicing an RPC call
Map<String, Object> map = super.toMap();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
index b3869f4..bedb5e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
@@ -154,6 +154,7 @@ class MonitoredTaskImpl implements MonitoredTask {
* Force the completion timestamp backwards so that
* it expires now.
*/
+ @Override
public void expireNow() {
stateTime -= 180 * 1000;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index 6749d2f..4aff779 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -319,7 +319,7 @@ public class TaskMonitor {
OPERATION("operation"),
ALL("all");
- private String type;
+ private final String type;
private TaskType(String type) {
this.type = type.toLowerCase();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
index 49b344f..fe3edfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
@@ -103,7 +103,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
//
/** lock to prevent nodes from acquiring and then releasing before we can track them */
- private Object joinBarrierLock = new Object();
+ private final Object joinBarrierLock = new Object();
private final List<String> acquiringMembers;
private final List<String> inBarrierMembers;
private final HashMap<String, byte[]> dataFromFinishedMembers;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
index af4d2d7..9ebb1d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
@@ -88,11 +88,9 @@ public abstract class ProcedureManagerHost<E extends ProcedureManager> {
E impl;
Object o = null;
try {
- o = implClass.newInstance();
+ o = implClass.getDeclaredConstructor().newInstance();
impl = (E)o;
- } catch (InstantiationException e) {
- throw new IOException(e);
- } catch (IllegalAccessException e) {
+ } catch (Exception e) {
throw new IOException(e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
index 6416e6a..d15f5ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
@@ -153,6 +153,7 @@ abstract public class Subprocedure implements Callable<Void> {
* Subprocedure, ForeignException)}.
*/
@SuppressWarnings("finally")
+ @Override
final public Void call() {
LOG.debug("Starting subprocedure '" + barrierName + "' with timeout " +
executionTimeoutTimer.getMaxTime() + "ms");
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
index c1fb8f5..71ba28e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
@@ -19,19 +19,21 @@ package org.apache.hadoop.hbase.procedure;
import java.io.IOException;
import java.io.InterruptedIOException;
+import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
/**
* ZooKeeper based {@link ProcedureCoordinatorRpcs} for a {@link ProcedureCoordinator}
*/
@@ -218,8 +220,8 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs {
} else {
dataFromMember = Arrays.copyOfRange(dataFromMember, ProtobufUtil.lengthOfPBMagic(),
dataFromMember.length);
- LOG.debug("Finished data from procedure '" + procName
- + "' member '" + member + "': " + new String(dataFromMember));
+ LOG.debug("Finished data from procedure '{}' member '{}': {}", procName, member,
+ new String(dataFromMember, StandardCharsets.UTF_8));
coordinator.memberFinishedBarrier(procName, member, dataFromMember);
}
} else {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
index ea41ae8..f29d133 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
@@ -348,6 +348,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
}
}
+ @Override
public void start(final String memberName, final ProcedureMember listener) {
LOG.debug("Starting procedure member '" + memberName + "'");
this.member = listener;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
index 976e36b..9eb3fb3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
@@ -157,6 +157,7 @@ public abstract class ZKProcedureUtil
return ZNodePaths.joinZNode(controller.abortZnode, opInstanceName);
}
+ @Override
public ZKWatcher getWatcher() {
return watcher;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index e68a1ce..6783e7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -455,7 +455,7 @@ public class MasterQuotaManager implements RegionStateListener {
}
private static class NamedLock<T> {
- private HashSet<T> locks = new HashSet<>();
+ private final HashSet<T> locks = new HashSet<>();
public void lock(final T name) throws InterruptedException {
synchronized (locks) {
@@ -501,6 +501,7 @@ public class MasterQuotaManager implements RegionStateListener {
return time;
}
+ @Override
public boolean equals(Object o) {
if (o instanceof SizeSnapshotWithTimestamp) {
SizeSnapshotWithTimestamp other = (SizeSnapshotWithTimestamp) o;
@@ -509,6 +510,7 @@ public class MasterQuotaManager implements RegionStateListener {
return false;
}
+ @Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(size).append(time).toHashCode();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 5e20ce9..869ead3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -386,7 +386,8 @@ public class QuotaObserverChore extends ScheduledChore {
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
final SpaceQuotaSnapshot tableQuotaSnapshot =
tableSnapshotStore.getCurrentState(tableInNS);
- final boolean hasTableQuota = QuotaSnapshotStore.NO_QUOTA != tableQuotaSnapshot;
+ final boolean hasTableQuota =
+ !Objects.equals(QuotaSnapshotStore.NO_QUOTA, tableQuotaSnapshot);
if (hasTableQuota && tableQuotaSnapshot.getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
index 0c856b1..852d8a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
@@ -100,6 +100,7 @@ public abstract class RateLimiter {
this.avail = limit;
}
+ @Override
public String toString() {
String rateLimiter = this.getClass().getSimpleName();
if (getLimit() == Long.MAX_VALUE) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
index 2d4414c..b0bdede 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
@@ -53,7 +53,7 @@ public class RegionServerSpaceQuotaManager {
private SpaceQuotaRefresherChore spaceQuotaRefresher;
private AtomicReference<Map<TableName, SpaceQuotaSnapshot>> currentQuotaSnapshots;
private boolean started = false;
- private ConcurrentHashMap<TableName,SpaceViolationPolicyEnforcement> enforcedPolicies;
+ private final ConcurrentHashMap<TableName,SpaceViolationPolicyEnforcement> enforcedPolicies;
private SpaceViolationPolicyEnforcementFactory factory;
public RegionServerSpaceQuotaManager(RegionServerServices rsServices) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java
index 3fb7ad3..f19595f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java
@@ -54,8 +54,8 @@ public class SpaceQuotaSnapshotNotifierFactory {
.getClass(SNAPSHOT_NOTIFIER_KEY, SNAPSHOT_NOTIFIER_DEFAULT,
SpaceQuotaSnapshotNotifier.class);
try {
- return clz.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
+ return clz.getDeclaredConstructor().newInstance();
+ } catch (Exception e) {
throw new IllegalArgumentException("Failed to instantiate the implementation", e);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 8a19908..6dbe0a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -54,7 +54,7 @@ public abstract class AbstractMemStore implements MemStore {
// Used to track when to flush
private volatile long timeOfOldestEdit;
- public final static long FIXED_OVERHEAD = ClassSize.OBJECT
+ public final static long FIXED_OVERHEAD = (long) ClassSize.OBJECT
+ (4 * ClassSize.REFERENCE)
+ (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java
index 232ffe3..9a866a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java
@@ -101,10 +101,13 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate
public void resetStats() {
compactionProbability = initialCompactionProbability;
}
+
+ @Override
protected Action getMergingAction() {
return Action.MERGE_COUNT_UNIQUE_KEYS;
}
+ @Override
protected Action getFlattenAction() {
return Action.FLATTEN;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
index 523ccf2..bf9b191 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
@@ -82,7 +82,7 @@ public class CellChunkImmutableSegment extends ImmutableSegment {
@Override
protected long indexEntrySize() {
- return (ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD);
+ return ((long) ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD);
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
index 6159385..a4fe883 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
@@ -73,113 +73,140 @@ public class CellSet implements NavigableSet<Cell> {
return delegatee;
}
+ @Override
public Cell ceiling(Cell e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Iterator<Cell> descendingIterator() {
return this.delegatee.descendingMap().values().iterator();
}
+ @Override
public NavigableSet<Cell> descendingSet() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Cell floor(Cell e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public SortedSet<Cell> headSet(final Cell toElement) {
return headSet(toElement, false);
}
+ @Override
public NavigableSet<Cell> headSet(final Cell toElement,
boolean inclusive) {
return new CellSet(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES);
}
+ @Override
public Cell higher(Cell e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Iterator<Cell> iterator() {
return this.delegatee.values().iterator();
}
+ @Override
public Cell lower(Cell e) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Cell pollFirst() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Cell pollLast() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public SortedSet<Cell> subSet(Cell fromElement, Cell toElement) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public NavigableSet<Cell> subSet(Cell fromElement,
boolean fromInclusive, Cell toElement, boolean toInclusive) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public SortedSet<Cell> tailSet(Cell fromElement) {
return tailSet(fromElement, true);
}
+ @Override
public NavigableSet<Cell> tailSet(Cell fromElement, boolean inclusive) {
return new CellSet(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES);
}
+ @Override
public Comparator<? super Cell> comparator() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public Cell first() {
return this.delegatee.firstEntry().getValue();
}
+ @Override
public Cell last() {
return this.delegatee.lastEntry().getValue();
}
+ @Override
public boolean add(Cell e) {
return this.delegatee.put(e, e) == null;
}
+ @Override
public boolean addAll(Collection<? extends Cell> c) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public void clear() {
this.delegatee.clear();
}
+ @Override
public boolean contains(Object o) {
//noinspection SuspiciousMethodCalls
return this.delegatee.containsKey(o);
}
+ @Override
public boolean containsAll(Collection<?> c) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public boolean isEmpty() {
return this.delegatee.isEmpty();
}
+ @Override
public boolean remove(Object o) {
return this.delegatee.remove(o) != null;
}
+ @Override
public boolean removeAll(Collection<?> c) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
@@ -188,14 +215,17 @@ public class CellSet implements NavigableSet<Cell> {
return this.delegatee.get(kv);
}
+ @Override
public int size() {
return this.delegatee.size();
}
+ @Override
public Object[] toArray() {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
+ @Override
public <T> T[] toArray(T[] a) {
throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index d874b2e..3cb4103 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -143,7 +143,7 @@ public class CompactingMemStore extends AbstractMemStore {
factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
}
- inmemoryFlushSize *= factor;
+ inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize
+ " and immutable segments index to be of type " + indexType);
}
@@ -365,7 +365,7 @@ public class CompactingMemStore extends AbstractMemStore {
MutableSegment activeTmp = active;
List<? extends Segment> pipelineList = pipeline.getSegments();
List<? extends Segment> snapshotList = snapshot.getAllSegments();
- long order = 1 + pipelineList.size() + snapshotList.size();
+ long order = 1L + pipelineList.size() + snapshotList.size();
// The list of elements in pipeline + the active element + the snapshot segment
// The order is the Segment ordinal
List<KeyValueScanner> list = createList((int) order);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 1aae068..8bd990a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -279,6 +279,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* Dumps all cells of the segment into the given log
*/
+ @Override
void dump(Logger log) {
for (ImmutableSegment s : segments) {
s.dump(log);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index 4539ed6..daae083 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -89,6 +89,7 @@ public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
super.forceSelect(request);
}
+ @Override
public List<Path> compact(ThroughputController throughputController, User user)
throws IOException {
if (request instanceof DateTieredCompactionRequest) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index b3f0a44..26bf640 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -61,11 +61,9 @@ public class DefaultStoreFlusher extends StoreFlusher {
synchronized (flushLock) {
status.setStatus("Flushing " + store + ": creating writer");
// Write the map out to the disk
- writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(),
- /* isCompaction = */ false,
- /* includeMVCCReadpoint = */ true,
- /* includesTags = */ snapshot.isTagsPresent(),
- /* shouldDropBehind = */ false);
+ writer = store.createWriterInTmp(cellsCount,
+ store.getColumnFamilyDescriptor().getCompressionType(), false, true,
+ snapshot.isTagsPresent(), false);
IOException e = null;
try {
performFlush(scanner, writer, smallestReadPoint, throughputController);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index d56a1c2..740eb08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -26,6 +26,7 @@ import java.util.Map;
import java.util.NavigableSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -82,15 +83,15 @@ public class HMobStore extends HStore {
private MobCacheConfig mobCacheConfig;
private Path homePath;
private Path mobFamilyPath;
- private volatile long cellsCountCompactedToMob = 0;
- private volatile long cellsCountCompactedFromMob = 0;
- private volatile long cellsSizeCompactedToMob = 0;
- private volatile long cellsSizeCompactedFromMob = 0;
- private volatile long mobFlushCount = 0;
- private volatile long mobFlushedCellsCount = 0;
- private volatile long mobFlushedCellsSize = 0;
- private volatile long mobScanCellsCount = 0;
- private volatile long mobScanCellsSize = 0;
+ private AtomicLong cellsCountCompactedToMob = new AtomicLong();
+ private AtomicLong cellsCountCompactedFromMob = new AtomicLong();
+ private AtomicLong cellsSizeCompactedToMob = new AtomicLong();
+ private AtomicLong cellsSizeCompactedFromMob = new AtomicLong();
+ private AtomicLong mobFlushCount = new AtomicLong();
+ private AtomicLong mobFlushedCellsCount = new AtomicLong();
+ private AtomicLong mobFlushedCellsSize = new AtomicLong();
+ private AtomicLong mobScanCellsCount = new AtomicLong();
+ private AtomicLong mobScanCellsSize = new AtomicLong();
private ColumnFamilyDescriptor family;
private Map<String, List<Path>> map = new ConcurrentHashMap<>();
private final IdLock keyLock = new IdLock();
@@ -453,76 +454,75 @@ public class HMobStore extends HStore {
}
public void updateCellsCountCompactedToMob(long count) {
- cellsCountCompactedToMob += count;
+ cellsCountCompactedToMob.addAndGet(count);
}
public long getCellsCountCompactedToMob() {
- return cellsCountCompactedToMob;
+ return cellsCountCompactedToMob.get();
}
public void updateCellsCountCompactedFromMob(long count) {
- cellsCountCompactedFromMob += count;
+ cellsCountCompactedFromMob.addAndGet(count);
}
public long getCellsCountCompactedFromMob() {
- return cellsCountCompactedFromMob;
+ return cellsCountCompactedFromMob.get();
}
public void updateCellsSizeCompactedToMob(long size) {
- cellsSizeCompactedToMob += size;
+ cellsSizeCompactedToMob.addAndGet(size);
}
public long getCellsSizeCompactedToMob() {
- return cellsSizeCompactedToMob;
+ return cellsSizeCompactedToMob.get();
}
public void updateCellsSizeCompactedFromMob(long size) {
- cellsSizeCompactedFromMob += size;
+ cellsSizeCompactedFromMob.addAndGet(size);
}
public long getCellsSizeCompactedFromMob() {
- return cellsSizeCompactedFromMob;
+ return cellsSizeCompactedFromMob.get();
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT")
public void updateMobFlushCount() {
- mobFlushCount++;
+ mobFlushCount.incrementAndGet();
}
public long getMobFlushCount() {
- return mobFlushCount;
+ return mobFlushCount.get();
}
public void updateMobFlushedCellsCount(long count) {
- mobFlushedCellsCount += count;
+ mobFlushedCellsCount.addAndGet(count);
}
public long getMobFlushedCellsCount() {
- return mobFlushedCellsCount;
+ return mobFlushedCellsCount.get();
}
public void updateMobFlushedCellsSize(long size) {
- mobFlushedCellsSize += size;
+ mobFlushedCellsSize.addAndGet(size);
}
public long getMobFlushedCellsSize() {
- return mobFlushedCellsSize;
+ return mobFlushedCellsSize.get();
}
public void updateMobScanCellsCount(long count) {
- mobScanCellsCount += count;
+ mobScanCellsCount.addAndGet(count);
}
public long getMobScanCellsCount() {
- return mobScanCellsCount;
+ return mobScanCellsCount.get();
}
public void updateMobScanCellsSize(long size) {
- mobScanCellsSize += size;
+ mobScanCellsSize.addAndGet(size);
}
public long getMobScanCellsSize() {
- return mobScanCellsSize;
+ return mobScanCellsSize.get();
}
public byte[] getRefCellTags() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e18c80e..c0ccc1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -26,6 +26,7 @@ import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.AbstractList;
import java.util.ArrayList;
@@ -1015,7 +1016,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
long storeMaxSequenceId = store.getMaxSequenceId().orElse(0L);
- maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
+ maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()),
storeMaxSequenceId);
if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
maxSeqId = storeMaxSequenceId;
@@ -5524,7 +5525,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
HStore store = this.stores.get(column);
if (store == null) {
throw new IllegalArgumentException(
- "No column family : " + new String(column) + " available");
+ "No column family : " + new String(column, StandardCharsets.UTF_8) + " available");
}
Collection<HStoreFile> storeFiles = store.getStorefiles();
if (storeFiles == null) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 37ec595..bd7b4a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -904,7 +904,7 @@ public class HRegionServer extends HasThread implements
*/
private boolean isClusterUp() {
return this.masterless ||
- this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp();
+ (this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp());
}
/**
@@ -1745,7 +1745,7 @@ public class HRegionServer extends HasThread implements
if (r.shouldFlush(whyFlush)) {
FlushRequester requester = server.getFlushRequester();
if (requester != null) {
- long randomDelay = RandomUtils.nextInt(0, RANGE_OF_DELAY) + MIN_DELAY_TIME;
+ long randomDelay = (long) RandomUtils.nextInt(0, RANGE_OF_DELAY) + MIN_DELAY_TIME;
LOG.info(getName() + " requesting flush of " +
r.getRegionInfo().getRegionNameAsString() + " because " +
whyFlush.toString() +
@@ -3111,13 +3111,13 @@ public class HRegionServer extends HasThread implements
}
}
- final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(encodedName.getBytes(),
+ final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(Bytes.toBytes(encodedName),
Boolean.FALSE);
if (Boolean.TRUE.equals(previous)) {
LOG.info("Received CLOSE for the region:" + encodedName + " , which we are already " +
"trying to OPEN. Cancelling OPENING.");
- if (!regionsInTransitionInRS.replace(encodedName.getBytes(), previous, Boolean.FALSE)){
+ if (!regionsInTransitionInRS.replace(Bytes.toBytes(encodedName), previous, Boolean.FALSE)) {
// The replace failed. That should be an exceptional case, but theoretically it can happen.
// We're going to try to do a standard close then.
LOG.warn("The opening for region " + encodedName + " was done before we could cancel it." +
@@ -3140,7 +3140,7 @@ public class HRegionServer extends HasThread implements
if (actualRegion == null) {
LOG.debug("Received CLOSE for a region which is not online, and we're not opening.");
- this.regionsInTransitionInRS.remove(encodedName.getBytes());
+ this.regionsInTransitionInRS.remove(Bytes.toBytes(encodedName));
// The master deletes the znode when it receives this exception.
throw new NotServingRegionException("The region " + encodedName +
" is not online, and is not opening.");
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
index d3509c2..afd85f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
@@ -44,6 +44,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
this.regionServerClass = clazz;
}
+ @Override
protected String getUsage() {
return USAGE;
}
@@ -73,6 +74,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
return 0;
}
+ @Override
public int run(String args[]) throws Exception {
if (args.length != 1) {
usage(null);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 01121dd..f228d44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
@@ -149,8 +150,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
volatile boolean forceMajor = false;
/* how many bytes to write between status checks */
static int closeCheckInterval = 0;
- private volatile long storeSize = 0L;
- private volatile long totalUncompressedBytes = 0L;
+ private AtomicLong storeSize = new AtomicLong();
+ private AtomicLong totalUncompressedBytes = new AtomicLong();
/**
* RWLock for store operations.
@@ -209,13 +210,13 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
private int compactionCheckMultiplier;
protected Encryption.Context cryptoContext = Encryption.Context.NONE;
- private volatile long flushedCellsCount = 0;
- private volatile long compactedCellsCount = 0;
- private volatile long majorCompactedCellsCount = 0;
- private volatile long flushedCellsSize = 0;
- private volatile long flushedOutputFileSize = 0;
- private volatile long compactedCellsSize = 0;
- private volatile long majorCompactedCellsSize = 0;
+ private AtomicLong flushedCellsCount = new AtomicLong();
+ private AtomicLong compactedCellsCount = new AtomicLong();
+ private AtomicLong majorCompactedCellsCount = new AtomicLong();
+ private AtomicLong flushedCellsSize = new AtomicLong();
+ private AtomicLong flushedOutputFileSize = new AtomicLong();
+ private AtomicLong compactedCellsSize = new AtomicLong();
+ private AtomicLong majorCompactedCellsSize = new AtomicLong();
/**
* Constructor
@@ -544,8 +545,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
HStoreFile storeFile = completionService.take().get();
if (storeFile != null) {
long length = storeFile.getReader().length();
- this.storeSize += length;
- this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
+ this.storeSize.addAndGet(length);
+ this.totalUncompressedBytes
+ .addAndGet(storeFile.getReader().getTotalUncompressedBytes());
LOG.debug("loaded {}", storeFile);
results.add(storeFile);
}
@@ -844,8 +846,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
private void bulkLoadHFile(HStoreFile sf) throws IOException {
StoreFileReader r = sf.getReader();
- this.storeSize += r.length();
- this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+ this.storeSize.addAndGet(r.length());
+ this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
// Append the new storefile into the list
this.lock.writeLock().lock();
@@ -1021,8 +1023,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
HStoreFile sf = createStoreFileAndReader(dstPath);
StoreFileReader r = sf.getReader();
- this.storeSize += r.length();
- this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+ this.storeSize.addAndGet(r.length());
+ this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
if (LOG.isInfoEnabled()) {
LOG.info("Added " + sf + ", entries=" + r.getEntries() +
@@ -1373,11 +1375,11 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
writeCompactionWalRecord(filesToCompact, sfs);
replaceStoreFiles(filesToCompact, sfs);
if (cr.isMajor()) {
- majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
- majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
+ majorCompactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs);
+ majorCompactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
} else {
- compactedCellsCount += getCompactionProgress().totalCompactingKVs;
- compactedCellsSize += getCompactionProgress().totalCompactedSize;
+ compactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs);
+ compactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
}
long outputBytes = getTotalSize(sfs);
@@ -1478,7 +1480,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
}
}
message.append("total size for store is ")
- .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize, "", 1))
+ .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
.append(". This selection was in queue for ")
.append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
.append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
@@ -1772,7 +1774,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
completeCompaction(delSfs);
LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
+ this + " of " + this.getRegionInfo().getRegionNameAsString()
- + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize, "", 1));
+ + "; total size for store is "
+ + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
}
public void cancelRequestedCompaction(CompactionContext compaction) {
@@ -1826,16 +1829,16 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
@VisibleForTesting
protected void completeCompaction(Collection<HStoreFile> compactedFiles)
throws IOException {
- this.storeSize = 0L;
- this.totalUncompressedBytes = 0L;
+ this.storeSize.set(0L);
+ this.totalUncompressedBytes.set(0L);
for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {
StoreFileReader r = hsf.getReader();
if (r == null) {
LOG.warn("StoreFile {} has a null Reader", hsf);
continue;
}
- this.storeSize += r.length();
- this.totalUncompressedBytes += r.getTotalUncompressedBytes();
+ this.storeSize.addAndGet(r.length());
+ this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
}
}
@@ -1896,7 +1899,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
@Override
public long getSize() {
- return storeSize;
+ return storeSize.get();
}
public void triggerMajorCompaction() {
@@ -2043,7 +2046,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
@Override
public long getStoreSizeUncompressed() {
- return this.totalUncompressedBytes;
+ return this.totalUncompressedBytes.get();
}
@Override
@@ -2235,9 +2238,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
committedFiles.add(sf.getPath());
}
- HStore.this.flushedCellsCount += cacheFlushCount;
- HStore.this.flushedCellsSize += cacheFlushSize;
- HStore.this.flushedOutputFileSize += outputFileSize;
+ HStore.this.flushedCellsCount.addAndGet(cacheFlushCount);
+ HStore.this.flushedCellsSize.addAndGet(cacheFlushSize);
+ HStore.this.flushedOutputFileSize.addAndGet(outputFileSize);
// Add new file to store files. Clear snapshot too while we have the Store write lock.
return HStore.this.updateStorefiles(storeFiles, snapshot.getId());
@@ -2270,8 +2273,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
storeFiles.add(storeFile);
- HStore.this.storeSize += storeFile.getReader().length();
- HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
+ HStore.this.storeSize.addAndGet(storeFile.getReader().length());
+ HStore.this.totalUncompressedBytes
+ .addAndGet(storeFile.getReader().getTotalUncompressedBytes());
if (LOG.isInfoEnabled()) {
LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
" added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
@@ -2315,7 +2319,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
}
public static final long FIXED_OVERHEAD =
- ClassSize.align(ClassSize.OBJECT + (17 * ClassSize.REFERENCE) + (11 * Bytes.SIZEOF_LONG)
+ ClassSize.align(ClassSize.OBJECT + (26 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG)
+ (5 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
@@ -2354,37 +2358,37 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
@Override
public long getFlushedCellsCount() {
- return flushedCellsCount;
+ return flushedCellsCount.get();
}
@Override
public long getFlushedCellsSize() {
- return flushedCellsSize;
+ return flushedCellsSize.get();
}
@Override
public long getFlushedOutputFileSize() {
- return flushedOutputFileSize;
+ return flushedOutputFileSize.get();
}
@Override
public long getCompactedCellsCount() {
- return compactedCellsCount;
+ return compactedCellsCount.get();
}
@Override
public long getCompactedCellsSize() {
- return compactedCellsSize;
+ return compactedCellsSize.get();
}
@Override
public long getMajorCompactedCellsCount() {
- return majorCompactedCellsCount;
+ return majorCompactedCellsCount.get();
}
@Override
public long getMajorCompactedCellsSize() {
- return majorCompactedCellsSize;
+ return majorCompactedCellsSize.get();
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index 19a63b4..21446d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -95,7 +95,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
}
}
- return foundABigStore | force;
+ return foundABigStore || force;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
index 1e71bc8..fe52758 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
@@ -67,5 +67,6 @@ public interface InternalScanner extends Closeable {
* Closes the scanner and releases any resources it has allocated
* @throws IOException
*/
+ @Override
void close() throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
index 779ed49..053ae99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
@@ -104,6 +104,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
}
}
+ @Override
public Cell peek() {
if (this.current == null) {
return null;
@@ -111,6 +112,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
return this.current.peek();
}
+ @Override
public Cell next() throws IOException {
if(this.current == null) {
return null;
@@ -182,6 +184,8 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
public KVScannerComparator(CellComparator kvComparator) {
this.kvComparator = kvComparator;
}
+
+ @Override
public int compare(KeyValueScanner left, KeyValueScanner right) {
int comparison = compare(left.peek(), right.peek());
if (comparison != 0) {
@@ -210,6 +214,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
}
}
+ @Override
public void close() {
for (KeyValueScanner scanner : this.scannersForDelayedClose) {
scanner.close();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
index 796f7c9..864cc06 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
@@ -84,6 +84,7 @@ public interface KeyValueScanner extends Shipper, Closeable {
/**
* Close the KeyValue scanner.
*/
+ @Override
void close();
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index d564e40..a8c3362 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -167,8 +167,8 @@ public class MemStoreCompactor {
// Substitute the pipeline with one segment
if (!isInterrupted.get()) {
- if (resultSwapped = compactingMemStore.swapCompactedSegments(
- versionedList, result, merge)) {
+ resultSwapped = compactingMemStore.swapCompactedSegments(versionedList, result, merge);
+ if (resultSwapped) {
// update compaction strategy
strategy.updateStats(result);
// update the wal so it can be truncated and not get too long
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index fdee404..f7493b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.BlockingQueue;
@@ -714,8 +715,13 @@ class MemStoreFlusher implements FlushRequester {
}
@Override
+ public int hashCode() {
+ return System.identityHashCode(this);
+ }
+
+ @Override
public boolean equals(Object obj) {
- return (this == obj);
+ return Objects.equals(this, obj);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
index f43573e..02824ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
@@ -76,6 +76,7 @@ public class MemStoreMergerSegmentsIterator extends MemStoreSegmentsIterator {
return null;
}
+ @Override
public void close() {
if (closed) {
return;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index eaaa4ae..09929e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -788,7 +788,8 @@ class MetricsRegionServerWrapperImpl
OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge();
if (storeAvgStoreFileAge.isPresent()) {
- avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles;
+ avgAgeNumerator =
+ (long) (avgAgeNumerator + storeAvgStoreFileAge.getAsDouble() * storeHFiles);
}
tempStorefileIndexSize += store.getStorefilesRootLevelIndexSize();
@@ -931,6 +932,7 @@ class MetricsRegionServerWrapperImpl
return averageRegionSize;
}
+ @Override
public long getDataMissCount() {
if (this.cacheStats == null) {
return 0;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 2aa1a82..533a05d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -249,7 +249,7 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge();
if (storeAvgStoreFileAge.isPresent()) {
- avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles;
+ avgAgeNumerator += (long) (storeAvgStoreFileAge.getAsDouble() * storeHFiles);
}
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index 54095e0..0c3551b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -74,8 +74,12 @@ public class MultiVersionConcurrencyControl {
public void advanceTo(long newStartPoint) {
while (true) {
long seqId = this.getWritePoint();
- if (seqId >= newStartPoint) break;
- if (this.tryAdvanceTo(/* newSeqId = */ newStartPoint, /* expected = */ seqId)) break;
+ if (seqId >= newStartPoint) {
+ break;
+ }
+ if (this.tryAdvanceTo(newStartPoint, seqId)) {
+ break;
+ }
}
}
@@ -239,6 +243,7 @@ public class MultiVersionConcurrencyControl {
}
@VisibleForTesting
+ @Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("readPoint", readPoint)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index b6c0ebe..5a01581 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2807,7 +2807,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
private static final long serialVersionUID = -4305297078988180130L;
@Override
- public Throwable fillInStackTrace() {
+ public synchronized Throwable fillInStackTrace() {
return this;
}
};
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
index dc1708c..1986668 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
@@ -279,6 +279,7 @@ public class RegionServerCoprocessorHost extends
* @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor
* consumption.
*/
+ @Override
public RegionServerServices getRegionServerServices() {
return this.regionServerServices;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 017e0fb..6b2267f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -435,8 +435,8 @@ public class ScannerContext {
TIME_LIMIT_REACHED_MID_ROW(true, true),
BATCH_LIMIT_REACHED(true, true);
- private boolean moreValues;
- private boolean limitReached;
+ private final boolean moreValues;
+ private final boolean limitReached;
private NextState(boolean moreValues, boolean limitReached) {
this.moreValues = moreValues;
@@ -492,13 +492,13 @@ public class ScannerContext {
* limits, the checker must know their own scope (i.e. are they checking the limits between
* rows, between cells, etc...)
*/
- int depth;
+ final int depth;
LimitScope(int depth) {
this.depth = depth;
}
- int depth() {
+ final int depth() {
return depth;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
index b67b54e..0b1d251 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
@@ -97,7 +97,7 @@ public class ServerNonceManager {
}
public boolean isExpired(long minRelevantTime) {
- return getActivityTime() < (minRelevantTime & (~0l >>> 3));
+ return getActivityTime() < (minRelevantTime & (~0L >>> 3));
}
public void setMvcc(long mvcc) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
index 4f3e0f2..9753080 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
@@ -24,6 +24,7 @@ public class SteppingSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy
* This allows a table to spread quickly across servers, while avoiding creating
* too many regions.
*/
+ @Override
protected long getSizeToCheck(final int tableRegionsCount) {
return tableRegionsCount == 1 ? this.initialSize : getDesiredMaxFileSize();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 89b2acd..80d0ad7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -187,14 +187,17 @@ public class StoreFileScanner implements KeyValueScanner {
return scanners;
}
+ @Override
public String toString() {
return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]";
}
+ @Override
public Cell peek() {
return cur;
}
+ @Override
public Cell next() throws IOException {
Cell retKey = cur;
@@ -215,6 +218,7 @@ public class StoreFileScanner implements KeyValueScanner {
return retKey;
}
+ @Override
public boolean seek(Cell key) throws IOException {
if (seekCount != null) seekCount.increment();
@@ -242,6 +246,7 @@ public class StoreFileScanner implements KeyValueScanner {
}
}
+ @Override
public boolean reseek(Cell key) throws IOException {
if (seekCount != null) seekCount.increment();
@@ -298,6 +303,7 @@ public class StoreFileScanner implements KeyValueScanner {
return true;
}
+ @Override
public void close() {
if (closed) return;
cur = null;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 595231f..59b91d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -104,12 +104,8 @@ public class StripeStoreFlusher extends StoreFlusher {
return new StripeMultiFileWriter.WriterFactory() {
@Override
public StoreFileWriter createWriter() throws IOException {
- StoreFileWriter writer = store.createWriterInTmp(
- kvCount, store.getColumnFamilyDescriptor().getCompressionType(),
- /* isCompaction = */ false,
- /* includeMVCCReadpoint = */ true,
- /* includesTags = */ true,
- /* shouldDropBehind = */ false);
+ StoreFileWriter writer = store.createWriterInTmp(kvCount,
+ store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
return writer;
}
};
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index ed4a025..056f076 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.CellSink;
-import org.apache.hadoop.hbase.regionserver.CustomizedScanInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -261,10 +260,8 @@ public abstract class Compactor<T extends CellSink> {
throws IOException {
// When all MVCC readpoints are 0, don't write them.
// See HBASE-8166, HBASE-12600, and HBASE-13389.
- return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression,
- /* isCompaction = */true,
- /* includeMVCCReadpoint = */fd.maxMVCCReadpoint > 0,
- /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind);
+ return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
+ fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind);
}
private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType,
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index 905562c..cf04d00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -108,6 +108,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
}
}
+ @Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
throws IOException {
long mcTime = getNextMajorCompactTime(filesToCompact);
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index a6ea9b2..e0be6cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -209,6 +209,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy {
* @param filesCompacting files being scheduled to compact.
* @return true to schedule a request.
*/
+ @Override
public boolean needsCompaction(Collection<HStoreFile> storeFiles,
List<HStoreFile> filesCompacting) {
int numCandidates = storeFiles.size() - filesCompacting.size();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 4f6aba9..3eb830a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -106,6 +106,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
* @param filesToCompact Files to compact. Can be null.
* @return True if we should run a major compaction.
*/
+ @Override
public abstract boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
throws IOException;
@@ -154,6 +155,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
* @param compactionSize Total size of some compaction
* @return whether this should be a large or small compaction
*/
+ @Override
public boolean throttleCompaction(long compactionSize) {
return compactionSize > comConf.getThrottlePoint();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
index 85394fd..c0f13c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
@@ -89,10 +89,12 @@ public class ExplicitColumnTracker implements ColumnTracker {
/**
* Done when there are no more columns to match against.
*/
+ @Override
public boolean done() {
return this.index >= columns.length;
}
+ @Override
public ColumnCount getColumnHint() {
return this.column;
}
@@ -182,6 +184,7 @@ public class ExplicitColumnTracker implements ColumnTracker {
}
// Called between every row.
+ @Override
public void reset() {
this.index = 0;
this.column = this.columns[this.index];
@@ -240,6 +243,7 @@ public class ExplicitColumnTracker implements ColumnTracker {
}
}
+ @Override
public boolean isDone(long timestamp) {
return minVersions <= 0 && isExpired(timestamp);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
index 419e93b..f2ad1e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
@@ -180,6 +180,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
* scanner).
* @return The column count.
*/
+ @Override
public ColumnCount getColumnHint() {
return null;
}
@@ -205,6 +206,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
}
}
+ @Override
public boolean isDone(long timestamp) {
return minVersions <= 0 && isExpired(timestamp);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 0ace782..faf3b77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -346,7 +346,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
long currentHighestProcessedAppendTxid = highestProcessedAppendTxid;
highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid;
final long startTimeNs = System.nanoTime();
- final long epoch = epochAndState >>> 2;
+ final long epoch = (long) epochAndState >>> 2L;
writer.sync().whenCompleteAsync((result, error) -> {
if (error != null) {
syncFailed(epoch, error);