You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2017/11/18 01:22:38 UTC
[10/30] hbase git commit: HBASE-19239 Fix findbugs and error-prone
issues
HBASE-19239 Fix findbugs and error-prone issues
Fixes for hbase-server
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/108ea30e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/108ea30e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/108ea30e
Branch: refs/heads/branch-1.4
Commit: 108ea30e3d84150b64fd644747c2f17170594704
Parents: daf1fc6
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Nov 15 18:47:45 2017 -0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Fri Nov 17 17:12:10 2017 -0800
----------------------------------------------------------------------
.../hadoop/hbase/conf/ConfigurationManager.java | 2 +-
.../hbase/coprocessor/CoprocessorHost.java | 8 +-
.../org/apache/hadoop/hbase/fs/HFileSystem.java | 2 +
.../hbase/http/SslSocketConnectorSecure.java | 1 +
.../hbase/io/FSDataInputStreamWrapper.java | 2 +-
.../org/apache/hadoop/hbase/io/Reference.java | 1 +
.../hbase/io/hfile/AbstractHFileReader.java | 3 +
.../hadoop/hbase/io/hfile/BlockCache.java | 1 +
.../hadoop/hbase/io/hfile/FixedFileTrailer.java | 9 +-
.../org/apache/hadoop/hbase/io/hfile/HFile.java | 20 +++++
.../hadoop/hbase/io/hfile/HFileBlock.java | 18 ++++
.../hadoop/hbase/io/hfile/HFileWriterV3.java | 1 +
.../hadoop/hbase/io/hfile/LruBlockCache.java | 10 ++-
.../hadoop/hbase/io/hfile/LruCachedBlock.java | 1 +
.../hbase/io/hfile/bucket/BucketAllocator.java | 3 +-
.../hbase/io/hfile/bucket/BucketCache.java | 46 +++++++++-
.../hbase/io/hfile/bucket/CachedEntryQueue.java | 2 +-
.../apache/hadoop/hbase/ipc/BufferChain.java | 2 +-
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 10 +--
.../hadoop/hbase/mapred/GroupingTableMap.java | 1 +
.../hadoop/hbase/mapred/TableInputFormat.java | 1 +
.../hbase/mapred/TableInputFormatBase.java | 2 +
.../hadoop/hbase/mapred/TableRecordReader.java | 6 ++
.../hadoop/hbase/mapreduce/CellCounter.java | 9 +-
.../hbase/mapreduce/HFileOutputFormat2.java | 6 +-
.../hbase/mapreduce/IdentityTableMapper.java | 1 +
.../mapreduce/MultithreadedTableMapper.java | 10 ++-
.../hbase/master/ActiveMasterManager.java | 2 +
.../hadoop/hbase/master/AssignmentManager.java | 10 ++-
.../master/AssignmentVerificationReport.java | 6 +-
.../hbase/master/ClusterStatusPublisher.java | 9 +-
.../apache/hadoop/hbase/master/DeadServer.java | 1 +
.../org/apache/hadoop/hbase/master/HMaster.java | 3 +
.../hadoop/hbase/master/HMasterCommandLine.java | 3 +-
.../hadoop/hbase/master/LoadBalancer.java | 2 +-
.../hadoop/hbase/master/MasterRpcServices.java | 2 +-
.../hadoop/hbase/master/ServerManager.java | 3 +-
.../hadoop/hbase/master/SplitLogManager.java | 2 +-
.../hadoop/hbase/master/TableLockManager.java | 2 +
.../master/balancer/RegionLocationFinder.java | 2 +-
.../master/balancer/StochasticLoadBalancer.java | 6 +-
.../hbase/master/cleaner/CleanerChore.java | 4 +-
.../hbase/master/handler/TableEventHandler.java | 1 +
.../procedure/MasterProcedureScheduler.java | 7 ++
.../master/snapshot/RestoreSnapshotHandler.java | 1 -
.../master/snapshot/TakeSnapshotHandler.java | 1 +
.../hadoop/hbase/procedure/Procedure.java | 2 +-
.../hbase/procedure/ProcedureManagerHost.java | 6 +-
.../hadoop/hbase/procedure/Subprocedure.java | 1 +
.../hadoop/hbase/quotas/MasterQuotaManager.java | 4 +-
.../apache/hadoop/hbase/quotas/QuotaCache.java | 4 +-
.../hbase/regionserver/CompactSplitThread.java | 1 +
.../hadoop/hbase/regionserver/HRegion.java | 8 +-
.../hbase/regionserver/HRegionServer.java | 13 ++-
.../hadoop/hbase/regionserver/HStore.java | 82 +++++++++---------
...IncreasingToUpperBoundRegionSplitPolicy.java | 2 +-
.../hbase/regionserver/InternalScanner.java | 1 +
.../hadoop/hbase/regionserver/KeyValueHeap.java | 5 ++
.../hadoop/hbase/regionserver/Leases.java | 2 +
.../hadoop/hbase/regionserver/LogRoller.java | 6 +-
.../hbase/regionserver/MemStoreFlusher.java | 8 +-
.../MultiVersionConcurrencyControl.java | 3 +-
.../hbase/regionserver/RSRpcServices.java | 12 +--
.../regionserver/RegionCoprocessorHost.java | 12 +--
.../hbase/regionserver/ScannerContext.java | 6 +-
.../hbase/regionserver/SplitLogWorker.java | 6 +-
.../regionserver/SplitTransactionImpl.java | 2 +
.../hadoop/hbase/regionserver/StoreFile.java | 1 +
.../hbase/regionserver/StoreFileInfo.java | 72 +++++++++-------
.../hbase/regionserver/StoreFileScanner.java | 6 ++
.../hbase/regionserver/TimeRangeTracker.java | 2 +
.../compactions/CompactionRequest.java | 12 ++-
.../compactions/RatioBasedCompactionPolicy.java | 1 +
.../compactions/SortedCompactionPolicy.java | 2 +
.../regionserver/handler/OpenRegionHandler.java | 2 +
.../hadoop/hbase/regionserver/wal/FSHLog.java | 2 +-
.../hbase/regionserver/wal/FSWALEntry.java | 1 +
.../regionserver/wal/ProtobufLogReader.java | 6 +-
.../hbase/regionserver/wal/RingBufferTruck.java | 1 +
.../regionserver/ReplicationSource.java | 2 +-
.../token/AuthenticationTokenSecretManager.java | 1 +
.../security/visibility/ExpressionParser.java | 4 +-
.../expression/LeafExpressionNode.java | 1 +
.../expression/NonLeafExpressionNode.java | 1 +
.../visibility/expression/Operator.java | 3 +-
.../hadoop/hbase/snapshot/ExportSnapshot.java | 1 +
.../hbase/snapshot/RestoreSnapshotHelper.java | 2 +-
.../hadoop/hbase/util/FSTableDescriptors.java | 2 +-
.../org/apache/hadoop/hbase/util/FSUtils.java | 3 +-
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 14 ++--
.../hadoop/hbase/util/IdReadWriteLock.java | 3 +
.../org/apache/hadoop/hbase/util/JSONBean.java | 6 +-
.../hadoop/hbase/util/RegionSplitter.java | 2 +
.../hadoop/hbase/wal/DefaultWALProvider.java | 5 +-
.../java/org/apache/hadoop/hbase/wal/WAL.java | 1 +
.../org/apache/hadoop/hbase/wal/WALFactory.java | 17 ++--
.../org/apache/hadoop/hbase/wal/WALKey.java | 4 +-
.../apache/hadoop/hbase/wal/WALSplitter.java | 30 +++----
.../hbase/zookeeper/MiniZooKeeperCluster.java | 7 +-
.../hbase/zookeeper/RegionServerTracker.java | 2 +-
.../hbase-webapps/master/snapshotsStats.jsp | 48 ++++++-----
.../resources/hbase-webapps/master/table.jsp | 16 +++-
.../hbase-webapps/regionserver/region.jsp | 2 +-
.../apache/hadoop/hbase/GenericTestUtils.java | 2 +-
.../hadoop/hbase/PerformanceEvaluation.java | 2 +-
.../org/apache/hadoop/hbase/TestIOFencing.java | 19 +++--
.../apache/hadoop/hbase/client/TestAdmin1.java | 30 +------
.../apache/hadoop/hbase/client/TestAdmin2.java | 2 +-
.../hadoop/hbase/client/TestFastFail.java | 12 ++-
.../org/apache/hadoop/hbase/client/TestHCM.java | 9 +-
.../client/TestHTableMultiplexerFlushCache.java | 2 +-
.../hadoop/hbase/client/TestMultiParallel.java | 13 +--
.../hbase/client/TestMvccConsistentScanner.java | 2 +
.../coprocessor/TestAggregateProtocol.java | 3 +-
...TestMasterCoprocessorExceptionWithAbort.java | 3 +
...estMasterCoprocessorExceptionWithRemove.java | 4 +-
...gionServerCoprocessorExceptionWithAbort.java | 13 +--
...ionServerCoprocessorExceptionWithRemove.java | 4 +-
.../hbase/executor/TestExecutorService.java | 2 +
.../hbase/http/HttpServerFunctionalTest.java | 15 ++--
.../io/encoding/TestPrefixTreeEncoding.java | 2 +-
.../hbase/io/hfile/TestHFileBlockIndex.java | 4 +-
.../hbase/io/hfile/bucket/TestBucketCache.java | 13 ++-
.../TestMultiTableSnapshotInputFormat.java | 1 +
.../TestMultiTableSnapshotInputFormatImpl.java | 37 ++++++--
.../mapreduce/TestMultithreadedTableMapper.java | 2 +-
.../mapreduce/TestTableSnapshotInputFormat.java | 22 +++--
.../hbase/mapreduce/TestWALRecordReader.java | 1 -
.../TestCatalogJanitorInMemoryStates.java | 3 +-
.../master/TestDistributedLogSplitting.java | 9 +-
.../hadoop/hbase/master/TestMasterFailover.java | 3 +
.../hbase/master/TestRegionPlacement.java | 7 +-
.../balancer/TestDefaultLoadBalancer.java | 4 +-
.../TestFavoredNodeAssignmentHelper.java | 7 +-
.../procedure/TestCreateTableProcedure2.java | 2 +-
.../master/snapshot/TestSnapshotFileCache.java | 38 +++++----
.../procedure/SimpleMasterProcedureManager.java | 2 +-
.../regionserver/DataBlockEncodingTool.java | 5 +-
.../hbase/regionserver/TestCompaction.java | 1 +
.../TestCompactionArchiveConcurrentClose.java | 2 +
.../hbase/regionserver/TestFSErrorsExposed.java | 5 +-
.../hadoop/hbase/regionserver/TestHRegion.java | 14 ++--
.../hbase/regionserver/TestJoinedScanners.java | 2 +-
.../TestMultiVersionConcurrencyControl.java | 1 -
.../TestRegionMergeTransaction.java | 2 +-
.../regionserver/TestRegionServerHostname.java | 1 +
.../TestRegionServerReportForDuty.java | 3 +-
.../TestScannerRetriableFailure.java | 1 -
.../regionserver/TestSplitTransaction.java | 40 +++++----
.../hbase/regionserver/TestStoreFile.java | 3 -
.../TestStripeStoreFileManager.java | 7 +-
.../TestCompactedHFilesDischarger.java | 6 +-
.../hbase/regionserver/wal/TestFSHLog.java | 5 +-
.../hbase/regionserver/wal/TestWALReplay.java | 3 +-
.../TestReplicationSourceManager.java | 2 +
.../hbase/security/token/SecureTestCluster.java | 2 +-
.../visibility/TestVisibilityLabels.java | 1 +
.../hbase/snapshot/TestExportSnapshot.java | 1 -
.../hbase/util/LauncherSecurityManager.java | 5 ++
.../hadoop/hbase/util/MultiThreadedAction.java | 10 ++-
.../hadoop/hbase/util/MultiThreadedUpdater.java | 11 +--
.../hbase/util/MultiThreadedUpdaterWithACL.java | 1 -
.../hadoop/hbase/util/MultiThreadedWriter.java | 3 +-
.../util/ProcessBasedLocalHBaseCluster.java | 13 +--
.../hadoop/hbase/util/TestEncryptionTest.java | 7 +-
.../apache/hadoop/hbase/util/TestHBaseFsck.java | 4 +-
.../hbase/wal/TestDefaultWALProvider.java | 2 -
.../hadoop/hbase/wal/TestWALFiltering.java | 5 +-
.../hbase/wal/WALPerformanceEvaluation.java | 88 ++++++++++----------
169 files changed, 782 insertions(+), 484 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
index 1701588..1e9bc8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
@@ -79,7 +79,7 @@ public class ConfigurationManager {
// notified when the configuration is reloaded from disk. This is a set
// constructed from a WeakHashMap, whose entries would be removed if the
// observer classes go out of scope.
- private Set<ConfigurationObserver> configurationObservers =
+ private final Set<ConfigurationObserver> configurationObservers =
Collections.newSetFromMap(new WeakHashMap<ConfigurationObserver,
Boolean>());
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index f2b201b..275218f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -263,11 +263,9 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
Coprocessor impl;
Object o = null;
try {
- o = implClass.newInstance();
+ o = implClass.getDeclaredConstructor().newInstance();
impl = (Coprocessor)o;
- } catch (InstantiationException e) {
- throw new IOException(e);
- } catch (IllegalAccessException e) {
+ } catch (Exception e) {
throw new IOException(e);
}
// create the environment
@@ -402,7 +400,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
/** Current coprocessor state */
Coprocessor.State state = Coprocessor.State.UNINSTALLED;
/** Accounting for tables opened by the coprocessor */
- protected List<HTableInterface> openTables =
+ protected final List<HTableInterface> openTables =
Collections.synchronizedList(new ArrayList<HTableInterface>());
private int seq;
private Configuration conf;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index daa5580..754ea65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -295,6 +295,7 @@ public class HFileSystem extends FilterFileSystem {
(cp.getClass().getClassLoader(),
new Class[]{ClientProtocol.class, Closeable.class},
new InvocationHandler() {
+ @Override
public Object invoke(Object proxy, Method method,
Object[] args) throws Throwable {
try {
@@ -354,6 +355,7 @@ public class HFileSystem extends FilterFileSystem {
* datanode is actually dead, so if we use it it will timeout.
*/
static class ReorderWALBlocks implements ReorderBlocks {
+ @Override
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/http/SslSocketConnectorSecure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/SslSocketConnectorSecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/SslSocketConnectorSecure.java
index 2d78a3f..8b1e375 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/SslSocketConnectorSecure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/SslSocketConnectorSecure.java
@@ -41,6 +41,7 @@ public class SslSocketConnectorSecure extends SslSocketConnector {
* Create a new ServerSocket that will not accept SSLv3 connections,
* but will accept TLSv1.x connections.
*/
+ @Override
protected ServerSocket newServerSocket(String host, int port,int backlog)
throws IOException {
SSLServerSocket socket = (SSLServerSocket)
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index ad749f3..6ad5ced 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -67,7 +67,7 @@ public class FSDataInputStreamWrapper {
*/
private volatile FSDataInputStream stream = null;
private volatile FSDataInputStream streamNoFsChecksum = null;
- private Object streamNoFsChecksumFirstCreateLock = new Object();
+ private final Object streamNoFsChecksumFirstCreateLock = new Object();
// The configuration states that we should validate hbase checksums
private boolean useHBaseChecksumConfigured;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index a38e3c1..a777085 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -222,6 +222,7 @@ public class Reference {
return Arrays.hashCode(splitkey) + region.hashCode();
}
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null) return false;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
index 5039427..6db0817 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
@@ -228,6 +228,7 @@ public abstract class AbstractHFileReader
* @return the total heap size of data and meta block indexes in bytes. Does
* not take into account non-root blocks of a multilevel data index.
*/
+ @Override
public long indexSize() {
return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
+ ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
@@ -259,6 +260,7 @@ public abstract class AbstractHFileReader
this.isPrimaryReplicaReader = isPrimaryReplicaReader;
}
+ @Override
public FileInfo loadFileInfo() throws IOException {
return fileInfo;
}
@@ -345,6 +347,7 @@ public abstract class AbstractHFileReader
/** For testing */
abstract HFileBlock.FSReader getUncachedBlockReader();
+ @Override
public Path getPath() {
return path;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 2c2b039..6920693 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -129,6 +129,7 @@ public interface BlockCache extends Iterable<CachedBlock> {
/**
* @return Iterator over the blocks in the cache.
*/
+ @Override
Iterator<CachedBlock> iterator();
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 2a3a47d..09a662a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -539,7 +539,7 @@ public class FixedFileTrailer {
public void setComparatorClass(Class<? extends KVComparator> klass) {
// Is the comparator instantiable?
try {
- KVComparator comp = klass.newInstance();
+ KVComparator comp = klass.getDeclaredConstructor().newInstance();
// HFile V2 legacy comparator class names.
if (KeyValue.COMPARATOR.getClass().equals(klass)) {
@@ -592,11 +592,8 @@ public class FixedFileTrailer {
public static KVComparator createComparator(
String comparatorClassName) throws IOException {
try {
- return getComparatorClass(comparatorClassName).newInstance();
- } catch (InstantiationException e) {
- throw new IOException("Comparator class " + comparatorClassName +
- " is not instantiable", e);
- } catch (IllegalAccessException e) {
+ return getComparatorClass(comparatorClassName).getDeclaredConstructor().newInstance();
+ } catch (Exception e) {
throw new IOException("Comparator class " + comparatorClassName +
" is not instantiable", e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 1f96b25..1600112 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -674,82 +674,102 @@ public class HFile {
return this;
}
+ @Override
public void clear() {
this.map.clear();
}
+ @Override
public Comparator<? super byte[]> comparator() {
return map.comparator();
}
+ @Override
public boolean containsKey(Object key) {
return map.containsKey(key);
}
+ @Override
public boolean containsValue(Object value) {
return map.containsValue(value);
}
+ @Override
public Set<java.util.Map.Entry<byte[], byte[]>> entrySet() {
return map.entrySet();
}
+ @Override
public boolean equals(Object o) {
return map.equals(o);
}
+ @Override
public byte[] firstKey() {
return map.firstKey();
}
+ @Override
public byte[] get(Object key) {
return map.get(key);
}
+ @Override
public int hashCode() {
return map.hashCode();
}
+ @Override
public SortedMap<byte[], byte[]> headMap(byte[] toKey) {
return this.map.headMap(toKey);
}
+ @Override
public boolean isEmpty() {
return map.isEmpty();
}
+ @Override
public Set<byte[]> keySet() {
return map.keySet();
}
+ @Override
public byte[] lastKey() {
return map.lastKey();
}
+ @Override
public byte[] put(byte[] key, byte[] value) {
return this.map.put(key, value);
}
+ @Override
public void putAll(Map<? extends byte[], ? extends byte[]> m) {
this.map.putAll(m);
}
+ @Override
public byte[] remove(Object key) {
return this.map.remove(key);
}
+ @Override
public int size() {
return map.size();
}
+ @Override
public SortedMap<byte[], byte[]> subMap(byte[] fromKey, byte[] toKey) {
return this.map.subMap(fromKey, toKey);
}
+ @Override
public SortedMap<byte[], byte[]> tailMap(byte[] fromKey) {
return this.map.tailMap(fromKey);
}
+ @Override
public Collection<byte[]> values() {
return map.values();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index d98a7b5..b46a586 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -224,6 +224,7 @@ public class HFileBlock implements Cacheable {
*/
static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
new CacheableDeserializer<Cacheable>() {
+ @Override
public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException{
// The buf has the file block followed by block metadata.
// Set limit to just before the BLOCK_METADATA_SPACE then rewind.
@@ -411,6 +412,7 @@ public class HFileBlock implements Cacheable {
return nextBlockOnDiskSize;
}
+ @Override
public BlockType getBlockType() {
return blockType;
}
@@ -1931,6 +1933,22 @@ public class HFileBlock implements Cacheable {
}
@Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((blockType == null) ? 0 : blockType.hashCode());
+ result = prime * result + ((buf == null) ? 0 : buf.hashCode());
+ result = prime * result + ((fileContext == null) ? 0 : fileContext.hashCode());
+ result = prime * result + nextBlockOnDiskSize;
+ result = prime * result + (int) (offset ^ (offset >>> 32));
+ result = prime * result + onDiskDataSizeWithHeader;
+ result = prime * result + onDiskSizeWithoutHeader;
+ result = prime * result + (int) (prevBlockOffset ^ (prevBlockOffset >>> 32));
+ result = prime * result + uncompressedSizeWithoutHeader;
+ return result;
+ }
+
+ @Override
public boolean equals(Object comparison) {
if (this == comparison) {
return true;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
index 086395c..40ad108 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
@@ -91,6 +91,7 @@ public class HFileWriterV3 extends HFileWriterV2 {
}
}
+ @Override
protected void finishFileInfo() throws IOException {
super.finishFileInfo();
if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 69fea76..1d17a7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -439,6 +439,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
* @param cacheKey block's cache key
* @param buf block buffer
*/
+ @Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false, false);
}
@@ -488,7 +489,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
// Promote this to L1.
if (result != null && caching) {
- cacheBlock(cacheKey, result, /* inMemory = */ false, /* cacheData = */ true);
+ cacheBlock(cacheKey, result, false, true);
}
return result;
}
@@ -790,6 +791,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
return totalSize;
}
+ @Override
public int compareTo(BlockBucket that) {
return Long.compare(this.overflow(), that.overflow());
}
@@ -963,6 +965,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
* <p>Includes: total accesses, hits, misses, evicted blocks, and runs
* of the eviction processes.
*/
+ @Override
public CacheStats getStats() {
return this.stats;
}
@@ -1078,19 +1081,24 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
long acceptableSize() {
return (long)Math.floor(this.maxSize * this.acceptableFactor);
}
+
private long minSize() {
return (long)Math.floor(this.maxSize * this.minFactor);
}
+
private long singleSize() {
return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor);
}
+
private long multiSize() {
return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor);
}
+
private long memorySize() {
return (long)Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
}
+ @Override
public void shutdown() {
if (victimHandler != null)
victimHandler.shutdown();
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
index 0828563..2af6bdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
@@ -90,6 +90,7 @@ public class LruCachedBlock implements HeapSize, Comparable<LruCachedBlock> {
return this.cachedTime;
}
+ @Override
public long heapSize() {
return size;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index a6c84d0..ac7c4e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -315,7 +315,7 @@ public final class BucketAllocator {
this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes;
Arrays.sort(this.bucketSizes);
this.bigItemSize = Ints.max(this.bucketSizes);
- this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * bigItemSize;
+ this.bucketCapacity = (long) FEWEST_ITEMS_IN_BUCKET * bigItemSize;
buckets = new Bucket[(int) (availableSpace / bucketCapacity)];
if (buckets.length < this.bucketSizes.length)
throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length +
@@ -413,6 +413,7 @@ public final class BucketAllocator {
}
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder(1024);
for (int i = 0; i < buckets.length; ++i) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index b8d7453..4ff73b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -840,6 +840,7 @@ public class BucketCache implements BlockCache, HeapSize {
this.writerEnabled = false;
}
+ @Override
public void run() {
List<RAMQueueEntry> entries = new ArrayList<RAMQueueEntry>();
try {
@@ -1324,8 +1325,49 @@ public class BucketCache implements BlockCache, HeapSize {
}
@Override
- public boolean equals(Object that) {
- return this == that;
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + getOuterType().hashCode();
+ result = prime * result + (int) (bucketSize ^ (bucketSize >>> 32));
+ result = prime * result + ((queue == null) ? 0 : queue.hashCode());
+ result = prime * result + (int) (totalSize ^ (totalSize >>> 32));
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ BucketEntryGroup other = (BucketEntryGroup) obj;
+ if (!getOuterType().equals(other.getOuterType())) {
+ return false;
+ }
+ if (bucketSize != other.bucketSize) {
+ return false;
+ }
+ if (queue == null) {
+ if (other.queue != null) {
+ return false;
+ }
+ } else if (!queue.equals(other.queue)) {
+ return false;
+ }
+ if (totalSize != other.totalSize) {
+ return false;
+ }
+ return true;
+ }
+
+ private BucketCache getOuterType() {
+ return BucketCache.this;
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
index 0e33a56..46147a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
@@ -58,7 +58,7 @@ public class CachedEntryQueue {
initialSize++;
}
queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
-
+ @Override
public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
Entry<BlockCacheKey, BucketEntry> entry2) {
return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
index 1a8fa5b..d1c43d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
@@ -105,7 +105,7 @@ class BufferChain {
try {
long ret = channel.write(buffers, bufferOffset, bufCount);
if (ret > 0) {
- remaining -= ret;
+ remaining = (int) (remaining - ret);
}
return ret;
} finally {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 5617acb..3000622 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -54,7 +54,6 @@ import java.nio.channels.SocketChannel;
import java.nio.channels.WritableByteChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
@@ -768,8 +767,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
int start = 0;
int end = numConnections - 1;
if (!force) {
- start = rand.nextInt() % numConnections;
- end = rand.nextInt() % numConnections;
+ start = rand.nextInt(numConnections);
+ end = rand.nextInt(numConnections);
int temp;
if (end < start) {
temp = start;
@@ -1859,14 +1858,15 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
String className = header.getCellBlockCodecClass();
if (className == null || className.length() == 0) return;
try {
- this.codec = (Codec)Class.forName(className).newInstance();
+ this.codec = (Codec)Class.forName(className).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new UnsupportedCellCodecException(className, e);
}
if (!header.hasCellBlockCompressorClass()) return;
className = header.getCellBlockCompressorClass();
try {
- this.compressionCodec = (CompressionCodec)Class.forName(className).newInstance();
+ this.compressionCodec = (CompressionCodec)
+ Class.forName(className).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new UnsupportedCompressionCodecException(className, e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index ee6da75..2ae4996 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -94,6 +94,7 @@ implements TableMap<ImmutableBytesWritable,Result> {
* @param reporter
* @throws IOException
*/
+ @Override
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<ImmutableBytesWritable,Result> output,
Reporter reporter) throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
index a7d23d4..fd747ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
@@ -49,6 +49,7 @@ public class TableInputFormat extends TableInputFormatBase implements
*/
public static final String COLUMN_LIST = "hbase.mapred.tablecolumns";
+ @Override
public void configure(JobConf job) {
try {
initialize(job);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 874e593..bc82f71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -102,6 +102,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
* @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
* JobConf, Reporter)
*/
+ @Override
public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
InputSplit split, JobConf job, Reporter reporter)
throws IOException {
@@ -182,6 +183,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
*
* @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
*/
+ @Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
if (this.table == null) {
initialize(job);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index 281d13e..bd914df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -94,6 +94,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
this.recordReaderImpl.setRowFilter(rowFilter);
}
+ @Override
public void close() {
this.recordReaderImpl.close();
}
@@ -103,6 +104,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
*
* @see org.apache.hadoop.mapred.RecordReader#createKey()
*/
+ @Override
public ImmutableBytesWritable createKey() {
return this.recordReaderImpl.createKey();
}
@@ -112,10 +114,12 @@ implements RecordReader<ImmutableBytesWritable, Result> {
*
* @see org.apache.hadoop.mapred.RecordReader#createValue()
*/
+ @Override
public Result createValue() {
return this.recordReaderImpl.createValue();
}
+ @Override
public long getPos() {
// This should be the ordinal tuple in the range;
@@ -123,6 +127,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
return this.recordReaderImpl.getPos();
}
+ @Override
public float getProgress() {
// Depends on the total number of tuples and getPos
return this.recordReaderImpl.getPos();
@@ -134,6 +139,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
* @return true if there was more data
* @throws IOException
*/
+ @Override
public boolean next(ImmutableBytesWritable key, Result value)
throws IOException {
return this.recordReaderImpl.next(key, value);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index 09290fd..314e10b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -126,12 +126,13 @@ public class CellCounter {
*/
@Override
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+ justification="Preconditions checks ensure we are not going to dereference a null value")
public void map(ImmutableBytesWritable row, Result values,
Context context)
throws IOException {
Preconditions.checkState(values != null,
"values passed to the map is null");
-
try {
byte[] currentRow = values.getRow();
if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
@@ -179,10 +180,10 @@ public class CellCounter {
static class IntSumReducer<Key> extends Reducer<Key, IntWritable,
Key, IntWritable> {
-
private IntWritable result = new IntWritable();
- public void reduce(Key key, Iterable<IntWritable> values,
- Context context)
+
+ @Override
+ public void reduce(Key key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 76746d1..fc4aee3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -22,6 +22,7 @@ import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.URLDecoder;
import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -394,7 +395,7 @@ public class HFileOutputFormat2
new TreeSet<ImmutableBytesWritable>(startKeys);
ImmutableBytesWritable first = sorted.first();
- if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
+ if (!Bytes.equals(first.get(), HConstants.EMPTY_BYTE_ARRAY)) {
throw new IllegalArgumentException(
"First region of table should have empty start key. Instead has: "
+ Bytes.toStringBinary(first.get()));
@@ -646,7 +647,8 @@ public class HFileOutputFormat2
continue;
}
try {
- confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8").getBytes(),
+ confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8")
+ .getBytes(StandardCharsets.UTF_8),
URLDecoder.decode(familySplit[1], "UTF-8"));
} catch (UnsupportedEncodingException e) {
// will not happen with UTF-8 encoding
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
index fdf351e..7cfe27d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
@@ -61,6 +61,7 @@ extends TableMapper<ImmutableBytesWritable, Result> {
* @throws IOException When writing the record fails.
* @throws InterruptedException When the job is aborted.
*/
+ @Override
public void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
context.write(key, value);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
index d1dba1d..ca5ef01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
@@ -166,7 +166,7 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
- synchronized (outer) {
+ synchronized (this) {
if (!outer.nextKeyValue()) {
return false;
}
@@ -177,6 +177,7 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
}
}
+ @Override
public ImmutableBytesWritable getCurrentKey() {
return key;
}
@@ -197,7 +198,7 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
@Override
public void write(K2 key, V2 value) throws IOException,
InterruptedException {
- synchronized (outer) {
+ synchronized (this) {
outer.write(key, value);
}
}
@@ -225,6 +226,7 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
outer.setStatus(status);
}
+ @Override
public float getProgress() {
return 0;
}
@@ -281,7 +283,9 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
outer.getInputSplit());
Class<?> wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper");
Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class);
- subcontext = (Context) getMapContext.invoke(wrappedMapperClass.newInstance(), mc);
+ subcontext = (Context) getMapContext.invoke(
+ wrappedMapperClass.getDeclaredConstructor().newInstance(),
+ mc);
} catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION
// rethrow as IOE
throw new IOException(e);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index efb1cfd..7b93e8f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -49,6 +49,8 @@ import org.apache.zookeeper.KeeperException;
* the active master of the cluster.
*/
@InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+ justification="Use of an atomic type both as monitor and condition variable is intended")
public class ActiveMasterManager extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(ActiveMasterManager.class);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 809b980..b8c088a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -714,6 +714,7 @@ public class AssignmentManager extends ZooKeeperListener {
private void assignRegionsOnSSHCompletion() {
LOG.info("Meta is rebuild by OfflineMetaRepair tool, assigning all user regions.");
Thread regionAssignerThread = new Thread("RegionAssignerOnMetaRebuild") {
+ @Override
public void run() {
// Wait until all dead server processing finish
while (serverManager.areDeadServersInProgress()) {
@@ -1975,7 +1976,7 @@ public class AssignmentManager extends ZooKeeperListener {
|| t instanceof ServerNotRunningYetException) {
// RS is aborting or stopping, we cannot offline the region since the region may need
// to do WAL recovery. Until we see the RS expiration, we should retry.
- sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
+ sleepTime = 1L + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
} else if (t instanceof NotServingRegionException) {
@@ -1990,8 +1991,8 @@ public class AssignmentManager extends ZooKeeperListener {
return;
} else if ((t instanceof FailedServerException) || (state != null &&
t instanceof RegionAlreadyInTransitionException)) {
- if(t instanceof FailedServerException) {
- sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
+ if (t instanceof FailedServerException) {
+ sleepTime = 1L + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
} else {
// RS is already processing this region, only need to update the timestamp
@@ -2359,7 +2360,8 @@ public class AssignmentManager extends ZooKeeperListener {
return;
}
- if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
+ if (!plan.equals(newPlan) &&
+ !plan.getDestination().equals(newPlan.getDestination())) {
// Clean out plan we failed execute and one that doesn't look like it'll
// succeed anyways; we need a new plan!
// Transition back to OFFLINE
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
index 1ea57b4..9b60c00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
@@ -242,7 +242,7 @@ public class AssignmentVerificationReport {
this.maxDispersionScoreServerSet.clear();
this.maxDispersionScoreServerSet.add(primaryRS);
this.maxDispersionScore = dispersionScore;
- } else if (dispersionScore == this.maxDispersionScore) {
+ } else if (Math.abs(dispersionScore - this.maxDispersionScore) < 0.0000001) {
this.maxDispersionScoreServerSet.add(primaryRS);
}
@@ -260,7 +260,7 @@ public class AssignmentVerificationReport {
this.minDispersionScoreServerSet.clear();
this.minDispersionScoreServerSet.add(primaryRS);
this.minDispersionScore = dispersionScore;
- } else if (dispersionScore == this.minDispersionScore) {
+ } else if (Math.abs(dispersionScore - this.minDispersionScore) < 0.0000001) {
this.minDispersionScoreServerSet.add(primaryRS);
}
@@ -418,7 +418,7 @@ public class AssignmentVerificationReport {
this.minDispersionScoreServerSet.clear();
this.minDispersionScoreServerSet.add(primaryRS);
this.minDispersionScore = dispersionScore;
- } else if (dispersionScore == this.minDispersionScore) {
+ } else if (Math.abs(dispersionScore - this.minDispersionScore) < 0.0000001) {
this.minDispersionScoreServerSet.add(primaryRS);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index e90aae6..4d4358c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -121,10 +121,8 @@ public class ClusterStatusPublisher extends ScheduledChore {
this.master = master;
this.messagePeriod = conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD);
try {
- this.publisher = publisherClass.newInstance();
- } catch (InstantiationException e) {
- throw new IOException("Can't create publisher " + publisherClass.getName(), e);
- } catch (IllegalAccessException e) {
+ this.publisher = publisherClass.getDeclaredConstructor().newInstance();
+ } catch (Exception e) {
throw new IOException("Can't create publisher " + publisherClass.getName(), e);
}
this.publisher.connect(conf);
@@ -175,7 +173,8 @@ public class ClusterStatusPublisher extends ScheduledChore {
publisher.publish(cs);
}
- protected void cleanup() {
+ @Override
+ protected synchronized void cleanup() {
connected = false;
publisher.close();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index 75ab00c..c1b5180 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -155,6 +155,7 @@ public class DeadServer {
}
}
+ @Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
for (ServerName sn : deadServers.keySet()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f5f48d2..fc8ab28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2528,6 +2528,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
return info.getInfoPort();
}
+ @Override
public String getRegionServerVersion(final ServerName sn) {
RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
if (info != null && info.hasVersionInfo()) {
@@ -2817,6 +2818,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
/**
* @return the underlying snapshot manager
*/
+ @Override
public SnapshotManager getSnapshotManager() {
return this.snapshotManager;
}
@@ -2824,6 +2826,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
/**
* @return the underlying MasterProcedureManagerHost
*/
+ @Override
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return mpmHost;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index cc5ca0b..11e458d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -69,11 +69,12 @@ public class HMasterCommandLine extends ServerCommandLine {
this.masterClass = masterClass;
}
+ @Override
protected String getUsage() {
return USAGE;
}
-
+ @Override
public int run(String args[]) throws Exception {
Options opt = new Options();
opt.addOption("localRegionServers", true,
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 937b32f..e387f59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master;
import java.util.List;
import java.util.Map;
-import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.conf.Configurable;
@@ -151,5 +150,6 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
* Notification that config has changed
* @param conf
*/
+ @Override
void onConfigurationChange(Configuration conf);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8cc2c03..11d9159 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -189,7 +189,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -314,6 +313,7 @@ public class MasterRpcServices extends RSRpcServices
/**
* @return list of blocking services and their security info classes that this server supports
*/
+ @Override
protected List<BlockingServiceAndInterface> getServices() {
List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(4);
bssi.add(new BlockingServiceAndInterface(
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 040342f..c889395 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -80,7 +80,6 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
-import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -109,6 +108,8 @@ import org.apache.zookeeper.KeeperException;
* and has completed the handling.
*/
@InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+ justification="Synchronization on concurrent map is intended")
public class ServerManager {
public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
"hbase.master.wait.on.regionservers.maxtostart";
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index d5620b9..2cdbbb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -820,7 +820,7 @@ public class SplitLogManager {
public enum TerminationStatus {
IN_PROGRESS("in_progress"), SUCCESS("success"), FAILURE("failure"), DELETED("deleted");
- String statusMsg;
+ final String statusMsg;
TerminationStatus(String msg) {
statusMsg = msg;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
index 86e9093..16ae41a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
@@ -369,11 +369,13 @@ public abstract class TableLockManager {
serverName, writeLockTimeoutMs, false, purpose);
}
+ @Override
public TableLock readLock(TableName tableName, String purpose) {
return new TableLockImpl(tableName, zkWatcher,
serverName, readLockTimeoutMs, true, purpose);
}
+ @Override
public void visitAllLocks(MetadataHandler handler) throws IOException {
for (String tableName : getTableNames()) {
String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableName);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
index d5edfab..06ab3b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
@@ -75,7 +75,7 @@ class RegionLocationFinder {
private CacheLoader<HRegionInfo, HDFSBlocksDistribution> loader =
new CacheLoader<HRegionInfo, HDFSBlocksDistribution>() {
-
+ @Override
public ListenableFuture<HDFSBlocksDistribution> reload(final HRegionInfo hri,
HDFSBlocksDistribution oldValue) throws Exception {
return executor.submit(new Callable<HDFSBlocksDistribution>() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 3047d28..374070c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -348,8 +348,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
// Allow turning this feature off if the locality cost is not going to
// be used in any computations.
RegionLocationFinder finder = null;
- if (this.localityCost != null && this.localityCost.getMultiplier() > 0
- || this.rackLocalityCost != null && this.rackLocalityCost.getMultiplier() > 0) {
+ if ((this.localityCost != null && this.localityCost.getMultiplier() > 0)
+ || (this.rackLocalityCost != null && this.rackLocalityCost.getMultiplier() > 0)) {
finder = this.regionFinder;
}
@@ -1399,7 +1399,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
// Now if we found a region load get the type of cost that was requested.
if (regionLoadList != null) {
- cost += getRegionLoadCost(regionLoadList);
+ cost = (long) (cost + getRegionLoadCost(regionLoadList));
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index d431b2e..743d9a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -109,7 +109,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
Class<? extends FileCleanerDelegate> c = Class.forName(className).asSubclass(
FileCleanerDelegate.class);
@SuppressWarnings("unchecked")
- T cleaner = (T) c.newInstance();
+ T cleaner = (T) c.getDeclaredConstructor().newInstance();
cleaner.setConf(conf);
return cleaner;
} catch (Exception e) {
@@ -282,7 +282,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
}
@Override
- public void cleanup() {
+ public synchronized void cleanup() {
for (T lc : this.cleanersChain) {
try {
lc.stop("Exiting");
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index cd728fc..43a0f65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -75,6 +75,7 @@ public abstract class TableEventHandler extends EventHandler {
this.tableName = tableName;
}
+ @Override
public TableEventHandler prepare() throws IOException {
//acquire the table write lock, blocking
this.tableLock = masterServices.getTableLockManager()
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 5f37720..6367dec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -573,6 +573,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
super(serverName);
}
+ @Override
public boolean requireExclusiveLock(Procedure proc) {
ServerProcedureInterface spi = (ServerProcedureInterface)proc;
switch (spi.getServerOperationType()) {
@@ -617,6 +618,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
}
}
+ @Override
public boolean requireExclusiveLock(Procedure proc) {
TableProcedureInterface tpi = (TableProcedureInterface)proc;
switch (tpi.getTableOperationType()) {
@@ -1045,6 +1047,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
/**
* True if the queue is not in the run-queue and it is owned by an event.
*/
+ @Override
public boolean isSuspended() {
return suspended;
}
@@ -1093,6 +1096,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
// This should go away when we have the new AM and its events
// and we move xlock to the lock-event-queue.
+ @Override
public synchronized boolean isAvailable() {
return !hasExclusiveLock() && !isEmpty();
}
@@ -1128,6 +1132,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
super(key, priority);
}
+ @Override
public void add(final Procedure proc, final boolean addToFront) {
if (addToFront) {
addFront(proc);
@@ -1144,6 +1149,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
runnables.addLast(proc);
}
+ @Override
public Procedure peek() {
return runnables.peek();
}
@@ -1158,6 +1164,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
return runnables.isEmpty();
}
+ @Override
public int size() {
return runnables.size();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 1185073..0ed75a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -171,7 +171,6 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// 5. restore acl of snapshot into the table.
if (restoreAcl && snapshot.hasUsersAndPermissions()
- && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(server.getConfiguration())) {
RestoreSnapshotHelper.restoreSnapshotACL(snapshot, tableName, server.getConfiguration());
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 39387cb..4060e0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -132,6 +132,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
return htd;
}
+ @Override
public TakeSnapshotHandler prepare() throws Exception {
super.prepare();
this.tableLock.acquire(); // after this, you should ensure to release this lock in
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
index 0279a60..c00a125 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
@@ -103,7 +103,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
//
/** lock to prevent nodes from acquiring and then releasing before we can track them */
- private Object joinBarrierLock = new Object();
+ private final Object joinBarrierLock = new Object();
private final List<String> acquiringMembers;
private final List<String> inBarrierMembers;
private final HashMap<String, byte[]> dataFromFinishedMembers;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
index 3ab4ac5..2c9b5dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
@@ -88,11 +88,9 @@ public abstract class ProcedureManagerHost<E extends ProcedureManager> {
E impl;
Object o = null;
try {
- o = implClass.newInstance();
+ o = implClass.getDeclaredConstructor().newInstance();
impl = (E)o;
- } catch (InstantiationException e) {
- throw new IOException(e);
- } catch (IllegalAccessException e) {
+ } catch (Exception e) {
throw new IOException(e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
index 8927338..6223c0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
@@ -153,6 +153,7 @@ abstract public class Subprocedure implements Callable<Void> {
* Subprocedure, ForeignException)}.
*/
@SuppressWarnings("finally")
+ @Override
final public Void call() {
LOG.debug("Starting subprocedure '" + barrierName + "' with timeout " +
executionTimeoutTimer.getMaxTime() + "ms");
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 9bfa8db..f1b7ff9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -344,12 +344,14 @@ public class MasterQuotaManager implements RegionStateListener {
return -1;
}
+ @Override
public void onRegionMerged(HRegionInfo hri) throws IOException {
if (initialized) {
namespaceQuotaManager.updateQuotaForRegionMerge(hri);
}
}
+ @Override
public void onRegionSplit(HRegionInfo hri) throws IOException {
if (initialized) {
namespaceQuotaManager.checkQuotaToSplitRegion(hri);
@@ -498,7 +500,7 @@ public class MasterQuotaManager implements RegionStateListener {
}
private static class NamedLock<T> {
- private HashSet<T> locks = new HashSet<T>();
+ private final HashSet<T> locks = new HashSet<T>();
public void lock(final T name) throws InterruptedException {
synchronized (locks) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
index 39f1456..2ceaa47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
@@ -199,11 +199,11 @@ public class QuotaCache implements Stoppable {
// Prefetch online tables/namespaces
for (TableName table : QuotaCache.this.rsServices.getOnlineTables()) {
if (table.isSystemTable()) continue;
- if (!QuotaCache.this.tableQuotaCache.contains(table)) {
+ if (!QuotaCache.this.tableQuotaCache.containsKey(table)) {
QuotaCache.this.tableQuotaCache.putIfAbsent(table, new QuotaState());
}
String ns = table.getNamespaceAsString();
- if (!QuotaCache.this.namespaceQuotaCache.contains(ns)) {
+ if (!QuotaCache.this.namespaceQuotaCache.containsKey(ns)) {
QuotaCache.this.namespaceQuotaCache.putIfAbsent(ns, new QuotaState());
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index e6fe9cd..d35d620 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -314,6 +314,7 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
return ret;
}
+ @Override
public CompactionRequest requestCompaction(final Region r, final Store s,
final String why, int priority, CompactionRequest request, User user) throws IOException {
return requestCompactionInternal(r, s, why, priority, request, true, user);
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7a5720a..c68f813 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -37,6 +37,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
+import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.AbstractList;
import java.util.ArrayList;
@@ -76,7 +77,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.lang.RandomStringUtils;
@@ -206,6 +206,8 @@ import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
@InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+ justification="Synchronization on concurrent map is intended")
public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
private static final Log LOG = LogFactory.getLog(HRegion.class);
@@ -1012,7 +1014,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
this.stores.put(store.getFamily().getName(), store);
long storeMaxSequenceId = store.getMaxSequenceId();
- maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
+ maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(StandardCharsets.UTF_8),
storeMaxSequenceId);
if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
maxSeqId = storeMaxSequenceId;
@@ -5393,7 +5395,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
Store store = this.stores.get(column);
if (store == null) {
throw new IllegalArgumentException("No column family : " +
- new String(column) + " available");
+ new String(column, StandardCharsets.UTF_8) + " available");
}
Collection<StoreFile> storeFiles = store.getStorefiles();
if (storeFiles == null) continue;
http://git-wip-us.apache.org/repos/asf/hbase/blob/108ea30e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 32bab6b..4853b2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -26,6 +26,7 @@ import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
@@ -209,6 +210,8 @@ import sun.misc.SignalHandler;
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings("deprecation")
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+ justification="Use of an atomic type both as monitor and condition variable is intended")
public class HRegionServer extends HasThread implements
RegionServerServices, LastSequenceId, ConfigurationObserver {
@@ -1666,7 +1669,7 @@ public class HRegionServer extends HasThread implements
if (((HRegion)r).shouldFlush(whyFlush)) {
FlushRequester requester = server.getFlushRequester();
if (requester != null) {
- long randomDelay = RandomUtils.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME;
+ long randomDelay = (long) RandomUtils.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME;
LOG.info(getName() + " requesting flush of " +
r.getRegionInfo().getRegionNameAsString() + " because " +
whyFlush.toString() +
@@ -2953,13 +2956,15 @@ public class HRegionServer extends HasThread implements
}
}
- final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(encodedName.getBytes(),
+ final Boolean previous = this.regionsInTransitionInRS
+ .putIfAbsent(encodedName.getBytes(StandardCharsets.UTF_8),
Boolean.FALSE);
if (Boolean.TRUE.equals(previous)) {
LOG.info("Received CLOSE for the region:" + encodedName + " , which we are already " +
"trying to OPEN. Cancelling OPENING.");
- if (!regionsInTransitionInRS.replace(encodedName.getBytes(), previous, Boolean.FALSE)){
+ if (!regionsInTransitionInRS.replace(encodedName.getBytes(StandardCharsets.UTF_8),
+ previous, Boolean.FALSE)){
// The replace failed. That should be an exceptional case, but theoretically it can happen.
// We're going to try to do a standard close then.
LOG.warn("The opening for region " + encodedName + " was done before we could cancel it." +
@@ -2989,7 +2994,7 @@ public class HRegionServer extends HasThread implements
if (actualRegion == null) {
LOG.error("Received CLOSE for a region which is not online, and we're not opening.");
- this.regionsInTransitionInRS.remove(encodedName.getBytes());
+ this.regionsInTransitionInRS.remove(encodedName.getBytes(StandardCharsets.UTF_8));
// The master deletes the znode when it receives this exception.
throw new NotServingRegionException("The region " + encodedName +
" is not online, and is not opening.");