You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by jm...@apache.org on 2012/04/11 06:36:26 UTC
svn commit: r1324547 - in /hbase/trunk: dev-support/
src/main/java/org/apache/hadoop/hbase/client/
src/main/java/org/apache/hadoop/hbase/executor/
src/main/java/org/apache/hadoop/hbase/io/hfile/slab/
src/main/java/org/apache/hadoop/hbase/ipc/ src/main/...
Author: jmhsieh
Date: Wed Apr 11 04:36:26 2012
New Revision: 1324547
URL: http://svn.apache.org/viewvc?rev=1324547&view=rev
Log:
HBASE-5653 [findbugs] Fix perf warnings (Uma Maheswara Rao G)
Modified:
hbase/trunk/dev-support/test-patch.properties
hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java
hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp
hbase/trunk/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java
Modified: hbase/trunk/dev-support/test-patch.properties
URL: http://svn.apache.org/viewvc/hbase/trunk/dev-support/test-patch.properties?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/dev-support/test-patch.properties (original)
+++ hbase/trunk/dev-support/test-patch.properties Wed Apr 11 04:36:26 2012
@@ -19,5 +19,5 @@ MAVEN_OPTS="-Xmx3g"
# Please update the per-module test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=585
+OK_FINDBUGS_WARNINGS=561
OK_JAVADOC_WARNINGS=169
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java Wed Apr 11 04:36:26 2012
@@ -113,11 +113,13 @@ extends RetriesExhaustedException {
List<Row> actions,
List<String> hostnamePort) {
String s = getDesc(classifyExs(exceptions));
- s += "servers with issues: ";
+ StringBuilder addrs = new StringBuilder(s);
+ addrs.append("servers with issues: ");
Set<String> uniqAddr = new HashSet<String>();
uniqAddr.addAll(hostnamePort);
+
for(String addr : uniqAddr) {
- s += addr + ", ";
+ addrs.append(addr).append(", ");
}
- return s;
+ return addrs.toString();
}
@@ -143,12 +145,16 @@ extends RetriesExhaustedException {
}
public static String getDesc(Map<String,Integer> classificaton) {
- String s = "";
+ StringBuilder classificatons = new StringBuilder(11);
for (Map.Entry<String, Integer> e : classificaton.entrySet()) {
- s += e.getKey() + ": " + e.getValue() + " time" +
- pluralize(e.getValue()) + ", ";
+ classificatons.append(e.getKey());
+ classificatons.append(": ");
+ classificatons.append(e.getValue());
+ classificatons.append(" time");
+ classificatons.append(pluralize(e.getValue()));
+ classificatons.append(", ");
}
- return s;
+ return classificatons.toString();
}
}
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java Wed Apr 11 04:36:26 2012
@@ -277,7 +277,7 @@ public class ExecutorService {
*/
static class Executor {
// how long to retain excess threads
- final long keepAliveTimeInMillis = 1000;
+ static final long keepAliveTimeInMillis = 1000;
// the thread pool executor that services the requests
final TrackingThreadPoolExecutor threadPoolExecutor;
// work queue to use - unbounded queue
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java Wed Apr 11 04:36:26 2012
@@ -333,7 +333,7 @@ public class SingleSizeCache implements
}
/* Just a pair class, holds a reference to the parent cacheable */
- private class CacheablePair implements HeapSize {
+ private static class CacheablePair implements HeapSize {
final CacheableDeserializer<Cacheable> deserializer;
ByteBuffer serializedData;
AtomicLong recentlyAccessed;
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java Wed Apr 11 04:36:26 2012
@@ -362,7 +362,7 @@ public class SlabCache implements SlabIt
// the maximum size somebody will ever try to cache, then we multiply by
// 10
// so we have finer grained stats.
- final int MULTIPLIER = 10;
+ static final int MULTIPLIER = 10;
final int NUMDIVISIONS = (int) (Math.log(Integer.MAX_VALUE) * MULTIPLIER);
private final AtomicLong[] counts = new AtomicLong[NUMDIVISIONS];
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java Wed Apr 11 04:36:26 2012
@@ -254,7 +254,6 @@ class WritableRpcEngine implements RpcEn
private Class<?> implementation;
private Class<?>[] ifaces;
private boolean verbose;
- private boolean authorize = false;
// for JSON encoding
private static ObjectMapper mapper = new ObjectMapper();
@@ -308,10 +307,6 @@ class WritableRpcEngine implements RpcEn
String [] metricSuffixes = new String [] {ABOVE_ONE_SEC_METRIC};
this.rpcMetrics.createMetrics(this.ifaces, false, metricSuffixes);
- this.authorize =
- conf.getBoolean(
- ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
-
this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
DEFAULT_WARN_RESPONSE_TIME);
this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java Wed Apr 11 04:36:26 2012
@@ -235,7 +235,6 @@ public class MultithreadedTableMapper<K2
private class MapRunner implements Runnable {
private Mapper<ImmutableBytesWritable, Result, K2,V2> mapper;
private Context subcontext;
- private Throwable throwable;
@SuppressWarnings({ "rawtypes", "unchecked" })
MapRunner(Context context) throws IOException, InterruptedException {
@@ -295,7 +294,7 @@ public class MultithreadedTableMapper<K2
try {
mapper.run(subcontext);
} catch (Throwable ie) {
- throwable = ie;
+ LOG.error("Problem in running map.", ie);
}
}
}
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java Wed Apr 11 04:36:26 2012
@@ -121,7 +121,7 @@ public class DefaultLoadBalancer impleme
RegionInfoComparator riComparator = new RegionInfoComparator();
- private class RegionPlanComparator implements Comparator<RegionPlan> {
+ private static class RegionPlanComparator implements Comparator<RegionPlan> {
@Override
public int compare(RegionPlan l, RegionPlan r) {
long diff = r.getRegionInfo().getRegionId() - l.getRegionInfo().getRegionId();
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Wed Apr 11 04:36:26 2012
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.monitorin
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
-import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
@@ -408,7 +407,7 @@ public class SplitLogManager extends Zoo
// A negative retry count will lead to ignoring all error processing.
this.watcher.getRecoverableZooKeeper().getZooKeeper().
getData(path, this.watcher,
- new GetDataAsyncCallback(), new Long(-1) /* retry count */);
+ new GetDataAsyncCallback(), Long.valueOf(-1) /* retry count */);
tot_mgr_get_data_queued.incrementAndGet();
}
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java Wed Apr 11 04:36:26 2012
@@ -63,29 +63,31 @@ public class HBaseInfo {
return theInstance;
}
- // HBase jar info
- private MetricsString date = new MetricsString("date", registry,
- org.apache.hadoop.hbase.util.VersionInfo.getDate());
- private MetricsString revision = new MetricsString("revision", registry,
- org.apache.hadoop.hbase.util.VersionInfo.getRevision());
- private MetricsString url = new MetricsString("url", registry,
- org.apache.hadoop.hbase.util.VersionInfo.getUrl());
- private MetricsString user = new MetricsString("user", registry,
- org.apache.hadoop.hbase.util.VersionInfo.getUser());
- private MetricsString version = new MetricsString("version", registry,
- org.apache.hadoop.hbase.util.VersionInfo.getVersion());
+ {
+ // HBase jar info
+ new MetricsString("date", registry,
+ org.apache.hadoop.hbase.util.VersionInfo.getDate());
+ new MetricsString("revision", registry,
+ org.apache.hadoop.hbase.util.VersionInfo.getRevision());
+ new MetricsString("url", registry, org.apache.hadoop.hbase.util.VersionInfo
+ .getUrl());
+ new MetricsString("user", registry,
+ org.apache.hadoop.hbase.util.VersionInfo.getUser());
+ new MetricsString("version", registry,
+ org.apache.hadoop.hbase.util.VersionInfo.getVersion());
- // Info on the HDFS jar that HBase has (aka: HDFS Client)
- private MetricsString hdfsDate = new MetricsString("hdfsDate", registry,
- org.apache.hadoop.util.VersionInfo.getDate());
- private MetricsString hdfsRev = new MetricsString("hdfsRevision", registry,
- org.apache.hadoop.util.VersionInfo.getRevision());
- private MetricsString hdfsUrl = new MetricsString("hdfsUrl", registry,
- org.apache.hadoop.util.VersionInfo.getUrl());
- private MetricsString hdfsUser = new MetricsString("hdfsUser", registry,
- org.apache.hadoop.util.VersionInfo.getUser());
- private MetricsString hdfsVer = new MetricsString("hdfsVersion", registry,
- org.apache.hadoop.util.VersionInfo.getVersion());
+ // Info on the HDFS jar that HBase has (aka: HDFS Client)
+ new MetricsString("hdfsDate", registry, org.apache.hadoop.util.VersionInfo
+ .getDate());
+ new MetricsString("hdfsRevision", registry,
+ org.apache.hadoop.util.VersionInfo.getRevision());
+ new MetricsString("hdfsUrl", registry, org.apache.hadoop.util.VersionInfo
+ .getUrl());
+ new MetricsString("hdfsUser", registry, org.apache.hadoop.util.VersionInfo
+ .getUser());
+ new MetricsString("hdfsVersion", registry,
+ org.apache.hadoop.util.VersionInfo.getVersion());
+ }
protected HBaseInfo() {
MetricsContext context = MetricsUtil.getContext("hbase");
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed Apr 11 04:36:26 2012
@@ -735,9 +735,8 @@ public class HLog implements Syncable {
Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
// Get the set of all log files whose last sequence number is smaller than
// the oldest edit's sequence number.
- TreeSet<Long> sequenceNumbers =
- new TreeSet<Long>(this.outputfiles.headMap(
- (Long.valueOf(oldestOutstandingSeqNum.longValue()))).keySet());
+ TreeSet<Long> sequenceNumbers = new TreeSet<Long>(this.outputfiles.headMap(
+ oldestOutstandingSeqNum).keySet());
// Now remove old log files (if any)
int logsToRemove = sequenceNumbers.size();
if (logsToRemove > 0) {
@@ -757,7 +756,7 @@ public class HLog implements Syncable {
// If too many log files, figure which regions we need to flush.
// Array is an array of encoded region names.
byte [][] regions = null;
- int logCount = this.outputfiles == null? 0: this.outputfiles.size();
+ int logCount = this.outputfiles.size();
if (logCount > this.maxLogs && logCount > 0) {
// This is an array of encoded region names.
regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java Wed Apr 11 04:36:26 2012
@@ -19,11 +19,19 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
@@ -32,13 +40,6 @@ import org.apache.hadoop.hbase.client.Ro
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.Stoppable;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
/**
* This class is responsible for replicating the edits coming
@@ -127,8 +128,8 @@ public class ReplicationSink {
}
totalReplicated++;
}
- for(byte [] table : rows.keySet()) {
- batch(table, rows.get(table));
+ for (Entry<byte[], List<Row>> entry : rows.entrySet()) {
+ batch(entry.getKey(), entry.getValue());
}
this.metrics.setAgeOfLastAppliedOp(
entries[entries.length-1].getKey().getWriteTime());
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java Wed Apr 11 04:36:26 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.metrics.util.Me
public class ReplicationSinkMetrics implements Updater {
private final MetricsRecord metricsRecord;
private MetricsRegistry registry = new MetricsRegistry();
- private static ReplicationSinkMetrics instance;
/** Rate of operations applied by the sink */
public final MetricsRate appliedOpsRate =
Modified: hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp (original)
+++ hbase/trunk/src/main/resources/hbase-webapps/master/table.jsp Wed Apr 11 04:36:26 2012
@@ -186,7 +186,7 @@
urlRegionServer =
"http://" + addr.getHostname().toString() + ":" + infoPort + "/";
Integer i = regDistribution.get(urlRegionServer);
- if (null == i) i = new Integer(0);
+ if (null == i) i = Integer.valueOf(0);
regDistribution.put(urlRegionServer, i+1);
}
}
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java?rev=1324547&r1=1324546&r2=1324547&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java Wed Apr 11 04:36:26 2012
@@ -124,7 +124,7 @@ public class TestExecutorService {
}
// Make sure threads are still around even after their timetolive expires.
- Thread.sleep(executor.keepAliveTimeInMillis * 2);
+ Thread.sleep(ExecutorService.Executor.keepAliveTimeInMillis * 2);
assertEquals(maxThreads, pool.getPoolSize());
executorService.shutdown();