You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by en...@apache.org on 2013/07/15 23:38:38 UTC
svn commit: r1503484 - in /hbase/trunk:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/
hbase-common/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/
Author: enis
Date: Mon Jul 15 21:38:37 2013
New Revision: 1503484
URL: http://svn.apache.org/r1503484
Log:
HBASE-8909. HBASE-6170 broke hbase.regionserver.lease.period config, we should support it for BC
Modified:
hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java?rev=1503484&r1=1503483&r2=1503484&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java Mon Jul 15 21:38:37 2013
@@ -17,11 +17,16 @@
*/
package org.apache.hadoop.hbase.client;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -35,10 +40,6 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
import org.apache.hadoop.hbase.util.Bytes;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedList;
-
/**
* Implements the scanner interface for the HBase client.
* If there are multiple regions in a table, this scanner will iterate
@@ -65,7 +66,7 @@ public class ClientScanner extends Abstr
private final byte[] tableName;
private final int scannerTimeout;
private boolean scanMetricsPublished = false;
-
+
/**
* Create a new ClientScanner for the specified table. An HConnection will be
* retrieved using the passed Configuration.
@@ -108,7 +109,9 @@ public class ClientScanner extends Abstr
HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
}
- this.scannerTimeout = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
+ this.scannerTimeout = HBaseConfiguration.getInt(conf,
+ HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
+ HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
// check if application wants to collect scan metrics
@@ -253,6 +256,7 @@ public class ClientScanner extends Abstr
scanMetricsPublished = true;
}
+ @Override
public Result next() throws IOException {
// If the scanner is closed and there's nothing left in the cache, next is a no-op.
if (cache.size() == 0 && this.closed) {
@@ -377,6 +381,7 @@ public class ClientScanner extends Abstr
* if returned array is of zero-length (We never return null).
* @throws IOException
*/
+ @Override
public Result [] next(int nbRows) throws IOException {
// Collect values to be returned here
ArrayList<Result> resultSets = new ArrayList<Result>(nbRows);
@@ -391,6 +396,7 @@ public class ClientScanner extends Abstr
return resultSets.toArray(new Result[resultSets.size()]);
}
+ @Override
public void close() {
if (!scanMetricsPublished) writeScanMetrics();
if (callable != null) {
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java?rev=1503484&r1=1503483&r2=1503484&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java Mon Jul 15 21:38:37 2013
@@ -150,6 +150,35 @@ public class HBaseConfiguration extends
return isShowConf;
}
+ /**
+ * Get the value of the <code>name</code> property as an <code>int</code>, falling
+ * back to a deprecated name for the property if the non-deprecated name is not set.
+ * If neither property is set, the provided default value is returned. If the
+ * configured value is not a valid <code>int</code>, an error is thrown.
+ *
+ * @param conf configuration to read the property from.
+ * @param name property name.
+ * @param deprecatedName deprecated name of the property, consulted only when
+ * the non-deprecated name is not set.
+ * @param defaultValue default value returned when neither property is set.
+ * @throws NumberFormatException when the configured value is not a valid <code>int</code>
+ * @return property value as an <code>int</code>,
+ * or <code>defaultValue</code>.
+ */
+ // TODO: developer note: This duplicates the deprecated-property support that
+ // Configuration provides in Hadoop 2. Since Hadoop 1 does not contain those
+ // changes, we roll our own here as usual. Replace this once Hadoop 2 is the default.
+ public static int getInt(Configuration conf, String name,
+ String deprecatedName, int defaultValue) {
+ if (conf.get(deprecatedName) != null) {
+ LOG.warn(String.format("Config option \"%s\" is deprecated. Instead, use \"%s\""
+ , deprecatedName, name));
+ return conf.getInt(deprecatedName, defaultValue);
+ } else {
+ return conf.getInt(name, defaultValue);
+ }
+ }
+
/** For debugging. Dump configurations to system output as xml format.
* Master and RS configurations can also be dumped using
* http services. e.g. "curl http://master:60010/dump"
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1503484&r1=1503483&r2=1503484&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Mon Jul 15 21:38:37 2013
@@ -630,6 +630,14 @@ public final class HConstants {
public static String HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = "hbase.client.scanner.timeout.period";
/**
+ * Use {@link #HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD} instead.
+ * @deprecated This config option is deprecated. Will be removed at later releases after 0.96.
+ */
+ @Deprecated
+ public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY =
+ "hbase.regionserver.lease.period";
+
+ /**
* Default value of {@link #HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD}.
*/
public static int DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = 60000;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1503484&r1=1503483&r2=1503484&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Mon Jul 15 21:38:37 2013
@@ -109,11 +109,11 @@ import org.apache.hadoop.hbase.filter.Co
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.master.TableLockManager;
@@ -171,9 +171,9 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -526,7 +526,9 @@ public class HRegionServer implements Cl
this.abortRequested = false;
this.stopped = false;
- this.scannerLeaseTimeoutPeriod = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
+ this.scannerLeaseTimeoutPeriod = HBaseConfiguration.getInt(conf,
+ HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
+ HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
// Server to handle client requests.
@@ -569,6 +571,7 @@ public class HRegionServer implements Cl
regionServerAccounting = new RegionServerAccounting();
cacheConfig = new CacheConfig(conf);
uncaughtExceptionHandler = new UncaughtExceptionHandler() {
+ @Override
public void uncaughtException(Thread t, Throwable e) {
abort("Uncaught exception in service thread " + t.getName(), e);
}
@@ -772,6 +775,7 @@ public class HRegionServer implements Cl
/**
* The HRegionServer sticks in this loop until closed.
*/
+ @Override
public void run() {
try {
// Do pre-registration initializations; zookeeper, lease threads, etc.
@@ -1216,6 +1220,7 @@ public class HRegionServer implements Cl
ZKUtil.deleteNode(this.zooKeeper, getMyEphemeralNodePath());
}
+ @Override
public RegionServerAccounting getRegionServerAccounting() {
return regionServerAccounting;
}
@@ -1577,7 +1582,7 @@ public class HRegionServer implements Cl
// quite a while inside HConnection layer. The worker won't be available for other
// tasks even after current task is preempted after a split task times out.
Configuration sinkConf = HBaseConfiguration.create(conf);
- sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+ sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
conf.getInt("hbase.log.replay.retries.number", 8)); // 8 retries take about 23 seconds
sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
conf.getInt("hbase.log.replay.rpc.timeout", 30000)); // default 30 seconds
@@ -1740,6 +1745,7 @@ public class HRegionServer implements Cl
* @param cause
* the exception that caused the abort, or null
*/
+ @Override
public void abort(String reason, Throwable cause) {
String msg = "ABORTING region server " + this + ": " + reason;
if (cause != null) {
@@ -1781,6 +1787,7 @@ public class HRegionServer implements Cl
abort(reason, null);
}
+ @Override
public boolean isAborted() {
return this.abortRequested;
}
@@ -2024,6 +2031,7 @@ public class HRegionServer implements Cl
/**
* @return true if a stop has been requested.
*/
+ @Override
public boolean isStopped() {
return this.stopped;
}
@@ -2033,6 +2041,7 @@ public class HRegionServer implements Cl
return this.stopping;
}
+ @Override
public Map<String, HRegion> getRecoveringRegions() {
return this.recoveringRegions;
}
@@ -2041,6 +2050,7 @@ public class HRegionServer implements Cl
*
* @return the configuration
*/
+ @Override
public Configuration getConfiguration() {
return conf;
}
@@ -2081,6 +2091,7 @@ public class HRegionServer implements Cl
// we'll sort the regions in reverse
SortedMap<Long, HRegion> sortedRegions = new TreeMap<Long, HRegion>(
new Comparator<Long>() {
+ @Override
public int compare(Long a, Long b) {
return -1 * a.compareTo(b);
}
@@ -2100,6 +2111,7 @@ public class HRegionServer implements Cl
}
/** @return reference to FlushRequester */
+ @Override
public FlushRequester getFlushRequester() {
return this.cacheFlusher;
}
@@ -2140,10 +2152,12 @@ public class HRegionServer implements Cl
/**
* @return Return the fs.
*/
+ @Override
public FileSystem getFileSystem() {
return fs;
}
+ @Override
public String toString() {
return getServerName().toString();
}
@@ -2184,10 +2198,12 @@ public class HRegionServer implements Cl
}
+ @Override
public ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS() {
return this.regionsInTransitionInRS;
}
+ @Override
public ExecutorService getExecutorService() {
return service;
}
@@ -2323,7 +2339,8 @@ public class HRegionServer implements Cl
* @param tableName
* @return Online regions from <code>tableName</code>
*/
- public List<HRegion> getOnlineRegions(byte[] tableName) {
+ @Override
+ public List<HRegion> getOnlineRegions(byte[] tableName) {
List<HRegion> tableRegions = new ArrayList<HRegion>();
synchronized (this.onlineRegions) {
for (HRegion region: this.onlineRegions.values()) {
@@ -2358,6 +2375,7 @@ public class HRegionServer implements Cl
this.scannerName = n;
}
+ @Override
public void leaseExpired() {
RegionScannerHolder rsh = scanners.remove(this.scannerName);
if (rsh != null) {
@@ -2637,6 +2655,7 @@ public class HRegionServer implements Cl
*
* @return True if we OOME'd and are aborting.
*/
+ @Override
public boolean checkOOME(final Throwable e) {
boolean stop = false;
try {
@@ -4241,7 +4260,7 @@ public class HRegionServer implements Cl
nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
ZKUtil.setData(zkw, nodePath,
ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
- LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
+ LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
+ previousRSName);
} else {
LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());