You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2012/09/29 19:57:52 UTC
svn commit: r1391852 [1/2] - in /hbase/trunk:
hbase-common/src/main/java/org/apache/hadoop/hbase/util/
hbase-server/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/
hbase-server/src/main/java/org/apache...
Author: stack
Date: Sat Sep 29 17:57:49 2012
New Revision: 1391852
URL: http://svn.apache.org/viewvc?rev=1391852&view=rev
Log:
HBASE-6476 Replace all occurrences of System.currentTimeMillis() with EnvironmentEdge equivalent; REVERT
Modified:
hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java Sat Sep 29 17:57:49 2012
@@ -142,7 +142,7 @@ public class Threads {
* @param msToWait the amount of time to sleep in milliseconds
*/
public static void sleepWithoutInterrupt(final long msToWait) {
- long timeMillis = EnvironmentEdgeManager.currentTimeMillis();
+ long timeMillis = System.currentTimeMillis();
long endTime = timeMillis + msToWait;
boolean interrupted = false;
while (timeMillis < endTime) {
@@ -151,7 +151,7 @@ public class Threads {
} catch (InterruptedException ex) {
interrupted = true;
}
- timeMillis = EnvironmentEdgeManager.currentTimeMillis();
+ timeMillis = System.currentTimeMillis();
}
if (interrupted) {
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java Sat Sep 29 17:57:49 2012
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Sleeper;
@@ -61,7 +60,7 @@ public abstract class Chore extends HasT
try {
boolean initialChoreComplete = false;
while (!this.stopper.isStopped()) {
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
try {
if (!initialChoreComplete) {
initialChoreComplete = initialChore();
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Sat Sep 29 17:57:49 2012
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
@@ -262,7 +261,7 @@ public class HRegionInfo implements Comp
public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey,
final boolean split)
throws IllegalArgumentException {
- this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTimeMillis());
+ this(tableName, startKey, endKey, split, System.currentTimeMillis());
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java Sat Sep 29 17:57:49 2012
@@ -24,7 +24,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
@@ -57,7 +56,7 @@ implements WritableComparable<HServerInf
* @param webuiport Port the webui runs on.
*/
public HServerInfo(final HServerAddress serverAddress, final int webuiport) {
- this(serverAddress, EnvironmentEdgeManager.currentTimeMillis(), webuiport);
+ this(serverAddress, System.currentTimeMillis(), webuiport);
}
public HServerInfo(HServerAddress serverAddress, long startCode,
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java Sat Sep 29 17:57:49 2012
@@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.executor.
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -109,7 +108,7 @@ public class RegionTransition {
ZooKeeperProtos.RegionTransition.Builder builder = ZooKeeperProtos.RegionTransition.newBuilder().
setEventTypeCode(type.getCode()).setRegionName(ByteString.copyFrom(regionName)).
setOriginServerName(pbsn);
- builder.setCreateTime(EnvironmentEdgeManager.currentTimeMillis());
+ builder.setCreateTime(System.currentTimeMillis());
if (payload != null) builder.setPayload(ByteString.copyFrom(payload));
return new RegionTransition(builder.build());
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java Sat Sep 29 17:57:49 2012
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.client.Re
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -471,10 +470,10 @@ public class CatalogTracker {
*/
public ServerName waitForMeta(long timeout)
throws InterruptedException, IOException, NotAllMetaRegionsOnlineException {
- long stop = EnvironmentEdgeManager.currentTimeMillis() + timeout;
+ long stop = System.currentTimeMillis() + timeout;
long waitTime = Math.min(50, timeout);
synchronized (metaAvailable) {
- while(!stopped && (timeout == 0 || EnvironmentEdgeManager.currentTimeMillis() < stop)) {
+ while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) {
if (getMetaServerConnection() != null) {
return metaLocation;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java Sat Sep 29 17:57:49 2012
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.UnknownSc
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataOutputBuffer;
/**
@@ -98,7 +97,7 @@ public class ClientScanner extends Abstr
}
this.scan = scan;
this.tableName = tableName;
- this.lastNext = EnvironmentEdgeManager.currentTimeMillis();
+ this.lastNext = System.currentTimeMillis();
this.connection = connection;
if (scan.getMaxResultSize() > 0) {
this.maxScannerResultSize = scan.getMaxResultSize();
@@ -286,8 +285,8 @@ public class ClientScanner extends Abstr
// If we are over the timeout, throw this exception to the client
// Else, it's because the region moved and we used the old id
// against the new region server; reset the scanner.
- if (timeout < EnvironmentEdgeManager.currentTimeMillis()) {
- long elapsed = EnvironmentEdgeManager.currentTimeMillis() - lastNext;
+ if (timeout < System.currentTimeMillis()) {
+ long elapsed = System.currentTimeMillis() - lastNext;
ScannerTimeoutException ex = new ScannerTimeoutException(
elapsed + "ms passed since the last invocation, " +
"timeout is currently set to " + scannerTimeout);
@@ -314,7 +313,7 @@ public class ClientScanner extends Abstr
callable = null;
continue;
}
- long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ long currentTime = System.currentTimeMillis();
if (this.scanMetrics != null ) {
this.scanMetrics.sumOfMillisSecBetweenNexts.inc(currentTime-lastNext);
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java Sat Sep 29 17:57:49 2012
@@ -54,7 +54,7 @@ import java.util.Map;
* Specifying timestamps, deleteFamily and deleteColumns will delete all
* versions with a timestamp less than or equal to that passed. If no
* timestamp is specified, an entry is added with a timestamp of 'now'
- * where 'now' is the servers's EnvironmentEdgeManager.currentTimeMillis().
+ * where 'now' is the servers's System.currentTimeMillis().
* Specifying a timestamp to the deleteColumn method will
* delete versions only with a timestamp equal to that specified.
* If no timestamp is passed to deleteColumn, internally, it figures the
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java Sat Sep 29 17:57:49 2012
@@ -86,7 +86,6 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
import org.apache.hadoop.hbase.util.Triple;
@@ -1447,7 +1446,7 @@ public class HConnectionManager {
--keepAliveZookeeperUserCount;
if (keepAliveZookeeperUserCount <=0 ){
keepZooKeeperWatcherAliveUntil =
- EnvironmentEdgeManager.currentTimeMillis() + keepAlive;
+ System.currentTimeMillis() + keepAlive;
}
}
}
@@ -1485,7 +1484,7 @@ public class HConnectionManager {
}
protected void closeMasterProtocol(MasterProtocolState protocolState) {
- if (EnvironmentEdgeManager.currentTimeMillis() > protocolState.keepAliveUntil) {
+ if (System.currentTimeMillis() > protocolState.keepAliveUntil) {
hci.closeMasterProtocol(protocolState);
protocolState.keepAliveUntil = Long.MAX_VALUE;
}
@@ -1495,7 +1494,7 @@ public class HConnectionManager {
protected void chore() {
synchronized (hci.masterAndZKLock) {
if (hci.canCloseZKW) {
- if (EnvironmentEdgeManager.currentTimeMillis() >
+ if (System.currentTimeMillis() >
hci.keepZooKeeperWatcherAliveUntil) {
hci.closeZooKeeperWatcher();
@@ -1660,7 +1659,7 @@ public class HConnectionManager {
--protocolState.userCount;
if (protocolState.userCount <= 0) {
protocolState.keepAliveUntil =
- EnvironmentEdgeManager.currentTimeMillis() + keepAlive;
+ System.currentTimeMillis() + keepAlive;
}
}
}
@@ -2097,12 +2096,12 @@ public class HConnectionManager {
final Callable<MultiResponse> delegate = hci.createCallable(loc, multi, tableName);
return new Callable<MultiResponse>() {
- private final long creationTime = EnvironmentEdgeManager.currentTimeMillis();
+ private final long creationTime = System.currentTimeMillis();
@Override
public MultiResponse call() throws Exception {
try {
- final long waitingTime = delay + creationTime - EnvironmentEdgeManager.currentTimeMillis();
+ final long waitingTime = delay + creationTime - System.currentTimeMillis();
if (waitingTime > 0) {
Thread.sleep(waitingTime);
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java Sat Sep 29 17:57:49 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.PairOfSameType;
/**
@@ -410,7 +409,7 @@ public class MetaScanner {
HRegionInfo splitB = daughters.getSecond();
HTable metaTable = getMetaTable();
- long start = EnvironmentEdgeManager.currentTimeMillis();
+ long start = System.currentTimeMillis();
Result resultA = getRegionResultBlocking(metaTable, blockingTimeout,
splitA.getRegionName());
if (resultA != null) {
@@ -420,7 +419,7 @@ public class MetaScanner {
throw new RegionOfflineException("Split daughter region " +
splitA.getRegionNameAsString() + " cannot be found in META.");
}
- long rem = blockingTimeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
+ long rem = blockingTimeout - (System.currentTimeMillis() - start);
Result resultB = getRegionResultBlocking(metaTable, rem,
splitB.getRegionName());
@@ -441,8 +440,8 @@ public class MetaScanner {
if (LOG.isDebugEnabled()) {
LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName));
}
- long start = EnvironmentEdgeManager.currentTimeMillis();
- while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < timeout) {
Get get = new Get(regionName);
Result result = metaTable.get(get);
HRegionInfo info = getHRegionInfo(result);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java Sat Sep 29 17:57:49 2012
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
@@ -142,10 +141,10 @@ public class ScannerCallable extends Ser
RequestConverter.buildScanRequest(scannerId, caching, false);
try {
ScanResponse response = server.scan(null, request);
- long timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ long timestamp = System.currentTimeMillis();
rrs = ResponseConverter.getResults(response);
if (logScannerActivity) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
if (now - timestamp > logCutOffLatency) {
int rows = rrs == null ? 0 : rrs.length;
LOG.info("Took " + (now-timestamp) + "ms to fetch "
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java Sat Sep 29 17:57:49 2012
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.HRegionLo
import org.apache.hadoop.hbase.client.ClientProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;
@@ -115,12 +114,12 @@ public abstract class ServerCallable<T>
public void beforeCall() {
HBaseRPC.setRpcTimeout(this.callTimeout);
- this.startTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.startTime = System.currentTimeMillis();
}
public void afterCall() {
HBaseRPC.resetRpcTimeout();
- this.endTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.endTime = System.currentTimeMillis();
}
public void shouldRetry(Throwable throwable) throws IOException {
@@ -183,7 +182,7 @@ public abstract class ServerCallable<T>
}
RetriesExhaustedException.ThrowableWithExtraContext qt =
new RetriesExhaustedException.ThrowableWithExtraContext(t,
- EnvironmentEdgeManager.currentTimeMillis(), toString());
+ System.currentTimeMillis(), toString());
exceptions.add(qt);
if (tries == numRetries - 1) {
throw new RetriesExhaustedException(tries, exceptions);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java Sat Sep 29 17:57:49 2012
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.co
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.Server;
@@ -188,7 +187,7 @@ public abstract class CoprocessorHost<E
FileSystem fs = path.getFileSystem(HBaseConfiguration.create());
Path dst = new Path(System.getProperty("java.io.tmpdir") +
java.io.File.separator +"." + pathPrefix +
- "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + ".jar");
+ "." + className + "." + System.currentTimeMillis() + ".jar");
fs.copyToLocalFile(path, dst);
File tmpLocal = new File(dst.toString());
tmpLocal.deleteOnExit();
@@ -214,7 +213,7 @@ public abstract class CoprocessorHost<E
if (entry.getName().matches("/lib/[^/]+\\.jar")) {
File file = new File(System.getProperty("java.io.tmpdir") +
java.io.File.separator +"." + pathPrefix +
- "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + "." + entry.getName().substring(5));
+ "." + className + "." + System.currentTimeMillis() + "." + entry.getName().substring(5));
IOUtils.copyBytes(jarFile.getInputStream(entry), new FileOutputStream(file), conf, true);
file.deleteOnExit();
paths.add(file.toURL());
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java Sat Sep 29 17:57:49 2012
@@ -266,7 +266,7 @@ public class HBaseClient {
protected Call(RpcRequestBody param) {
this.param = param;
- this.startTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.startTime = System.currentTimeMillis();
synchronized (HBaseClient.this) {
this.id = counter++;
}
@@ -432,7 +432,7 @@ public class HBaseClient {
/** Update lastActivity with the current time. */
protected void touch() {
- lastActivity.set(EnvironmentEdgeManager.currentTimeMillis());
+ lastActivity.set(System.currentTimeMillis());
}
/**
@@ -604,7 +604,7 @@ public class HBaseClient {
protected synchronized boolean waitForWork() {
if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) {
long timeout = maxIdleTime-
- (EnvironmentEdgeManager.currentTimeMillis()-lastActivity.get());
+ (System.currentTimeMillis()-lastActivity.get());
if (timeout>0) {
try {
wait(timeout);
@@ -634,7 +634,7 @@ public class HBaseClient {
* since last I/O activity is equal to or greater than the ping interval
*/
protected synchronized void sendPing() throws IOException {
- long curTime = EnvironmentEdgeManager.currentTimeMillis();
+ long curTime = System.currentTimeMillis();
if ( curTime - lastActivity.get() >= pingInterval) {
lastActivity.set(curTime);
//noinspection SynchronizeOnNonFinalField
@@ -1056,7 +1056,7 @@ public class HBaseClient {
Iterator<Entry<Integer, Call>> itor = calls.entrySet().iterator();
while (itor.hasNext()) {
Call c = itor.next().getValue();
- long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime();
+ long waitTime = System.currentTimeMillis() - c.getStartTime();
if (waitTime >= rpcTimeout) {
if (this.closeException == null) {
// There may be no exception in the case that there are many calls
@@ -1080,7 +1080,7 @@ public class HBaseClient {
try {
if (!calls.isEmpty()) {
Call firstCall = calls.get(calls.firstKey());
- long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() - firstCall.getStartTime();
+ long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime();
if (maxWaitTime < rpcTimeout) {
rpcTimeout -= maxWaitTime;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java Sat Sep 29 17:57:49 2012
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.DoNotRetr
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ReflectionUtils;
@@ -225,7 +224,7 @@ public class HBaseRPC {
long timeout
) throws IOException {
// HBase does limited number of reconnects which is different from hadoop.
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
IOException ioe;
int reconnectAttempts = 0;
while (true) {
@@ -258,7 +257,7 @@ public class HBaseRPC {
}
}
// check if timed out
- if (EnvironmentEdgeManager.currentTimeMillis() - timeout >= startTime) {
+ if (System.currentTimeMillis() - timeout >= startTime) {
throw ioe;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Sat Sep 29 17:57:49 2012
@@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.security.
import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus;
import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
@@ -334,7 +333,7 @@ public abstract class HBaseServer implem
this.id = id;
this.rpcRequestBody = rpcRequestBody;
this.connection = connection;
- this.timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ this.timestamp = System.currentTimeMillis();
this.response = null;
this.delayResponse = false;
this.responder = responder;
@@ -465,7 +464,7 @@ public abstract class HBaseServer implem
@Override
public void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException {
if (!connection.channel.isOpen()) {
- long afterTime = EnvironmentEdgeManager.currentTimeMillis() - timestamp;
+ long afterTime = System.currentTimeMillis() - timestamp;
throw new CallerDisconnectedException(
"Aborting call " + this + " after " + afterTime + " ms, since " +
"caller disconnected");
@@ -617,7 +616,7 @@ public abstract class HBaseServer implem
*/
private void cleanupConnections(boolean force) {
if (force || numConnections > thresholdIdleConnections) {
- long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ long currentTime = System.currentTimeMillis();
if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
return;
}
@@ -654,7 +653,7 @@ public abstract class HBaseServer implem
}
else i++;
}
- lastCleanupRunTime = EnvironmentEdgeManager.currentTimeMillis();
+ lastCleanupRunTime = System.currentTimeMillis();
}
}
@@ -752,7 +751,7 @@ public abstract class HBaseServer implem
try {
reader.startAdd();
SelectionKey readKey = reader.registerChannel(channel);
- c = getConnection(channel, EnvironmentEdgeManager.currentTimeMillis());
+ c = getConnection(channel, System.currentTimeMillis());
readKey.attach(c);
synchronized (connectionList) {
connectionList.add(numConnections, c);
@@ -775,7 +774,7 @@ public abstract class HBaseServer implem
if (c == null) {
return;
}
- c.setLastContact(EnvironmentEdgeManager.currentTimeMillis());
+ c.setLastContact(System.currentTimeMillis());
try {
count = c.readAndProcess();
@@ -794,7 +793,7 @@ public abstract class HBaseServer implem
// c = null;
}
else {
- c.setLastContact(EnvironmentEdgeManager.currentTimeMillis());
+ c.setLastContact(System.currentTimeMillis());
}
}
@@ -868,7 +867,7 @@ public abstract class HBaseServer implem
LOG.info(getName() + ": doAsyncWrite threw exception " + e);
}
}
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
if (now < lastPurgeTime + purgeTimeout) {
continue;
}
@@ -1023,7 +1022,7 @@ public abstract class HBaseServer implem
if (inHandler) {
// set the serve time when the response has to be sent later
- call.timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ call.timestamp = System.currentTimeMillis();
if (enqueueInSelector(call))
done = true;
}
@@ -1071,7 +1070,7 @@ public abstract class HBaseServer implem
//
void doRespond(Call call) throws IOException {
// set the serve time when the response has to be sent later
- call.timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ call.timestamp = System.currentTimeMillis();
responseQueueLen++;
boolean doRegister = false;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java Sat Sep 29 17:57:49 2012
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.security.
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Objects;
import org.codehaus.jackson.map.ObjectMapper;
@@ -170,7 +169,7 @@ class ProtobufRpcEngine implements RpcEn
throws ServiceException {
long startTime = 0;
if (LOG.isDebugEnabled()) {
- startTime = EnvironmentEdgeManager.currentTimeMillis();
+ startTime = System.currentTimeMillis();
}
RpcRequestBody rpcRequest = constructRpcRequest(method, args);
@@ -179,7 +178,7 @@ class ProtobufRpcEngine implements RpcEn
val = client.call(rpcRequest, address, protocol, ticket, rpcTimeout);
if (LOG.isDebugEnabled()) {
- long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
+ long callTime = System.currentTimeMillis() - startTime;
if (LOG.isTraceEnabled()) LOG.trace("Call: " + method.getName() + " " + callTime);
}
return val;
@@ -351,7 +350,7 @@ class ProtobufRpcEngine implements RpcEn
throw new HBaseRPC.UnknownProtocolException(protocol);
}
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
if (method.getParameterTypes().length == 2) {
// RpcController + Message in the method args
// (generated code from RPC bits in .proto files have RpcController)
@@ -364,7 +363,7 @@ class ProtobufRpcEngine implements RpcEn
+ method.getName() + "]" + ", allowed (at most): 2, Actual: "
+ method.getParameterTypes().length);
}
- int processingTime = (int) (EnvironmentEdgeManager.currentTimeMillis() - startTime);
+ int processingTime = (int) (System.currentTimeMillis() - startTime);
int qTime = (int) (startTime-receiveTime);
if (TRACELOG.isDebugEnabled()) {
TRACELOG.debug("Call #" + CurCall.get().id +
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java Sat Sep 29 17:57:49 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.filter.Fi
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.util.StringUtils;
@@ -93,7 +92,7 @@ public class TableRecordReaderImpl {
}
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
- timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ timestamp = System.currentTimeMillis();
rowcount = 0;
}
}
@@ -198,7 +197,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;
@@ -233,7 +232,7 @@ public class TableRecordReaderImpl {
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Sat Sep 29 17:57:49 2012
@@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.WritableUtils;
@@ -127,7 +126,7 @@ public class HFileOutputFormat extends F
private final Map<byte [], WriterLength> writers =
new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
- private final byte [] now = Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis());
+ private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
private boolean rollRequested = false;
public void write(ImmutableBytesWritable row, KeyValue kv)
@@ -214,7 +213,7 @@ public class HFileOutputFormat extends F
private void close(final HFile.Writer w) throws IOException {
if (w != null) {
w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
- Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis()));
+ Bytes.toBytes(System.currentTimeMillis()));
w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java Sat Sep 29 17:57:49 2012
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.mapreduce;
import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.io.IOException;
import java.util.ArrayList;
@@ -418,7 +417,7 @@ public class ImportTsv {
// If timestamp option is not specified, use current system time.
long timstamp = conf
- .getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTimeMillis());
+ .getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());
// Set it back to replace invalid timestamp (non-numeric) with current
// system time
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java Sat Sep 29 17:57:49 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.Sc
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -85,7 +84,7 @@ public class TableRecordReaderImpl {
this.scanner = this.htable.getScanner(currentScan);
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
- timestamp = EnvironmentEdgeManager.currentTimeMillis();
+ timestamp = System.currentTimeMillis();
rowcount = 0;
}
}
@@ -196,7 +195,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;
@@ -232,7 +231,7 @@ public class TableRecordReaderImpl {
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Sat Sep 29 17:57:49 2012
@@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.master.me
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
@@ -666,7 +665,7 @@ public class AssignmentManager extends Z
Lock lock = locker.acquireLock(encodedName);
try {
// Printing if the event was created a long time ago helps debugging
- boolean lateEvent = createTime < (EnvironmentEdgeManager.currentTimeMillis() - 15000);
+ boolean lateEvent = createTime < (System.currentTimeMillis() - 15000);
RegionState regionState = regionStates.getRegionTransitionState(encodedName);
LOG.debug("Handling transition=" + rt.getEventType() +
", server=" + sn + ", region=" +
@@ -1216,7 +1215,7 @@ public class AssignmentManager extends Z
try {
// Send OPEN RPC. If it fails on a IOE or RemoteException, the
// TimeoutMonitor will pick up the pieces.
- long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() +
+ long maxWaitTime = System.currentTimeMillis() +
this.server.getConfiguration().
getLong("hbase.regionserver.rpc.startup.waittime", 60000);
while (!this.server.isStopped()) {
@@ -1245,7 +1244,7 @@ public class AssignmentManager extends Z
} else if (decodedException instanceof ServerNotRunningYetException) {
// This is the one exception to retry. For all else we should just fail
// the startup.
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
if (now > maxWaitTime) throw e;
LOG.debug("Server is not yet up; waiting up to " +
(maxWaitTime - now) + "ms", e);
@@ -1339,7 +1338,7 @@ public class AssignmentManager extends Z
// call to open risks our writing PENDING_OPEN after state has been moved
// to OPENING by the regionserver.
regionStates.updateRegionState(state.getRegion(),
- RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(),
+ RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
destination);
this.counter.addAndGet(1);
}
@@ -1442,7 +1441,7 @@ public class AssignmentManager extends Z
" to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
regionStates.updateRegionState(state.getRegion(),
- RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(),
+ RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
plan.getDestination());
// Send OPEN RPC. This can fail if the server on other end is is not up.
// Pass the version that was obtained while setting the node to OFFLINE.
@@ -2100,10 +2099,10 @@ public class AssignmentManager extends Z
// that if it returns without an exception that there was a period of time
// with no regions in transition from the point-of-view of the in-memory
// state of the Master.
- final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout;
+ final long endTime = System.currentTimeMillis() + timeout;
while (!this.server.isStopped() && regionStates.isRegionsInTransition()
- && endTime > EnvironmentEdgeManager.currentTimeMillis()) {
+ && endTime > System.currentTimeMillis()) {
regionStates.waitForUpdate(100);
}
@@ -2300,7 +2299,7 @@ public class AssignmentManager extends Z
* on a frequent interval.
*/
public void updateRegionsInTransitionMetrics() {
- long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ long currentTime = System.currentTimeMillis();
int totalRITs = 0;
int totalRITsOverThreshold = 0;
long oldestRITTime = 0;
@@ -2431,7 +2430,7 @@ public class AssignmentManager extends Z
boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
// Iterate all regions in transition checking for time outs
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
// no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
// a copy while another thread is adding/removing items
for (RegionState regionState : regionStates.getRegionsInTransition().values()) {
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java Sat Sep 29 17:57:49 2012
@@ -36,7 +36,6 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Run bulk assign. Does one RCP per regionserver passing a
@@ -104,10 +103,10 @@ public class GeneralBulkAssigner extends
pool.shutdown(); // no more task allowed
int serverCount = bulkPlan.size();
int regionCount = regionSet.size();
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
long rpcWaitTime = startTime + timeout;
while (!server.isStopped() && !pool.isTerminated()
- && rpcWaitTime > EnvironmentEdgeManager.currentTimeMillis()) {
+ && rpcWaitTime > System.currentTimeMillis()) {
if (failedPlans.isEmpty()) {
pool.awaitTermination(100, TimeUnit.MILLISECONDS);
} else {
@@ -116,7 +115,7 @@ public class GeneralBulkAssigner extends
}
if (!pool.isTerminated()) {
LOG.warn("bulk assigner is still running after "
- + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms, shut it down now");
+ + (System.currentTimeMillis() - startTime) + "ms, shut it down now");
// some assigner hangs, can't wait any more, shutdown the pool now
List<Runnable> notStarted = pool.shutdownNow();
if (notStarted != null && !notStarted.isEmpty()) {
@@ -134,11 +133,11 @@ public class GeneralBulkAssigner extends
Configuration conf = server.getConfiguration();
long perRegionOpenTimeGuesstimate =
conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000);
- long endTime = Math.max(EnvironmentEdgeManager.currentTimeMillis(), rpcWaitTime)
+ long endTime = Math.max(System.currentTimeMillis(), rpcWaitTime)
+ perRegionOpenTimeGuesstimate * (reassigningRegions + 1);
RegionStates regionStates = assignmentManager.getRegionStates();
// We're not synchronizing on regionsInTransition now because we don't use any iterator.
- while (!regionSet.isEmpty() && !server.isStopped() && endTime > EnvironmentEdgeManager.currentTimeMillis()) {
+ while (!regionSet.isEmpty() && !server.isStopped() && endTime > System.currentTimeMillis()) {
Iterator<HRegionInfo> regionInfoIterator = regionSet.iterator();
while (regionInfoIterator.hasNext()) {
HRegionInfo hri = regionInfoIterator.next();
@@ -154,7 +153,7 @@ public class GeneralBulkAssigner extends
}
if (LOG.isDebugEnabled()) {
- long elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
+ long elapsedTime = System.currentTimeMillis() - startTime;
String status = "successfully";
if (!regionSet.isEmpty()) {
status = "with " + regionSet.size() + " regions still not assigned yet";
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Sat Sep 29 17:57:49 2012
@@ -164,7 +164,6 @@ import org.apache.hadoop.hbase.replicati
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
@@ -356,7 +355,7 @@ Server {
// Set our address.
this.isa = this.rpcServer.getListenerAddress();
this.serverName = new ServerName(this.isa.getHostName(),
- this.isa.getPort(), EnvironmentEdgeManager.currentTimeMillis());
+ this.isa.getPort(), System.currentTimeMillis());
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
@@ -432,7 +431,7 @@ Server {
MonitoredTask startupStatus =
TaskMonitor.get().createStatus("Master startup");
startupStatus.setDescription("Master startup");
- masterStartTime = EnvironmentEdgeManager.currentTimeMillis();
+ masterStartTime = System.currentTimeMillis();
try {
/*
* Block on becoming the active master.
@@ -572,10 +571,10 @@ Server {
long lastMsgTs = 0l;
long now = 0l;
while (!this.stopped) {
- now = EnvironmentEdgeManager.currentTimeMillis();
+ now = System.currentTimeMillis();
if ((now - lastMsgTs) >= this.msgInterval) {
doMetrics();
- lastMsgTs = EnvironmentEdgeManager.currentTimeMillis();
+ lastMsgTs = System.currentTimeMillis();
}
stopSleeper.sleep();
}
@@ -626,7 +625,7 @@ Server {
*/
status.setStatus("Initializing Master file system");
- this.masterActiveTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.masterActiveTime = System.currentTimeMillis();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);
@@ -1266,7 +1265,7 @@ Server {
if (!this.loadBalancerTracker.isBalancerOn()) return false;
// Do this call outside of synchronized block.
int maximumBalanceTime = getBalancerCutoffTime();
- long cutoffTime = EnvironmentEdgeManager.currentTimeMillis() + maximumBalanceTime;
+ long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
boolean balancerRan;
synchronized (this.balancer) {
// Only allow one balance run at at time.
@@ -1312,13 +1311,13 @@ Server {
if (plans != null && !plans.isEmpty()) {
for (RegionPlan plan: plans) {
LOG.info("balance " + plan);
- long balStartTime = EnvironmentEdgeManager.currentTimeMillis();
+ long balStartTime = System.currentTimeMillis();
this.assignmentManager.balance(plan);
- totalRegPlanExecTime += EnvironmentEdgeManager.currentTimeMillis()-balStartTime;
+ totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
rpCount++;
if (rpCount < plans.size() &&
// if performing next balance exceeds cutoff time, exit the loop
- (EnvironmentEdgeManager.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
+ (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
maximumBalanceTime);
break;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java Sat Sep 29 17:57:49 2012
@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* State of a Region while undergoing transitions.
@@ -56,11 +55,11 @@ public class RegionState implements org.
private volatile State state;
public RegionState() {
- this.stamp = new AtomicLong(EnvironmentEdgeManager.currentTimeMillis());
+ this.stamp = new AtomicLong(System.currentTimeMillis());
}
public RegionState(HRegionInfo region, State state) {
- this(region, state, EnvironmentEdgeManager.currentTimeMillis(), null);
+ this(region, state, System.currentTimeMillis(), null);
}
public RegionState(HRegionInfo region,
@@ -72,7 +71,7 @@ public class RegionState implements org.
}
public void updateTimestampToNow() {
- this.stamp.set(EnvironmentEdgeManager.currentTimeMillis());
+ this.stamp.set(System.currentTimeMillis());
}
public State getState() {
@@ -140,7 +139,7 @@ public class RegionState implements org.
*/
public String toDescriptiveString() {
long lstamp = stamp.get();
- long relTime = EnvironmentEdgeManager.currentTimeMillis() - lstamp;
+ long relTime = System.currentTimeMillis() - lstamp;
return region.getRegionNameAsString()
+ " state=" + state
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java Sat Sep 29 17:57:49 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
/**
@@ -212,7 +211,7 @@ public class RegionStates {
*/
public synchronized RegionState updateRegionState(
final HRegionInfo hri, final State state, final ServerName serverName) {
- return updateRegionState(hri, state, EnvironmentEdgeManager.currentTimeMillis(), serverName);
+ return updateRegionState(hri, state, System.currentTimeMillis(), serverName);
}
/**
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Sat Sep 29 17:57:49 2012
@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.protobuf.ServiceException;
@@ -288,7 +287,7 @@ public class ServerManager {
*/
private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
throws ClockOutOfSyncException {
- long skew = EnvironmentEdgeManager.currentTimeMillis() - serverCurrentTime;
+ long skew = System.currentTimeMillis() - serverCurrentTime;
if (skew > maxSkew) {
String message = "Server " + serverName + " has been " +
"rejected; Reported time is too far out of sync with master. " +
@@ -409,7 +408,7 @@ public class ServerManager {
long previousLogTime = 0;
while (!onlineServers.isEmpty()) {
- if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) {
+ if (System.currentTimeMillis() > (previousLogTime + 1000)) {
StringBuilder sb = new StringBuilder();
for (ServerName key : this.onlineServers.keySet()) {
if (sb.length() > 0) {
@@ -418,7 +417,7 @@ public class ServerManager {
sb.append(key);
}
LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
- previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
+ previousLogTime = System.currentTimeMillis();
}
synchronized (onlineServers) {
@@ -695,7 +694,7 @@ public class ServerManager {
final int maxToStart = this.master.getConfiguration().
getInt("hbase.master.wait.on.regionservers.maxtostart", Integer.MAX_VALUE);
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
final long startTime = now;
long slept = 0;
long lastLogTime = 0;
@@ -724,7 +723,7 @@ public class ServerManager {
// We sleep for some time
final long sleepTime = 50;
Thread.sleep(sleepTime);
- now = EnvironmentEdgeManager.currentTimeMillis();
+ now = System.currentTimeMillis();
slept = now - startTime;
oldCount = count;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java Sat Sep 29 17:57:49 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Joiner;
import com.google.common.collect.ArrayListMultimap;
@@ -50,7 +49,7 @@ public abstract class BaseLoadBalancer i
// slop for regions
private float slop;
private Configuration config;
- private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
+ private static final Random RANDOM = new Random(System.currentTimeMillis());
private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);
protected MasterServices services;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java Sat Sep 29 17:57:49 2012
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.collect.MinMaxPriorityQueue;
@@ -57,7 +56,7 @@ import com.google.common.collect.MinMaxP
@InterfaceAudience.Private
public class DefaultLoadBalancer extends BaseLoadBalancer {
private static final Log LOG = LogFactory.getLog(DefaultLoadBalancer.class);
- private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
+ private static final Random RANDOM = new Random(System.currentTimeMillis());
private RegionInfoComparator riComparator = new RegionInfoComparator();
private RegionPlan.RegionPlanComparator rpComparator = new RegionPlan.RegionPlanComparator();
@@ -182,7 +181,7 @@ public class DefaultLoadBalancer extends
public List<RegionPlan> balanceCluster(
Map<ServerName, List<HRegionInfo>> clusterMap) {
boolean emptyRegionServerPresent = false;
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
ClusterLoadState cs = new ClusterLoadState(clusterMap);
@@ -319,7 +318,7 @@ public class DefaultLoadBalancer extends
// If none needed to fill all to min and none left to drain all to max,
// we are done
if (neededRegions == 0 && regionsToMove.isEmpty()) {
- long endTime = EnvironmentEdgeManager.currentTimeMillis();
+ long endTime = System.currentTimeMillis();
LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
"Moving " + totalNumMoved + " regions off of " +
serversOverloaded + " overloaded servers onto " +
@@ -397,7 +396,7 @@ public class DefaultLoadBalancer extends
}
}
- long endTime = EnvironmentEdgeManager.currentTimeMillis();
+ long endTime = System.currentTimeMillis();
if (!regionsToMove.isEmpty() || neededRegions != 0) {
// Emit data so can diagnose how balancer went astray.
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java Sat Sep 29 17:57:49 2012
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.util.ArrayList;
import java.util.HashMap;
@@ -107,7 +106,7 @@ public class StochasticLoadBalancer exte
private static final String MAX_MOVES_KEY = "hbase.master.balancer.stochastic.maxMoveRegions";
private static final String KEEP_REGION_LOADS = "hbase.master.balancer.stochastic.numRegionLoadsToRemember";
- private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
+ private static final Random RANDOM = new Random(System.currentTimeMillis());
private static final Log LOG = LogFactory.getLog(StochasticLoadBalancer.class);
private final RegionLocationFinder regionFinder = new RegionLocationFinder();
private ClusterStatus clusterStatus = null;
@@ -184,7 +183,7 @@ public class StochasticLoadBalancer exte
return null;
}
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
// Keep track of servers to iterate through them.
List<ServerName> servers = new ArrayList<ServerName>(clusterState.keySet());
@@ -249,7 +248,7 @@ public class StochasticLoadBalancer exte
}
- long endTime = EnvironmentEdgeManager.currentTimeMillis();
+ long endTime = System.currentTimeMillis();
if (initCost > currentCost) {
List<RegionPlan> plans = createRegionPlans(initialRegionMapping, clusterState);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java Sat Sep 29 17:57:49 2012
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
@@ -41,7 +40,7 @@ public class TimeToLiveLogCleaner extend
@Override
public boolean isLogDeletable(Path filePath) {
long time = 0;
- long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ long currentTime = System.currentTimeMillis();
try {
FileStatus fStat = filePath.getFileSystem(this.getConf()).getFileStatus(filePath);
time = fStat.getModificationTime();
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java Sat Sep 29 17:57:49 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.master.HM
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.KeeperException;
@@ -60,8 +59,8 @@ public class DeleteTableHandler extends
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
- long done = EnvironmentEdgeManager.currentTimeMillis() + waitTime;
- while (EnvironmentEdgeManager.currentTimeMillis() < done) {
+ long done = System.currentTimeMillis() + waitTime;
+ while (System.currentTimeMillis() < done) {
if (!am.getRegionStates().isRegionInTransition(region)) break;
Threads.sleep(waitingTimeForEvents);
LOG.debug("Waiting on region to clear regions in transition; "
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java Sat Sep 29 17:57:49 2012
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.master.HM
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.KeeperException;
import org.cloudera.htrace.Trace;
@@ -179,14 +178,14 @@ public class DisableTableHandler extends
@Override
protected boolean waitUntilDone(long timeout)
throws InterruptedException {
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
long remaining = timeout;
List<HRegionInfo> regions = null;
while (!server.isStopped() && remaining > 0) {
Thread.sleep(waitingTimeForEvents);
regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
if (regions.isEmpty()) break;
- remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
+ remaining = timeout - (System.currentTimeMillis() - startTime);
}
return regions != null && regions.isEmpty();
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java Sat Sep 29 17:57:49 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.master.Bu
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.KeeperException;
import org.cloudera.htrace.Trace;
@@ -220,7 +219,7 @@ public class EnableTableHandler extends
@Override
protected boolean waitUntilDone(long timeout)
throws InterruptedException {
- long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ long startTime = System.currentTimeMillis();
long remaining = timeout;
List<HRegionInfo> regions = null;
int lastNumberOfRegions = 0;
@@ -235,7 +234,7 @@ public class EnableTableHandler extends
lastNumberOfRegions = regions.size();
timeout += waitingTimeForEvents;
}
- remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
+ remaining = timeout - (System.currentTimeMillis() - startTime);
}
return isDone(regions);
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java Sat Sep 29 17:57:49 2012
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
@@ -44,7 +43,7 @@ public class MetricsRate extends Metrics
super(name, description);
this.value = 0;
this.prevRate = 0;
- this.ts = EnvironmentEdgeManager.currentTimeMillis();
+ this.ts = System.currentTimeMillis();
registry.add(name, this);
}
@@ -61,7 +60,7 @@ public class MetricsRate extends Metrics
}
public synchronized void intervalHeartBeat() {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
long diff = (now-ts) / 1000;
if (diff < 1){
// To make sure our averages aren't skewed by fast repeated calls,
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java Sat Sep 29 17:57:49 2012
@@ -25,7 +25,6 @@ import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
@@ -55,7 +54,7 @@ public class MemoryBoundedLogMessageBuff
* older messages until the desired memory limit is achieved.
*/
public synchronized void add(String messageText) {
- LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTimeMillis());
+ LogMessage message = new LogMessage(messageText, System.currentTimeMillis());
usage += message.estimateHeapUsage();
messages.add(message);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java Sat Sep 29 17:57:49 2012
@@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.client.Op
import org.apache.hadoop.hbase.io.WritableWithSize;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.Writable;
import org.codehaus.jackson.map.ObjectMapper;
@@ -192,7 +191,7 @@ public class MonitoredRPCHandlerImpl ext
long queueTime) {
this.methodName = methodName;
this.params = params;
- this.rpcStartTime = EnvironmentEdgeManager.currentTimeMillis();
+ this.rpcStartTime = System.currentTimeMillis();
this.rpcQueueTime = queueTime;
this.state = State.RUNNING;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java Sat Sep 29 17:57:49 2012
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.monitoring;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.codehaus.jackson.map.ObjectMapper;
import java.io.IOException;
@@ -38,7 +37,7 @@ class MonitoredTaskImpl implements Monit
protected volatile State state = State.RUNNING;
public MonitoredTaskImpl() {
- startTime = EnvironmentEdgeManager.currentTimeMillis();
+ startTime = System.currentTimeMillis();
statusTime = startTime;
stateTime = startTime;
}
@@ -117,12 +116,12 @@ class MonitoredTaskImpl implements Monit
@Override
public void setStatus(String status) {
this.status = status;
- statusTime = EnvironmentEdgeManager.currentTimeMillis();
+ statusTime = System.currentTimeMillis();
}
protected void setState(State state) {
this.state = state;
- stateTime = EnvironmentEdgeManager.currentTimeMillis();
+ stateTime = System.currentTimeMillis();
}
@Override
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java?rev=1391852&r1=1391851&r2=1391852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java Sat Sep 29 17:57:49 2012
@@ -30,7 +30,6 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
@@ -141,12 +140,12 @@ public class TaskMonitor {
private boolean canPurge(MonitoredTask stat) {
long cts = stat.getCompletionTimestamp();
- return (cts > 0 && EnvironmentEdgeManager.currentTimeMillis() - cts > EXPIRATION_TIME);
+ return (cts > 0 && System.currentTimeMillis() - cts > EXPIRATION_TIME);
}
public void dumpAsText(PrintWriter out) {
- long now = EnvironmentEdgeManager.currentTimeMillis();
+ long now = System.currentTimeMillis();
List<MonitoredTask> tasks = getTasks();
for (MonitoredTask task : tasks) {