Posted to commits@hbase.apache.org by st...@apache.org on 2014/08/30 02:14:41 UTC

[4/4] git commit: HBASE-11822 Convert EnvironmentEdge#getCurrentTimeMillis to getCurrentTime

HBASE-11822 Convert EnvironmentEdge#getCurrentTimeMillis to getCurrentTime


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3bfbd062
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3bfbd062
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3bfbd062

Branch: refs/heads/master
Commit: 3bfbd062915d680f653af5b3747dbcfefbd3df12
Parents: cdfc96f
Author: stack <st...@apache.org>
Authored: Fri Aug 29 17:07:51 2014 -0700
Committer: stack <st...@apache.org>
Committed: Fri Aug 29 17:14:25 2014 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  2 +-
 .../hadoop/hbase/client/AsyncProcess.java       | 10 +++----
 .../hadoop/hbase/client/ConnectionManager.java  |  4 +--
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 14 +++++-----
 .../hadoop/hbase/client/HTableMultiplexer.java  |  6 ++---
 .../hadoop/hbase/client/RpcRetryingCaller.java  | 12 ++++-----
 .../RpcRetryingCallerWithReadReplicas.java      |  2 +-
 .../org/apache/hadoop/hbase/ipc/RpcClient.java  | 22 +++++++--------
 .../hbase/util/DefaultEnvironmentEdge.java      |  6 ++---
 .../hadoop/hbase/util/EnvironmentEdge.java      |  9 +++----
 .../hbase/util/EnvironmentEdgeManager.java      |  6 ++---
 .../hbase/util/IncrementingEnvironmentEdge.java |  2 +-
 .../hbase/util/TestEnvironmentEdgeManager.java  | 10 +++----
 .../example/ZooKeeperScanPolicyObserver.java    |  6 ++---
 .../TestZooKeeperScanPolicyObserver.java        |  2 +-
 .../hbase/IntegrationTestLazyCfLoading.java     |  8 +++---
 .../mapreduce/IntegrationTestBulkLoad.java      |  4 +--
 .../hadoop/hbase/backup/HFileArchiver.java      |  4 +--
 .../errorhandling/TimeoutExceptionInjector.java |  4 +--
 .../hbase/io/hfile/bucket/BucketCache.java      |  2 +-
 .../hbase/io/hfile/bucket/BucketCacheStats.java |  4 +--
 .../hadoop/hbase/master/AssignmentManager.java  | 22 +++++++--------
 .../hbase/master/ClusterStatusPublisher.java    |  4 +--
 .../apache/hadoop/hbase/master/DeadServer.java  |  2 +-
 .../hadoop/hbase/master/MasterFileSystem.java   |  4 +--
 .../hadoop/hbase/master/SplitLogManager.java    | 12 ++++-----
 .../hadoop/hbase/master/TableLockManager.java   |  2 +-
 .../hbase/master/TableNamespaceManager.java     |  4 +--
 .../balancer/FavoredNodeAssignmentHelper.java   |  2 +-
 .../master/balancer/StochasticLoadBalancer.java |  6 ++---
 .../master/cleaner/TimeToLiveHFileCleaner.java  |  2 +-
 .../master/cleaner/TimeToLiveLogCleaner.java    |  2 +-
 .../handler/DispatchMergingRegionHandler.java   | 10 +++----
 .../hbase/master/snapshot/SnapshotManager.java  |  2 +-
 .../hbase/regionserver/CompactSplitThread.java  |  4 +--
 .../hbase/regionserver/CompactionTool.java      |  2 +-
 .../hbase/regionserver/DefaultMemStore.java     |  4 +--
 .../hadoop/hbase/regionserver/HRegion.java      | 28 ++++++++++----------
 .../hbase/regionserver/HRegionServer.java       |  6 ++---
 .../hadoop/hbase/regionserver/HStore.java       | 12 ++++-----
 .../hadoop/hbase/regionserver/Leases.java       |  4 +--
 .../hbase/regionserver/MemStoreFlusher.java     | 20 +++++++-------
 .../MetricsRegionServerWrapperImpl.java         |  2 +-
 .../hbase/regionserver/RSRpcServices.java       | 24 ++++++++---------
 .../hbase/regionserver/RegionMergeRequest.java  |  4 +--
 .../regionserver/RegionMergeTransaction.java    |  2 +-
 .../hbase/regionserver/ScanQueryMatcher.java    |  2 +-
 .../hbase/regionserver/ServerNonceManager.java  |  6 ++---
 .../hbase/regionserver/SplitTransaction.java    |  2 +-
 .../hadoop/hbase/regionserver/StoreScanner.java |  2 +-
 .../regionserver/StorefileRefresherChore.java   |  2 +-
 .../compactions/CompactionRequest.java          |  2 +-
 .../compactions/StripeCompactionPolicy.java     |  2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |  4 +--
 .../hbase/regionserver/wal/HLogFactory.java     |  6 ++---
 .../hadoop/hbase/regionserver/wal/HLogKey.java  |  2 +-
 .../hbase/regionserver/wal/HLogSplitter.java    |  4 +--
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |  6 ++---
 .../regionserver/wal/WALEditsReplaySink.java    |  4 +--
 .../replication/regionserver/MetricsSource.java |  2 +-
 .../regionserver/ReplicationThrottler.java      |  6 ++---
 .../hbase/security/access/AccessController.java |  2 +-
 .../token/AuthenticationTokenSecretManager.java | 10 +++----
 .../snapshot/SnapshotDescriptionUtils.java      |  4 +--
 .../hadoop/hbase/util/ConnectionCache.java      |  6 ++---
 .../apache/hadoop/hbase/util/FSHDFSUtils.java   | 10 +++----
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  6 ++---
 .../hbase/util/ManualEnvironmentEdge.java       |  4 +--
 .../hbase/util/hbck/TableLockChecker.java       |  2 +-
 .../zookeeper/lock/ZKInterProcessLockBase.java  |  4 +--
 .../hadoop/hbase/client/TestFromClientSide.java |  4 +--
 .../org/apache/hadoop/hbase/client/TestHCM.java |  2 +-
 .../client/TestScannersFromClientSide.java      |  8 +++---
 .../TestRegionObserverInterface.java            |  4 +--
 .../hbase/coprocessor/TestWALObserver.java      | 12 ++++-----
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  2 +-
 .../io/hfile/TestScannerSelectionUsingTTL.java  |  2 +-
 .../master/TestAssignmentManagerOnCluster.java  |  4 +--
 .../master/TestClusterStatusPublisher.java      |  2 +-
 .../master/TestDistributedLogSplitting.java     |  8 +++---
 .../hbase/master/cleaner/TestHFileCleaner.java  |  2 +-
 .../master/cleaner/TestSnapshotFromMaster.java  |  2 +-
 .../hbase/regionserver/TestDefaultMemStore.java |  2 +-
 .../hbase/regionserver/TestKeepDeletes.java     | 22 +++++++--------
 .../hbase/regionserver/TestMinVersions.java     | 12 ++++-----
 .../hbase/regionserver/TestQueryMatcher.java    | 10 +++----
 .../TestRegionMergeTransactionOnCluster.java    |  4 +--
 .../TestSplitTransactionOnCluster.java          |  4 +--
 .../hadoop/hbase/regionserver/TestStore.java    |  8 +++---
 .../hbase/regionserver/TestStoreScanner.java    |  9 +------
 .../hbase/regionserver/wal/TestWALReplay.java   | 10 +++----
 .../replication/TestReplicationSmallTests.java  |  2 +-
 .../access/TestCellACLWithMultipleVersions.java |  2 +-
 .../security/token/TestTokenAuthentication.java |  2 +-
 .../security/token/TestZKSecretWatcher.java     |  2 +-
 .../hbase/util/TestCoprocessorScanPolicy.java   |  4 +--
 .../hbase/util/TestDefaultEnvironmentEdge.java  |  9 +++----
 .../hadoop/hbase/util/TestFSHDFSUtils.java      |  4 +--
 .../util/TestIncrementingEnvironmentEdge.java   | 17 +++++-------
 99 files changed, 290 insertions(+), 310 deletions(-)
----------------------------------------------------------------------
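
The change is a mechanical rename: the instance method EnvironmentEdge#currentTimeMillis()
becomes currentTime(), and every call site switches from the static
EnvironmentEdgeManager.currentTimeMillis() to EnvironmentEdgeManager.currentTime(). The value
returned is still epoch milliseconds (see DefaultEnvironmentEdge below). A minimal sketch of
the new call-site pattern, not part of this commit:

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class ElapsedTimeExample {
    public static void main(String[] args) throws InterruptedException {
      // Before this patch: long start = EnvironmentEdgeManager.currentTimeMillis();
      long start = EnvironmentEdgeManager.currentTime();  // still milliseconds
      Thread.sleep(50);                                    // stand-in for real work
      long elapsedMs = EnvironmentEdgeManager.currentTime() - start;
      System.out.println("elapsed ~" + elapsedMs + " ms");
    }
  }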


http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index e9ca88b..c63e4c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1411,7 +1411,7 @@ public class MetaTableAccessor {
   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
     // using regionserver's local time as the timestamp of Put.
     // See: HBASE-11536
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now,
       Bytes.toBytes(sn.getHostAndPort()));
     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 2768da0..34136e9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -943,7 +943,7 @@ class AsyncProcess {
      * Starts waiting to issue replica calls on a different thread; or issues them immediately.
      */
     private void startWaitingForReplicaCalls(List<Action<Row>> actionsForReplicaThread) {
-      long startTime = EnvironmentEdgeManager.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       ReplicaCallIssuingRunnable replicaRunnable = new ReplicaCallIssuingRunnable(
           actionsForReplicaThread, startTime);
       if (primaryCallTimeoutMicroseconds == 0) {
@@ -1421,10 +1421,10 @@ class AsyncProcess {
 
     private boolean waitUntilDone(long cutoff) throws InterruptedException {
       boolean hasWait = cutoff != Long.MAX_VALUE;
-      long lastLog = EnvironmentEdgeManager.currentTimeMillis();
+      long lastLog = EnvironmentEdgeManager.currentTime();
       long currentInProgress;
       while (0 != (currentInProgress = actionsInProgress.get())) {
-        long now = EnvironmentEdgeManager.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (hasWait && (now * 1000L) > cutoff) {
           return false;
         }
@@ -1504,11 +1504,11 @@ class AsyncProcess {
 
   /** Wait until the async does not have more than max tasks in progress. */
   private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException {
-    long lastLog = EnvironmentEdgeManager.currentTimeMillis();
+    long lastLog = EnvironmentEdgeManager.currentTime();
     long currentInProgress, oldInProgress = Long.MAX_VALUE;
     while ((currentInProgress = this.tasksInProgress.get()) > max) {
       if (oldInProgress != currentInProgress) { // Wait for in progress to change.
-        long now = EnvironmentEdgeManager.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now > lastLog + 10000) {
           lastLog = now;
           LOG.info("#" + id + ", waiting for some tasks to finish. Expected max="

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 0813745..7c9c0b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -2513,7 +2513,7 @@ class ConnectionManager {
 
     public ServerErrorTracker(long timeout, int maxRetries) {
       this.maxRetries = maxRetries;
-      this.canRetryUntil = EnvironmentEdgeManager.currentTimeMillis() + timeout;
+      this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout;
       this.startTrackingTime = new Date().getTime();
     }
 
@@ -2523,7 +2523,7 @@ class ConnectionManager {
     boolean canRetryMore(int numRetry) {
       // If there is a single try we must not take into account the time.
       return numRetry < maxRetries || (maxRetries > 1 &&
-          EnvironmentEdgeManager.currentTimeMillis() < this.canRetryUntil);
+          EnvironmentEdgeManager.currentTime() < this.canRetryUntil);
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fe8fb31..5d25c0b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -822,7 +822,7 @@ public class HBaseAdmin implements Admin {
    */
   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
     boolean enabled = false;
-    long start = EnvironmentEdgeManager.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
       try {
         enabled = isTableEnabled(tableName);
@@ -848,7 +848,7 @@ public class HBaseAdmin implements Admin {
       }
     }
     if (!enabled) {
-      long msec = EnvironmentEdgeManager.currentTimeMillis() - start;
+      long msec = EnvironmentEdgeManager.currentTime() - start;
       throw new IOException("Table '" + tableName +
         "' not yet enabled, after " + msec + "ms.");
     }
@@ -2802,7 +2802,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
         .build();
     IsSnapshotDoneResponse done = null;
-    long start = EnvironmentEdgeManager.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     long max = response.getExpectedTimeout();
     long maxPauseTime = max / this.numRetries;
     int tries = 0;
@@ -2810,7 +2810,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
         ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " +
         maxPauseTime + " ms per retry)");
     while (tries == 0
-        || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) {
+        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
       try {
         // sleep a backoff <= pauseTime amount
         long sleep = getPauseTime(tries++);
@@ -3011,7 +3011,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
         .replace("{snapshot.name}", snapshotName)
         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
-        .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTimeMillis()));
+        .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
       snapshot(failSafeSnapshotSnapshotName, tableName);
     }
@@ -3185,7 +3185,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
       }
     });
 
-    long start = EnvironmentEdgeManager.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     long max = response.getExpectedTimeout();
     long maxPauseTime = max / this.numRetries;
     int tries = 0;
@@ -3193,7 +3193,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
         signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
     boolean done = false;
     while (tries == 0
-        || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done)) {
+        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
       try {
         // sleep a backoff <= pauseTime amount
         long sleep = getPauseTime(tries++);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 9d378ae..4c2c4d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -485,7 +485,7 @@ public class HTableMultiplexer {
       int failedCount = 0;
       while (true) {
         try {
-          start = elapsed = EnvironmentEdgeManager.currentTimeMillis();
+          start = elapsed = EnvironmentEdgeManager.currentTime();
 
           // Clear the processingList, putToStatusMap and failedCount
           processingList.clear();
@@ -545,7 +545,7 @@ public class HTableMultiplexer {
             // Update the totalFailedCount
             this.totalFailedPutCount.addAndGet(failedCount);
             
-            elapsed = EnvironmentEdgeManager.currentTimeMillis() - start;
+            elapsed = EnvironmentEdgeManager.currentTime() - start;
             // Update latency counters
             averageLatency.add(elapsed);
             if (elapsed > maxLatency.get()) {
@@ -566,7 +566,7 @@ public class HTableMultiplexer {
 
           // Sleep for a while
           if (elapsed == start) {
-            elapsed = EnvironmentEdgeManager.currentTimeMillis() - start;
+            elapsed = EnvironmentEdgeManager.currentTime() - start;
           }
           if (elapsed < frequency) {
             try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index 9e11a27..cf9a210 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
@@ -73,7 +73,7 @@ public class RpcRetryingCaller<T> {
     } else {
       if (callTimeout == Integer.MAX_VALUE) return Integer.MAX_VALUE;
       int remainingTime = (int) (callTimeout -
-          (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime));
+          (EnvironmentEdgeManager.currentTime() - this.globalStartTime));
       if (remainingTime < MIN_RPC_TIMEOUT) {
         // If there is no time left, we're trying anyway. It's too late.
         // 0 means no timeout, and it's not the intent here. So we secure both cases by
@@ -103,7 +103,7 @@ public class RpcRetryingCaller<T> {
   throws IOException, RuntimeException {
     List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
       new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
-    this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
+    this.globalStartTime = EnvironmentEdgeManager.currentTime();
     for (int tries = 0;; tries++) {
       long expectedSleep;
       try {
@@ -113,7 +113,7 @@ public class RpcRetryingCaller<T> {
         ExceptionUtil.rethrowIfInterrupt(t);
         if (LOG.isTraceEnabled()) {
           LOG.trace("Call exception, tries=" + tries + ", retries=" + retries + ", started=" +
-              (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + " ms ago, "
+              (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + " ms ago, "
               + "cancelled=" + cancelled.get(), t);
         }
 
@@ -122,7 +122,7 @@ public class RpcRetryingCaller<T> {
         callable.throwable(t, retries != 1);
         RetriesExhaustedException.ThrowableWithExtraContext qt =
             new RetriesExhaustedException.ThrowableWithExtraContext(t,
-                EnvironmentEdgeManager.currentTimeMillis(), toString());
+                EnvironmentEdgeManager.currentTime(), toString());
         exceptions.add(qt);
         if (tries >= retries - 1) {
           throw new RetriesExhaustedException(tries, exceptions);
@@ -158,7 +158,7 @@ public class RpcRetryingCaller<T> {
    * @return Calculate how long a single call took
    */
   private long singleCallDuration(final long expectedSleep) {
-    return (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + expectedSleep;
+    return (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + expectedSleep;
   }
 
   /**
@@ -173,7 +173,7 @@ public class RpcRetryingCaller<T> {
   public T callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
   throws IOException, RuntimeException {
     // The code of this method should be shared with withRetries.
-    this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
+    this.globalStartTime = EnvironmentEdgeManager.currentTime();
     try {
       callable.prepare(false);
       return callable.call(callTimeout);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 6cd422f..f15ad02 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -252,7 +252,7 @@ public class RpcRetryingCallerWithReadReplicas {
 
     RetriesExhaustedException.ThrowableWithExtraContext qt =
         new RetriesExhaustedException.ThrowableWithExtraContext(t,
-            EnvironmentEdgeManager.currentTimeMillis(), null);
+            EnvironmentEdgeManager.currentTime(), null);
 
     List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
         Collections.singletonList(qt);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
index 369b1f5..b4394b7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
@@ -188,7 +188,7 @@ public class RpcClient {
      * Add an address to the list of the failed servers list.
      */
     public synchronized void addToFailedServers(InetSocketAddress address) {
-      final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout;
+      final long expiry = EnvironmentEdgeManager.currentTime() + recheckServersTimeout;
       failedServers.addFirst(new Pair<Long, String>(expiry, address.toString()));
     }
 
@@ -203,7 +203,7 @@ public class RpcClient {
       }
 
       final String lookup = address.toString();
-      final long now = EnvironmentEdgeManager.currentTimeMillis();
+      final long now = EnvironmentEdgeManager.currentTime();
 
       // iterate, looking for the search entry and cleaning expired entries
       Iterator<Pair<Long, String>> it = failedServers.iterator();
@@ -261,7 +261,7 @@ public class RpcClient {
       this.param = param;
       this.md = md;
       this.cells = cells;
-      this.startTime = EnvironmentEdgeManager.currentTimeMillis();
+      this.startTime = EnvironmentEdgeManager.currentTime();
       this.responseDefaultType = responseDefaultType;
       this.id = callIdCnt.getAndIncrement();
       this.timeout = timeout;
@@ -277,7 +277,7 @@ public class RpcClient {
         return false;
       }
 
-      long waitTime = EnvironmentEdgeManager.currentTimeMillis() - getStartTime();
+      long waitTime = EnvironmentEdgeManager.currentTime() - getStartTime();
       if (waitTime >= timeout) {
         IOException ie = new CallTimeoutException("Call id=" + id +
             ", waitTime=" + waitTime + ", operationTimeout=" + timeout + " expired.");
@@ -293,7 +293,7 @@ public class RpcClient {
         return Integer.MAX_VALUE;
       }
 
-      int remaining = timeout - (int) (EnvironmentEdgeManager.currentTimeMillis() - getStartTime());
+      int remaining = timeout - (int) (EnvironmentEdgeManager.currentTime() - getStartTime());
       return remaining > 0 ? remaining : 0;
     }
 
@@ -731,7 +731,7 @@ public class RpcClient {
     protected synchronized boolean waitForWork() throws InterruptedException {
       // beware of the concurrent access to the calls list: we can add calls, but as well
       //  remove them.
-      long waitUntil = EnvironmentEdgeManager.currentTimeMillis() + minIdleTimeBeforeClose;
+      long waitUntil = EnvironmentEdgeManager.currentTime() + minIdleTimeBeforeClose;
 
       while (true) {
         if (shouldCloseConnection.get()) {
@@ -749,7 +749,7 @@ public class RpcClient {
           return true;
         }
 
-        if (EnvironmentEdgeManager.currentTimeMillis() >= waitUntil) {
+        if (EnvironmentEdgeManager.currentTime() >= waitUntil) {
           // Connection is idle.
           // We expect the number of calls to be zero here, but actually someone can
           //  adds a call at the any moment, as there is no synchronization between this task
@@ -820,7 +820,7 @@ public class RpcClient {
     private synchronized boolean setupSaslConnection(final InputStream in2,
         final OutputStream out2) throws IOException {
       saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal, fallbackAllowed,
-          conf.get("hbase.rpc.protection", 
+          conf.get("hbase.rpc.protection",
               QualityOfProtection.AUTHENTICATION.name().toLowerCase()));
       return saslRpcClient.saslConnect(in2, out2);
     }
@@ -1245,7 +1245,7 @@ public class RpcClient {
           // To catch the calls without timeout that were cancelled.
           itor.remove();
         } else if (allCalls) {
-          long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime();
+          long waitTime = EnvironmentEdgeManager.currentTime() - c.getStartTime();
           IOException ie = new IOException("Connection to " + getRemoteAddress()
               + " is closing. Call id=" + c.id + ", waitTime=" + waitTime);
           c.setException(ie);
@@ -1669,7 +1669,7 @@ public class RpcClient {
   throws ServiceException {
     long startTime = 0;
     if (LOG.isTraceEnabled()) {
-      startTime = EnvironmentEdgeManager.currentTimeMillis();
+      startTime = EnvironmentEdgeManager.currentTime();
     }
     int callTimeout = 0;
     CellScanner cells = null;
@@ -1691,7 +1691,7 @@ public class RpcClient {
       }
 
       if (LOG.isTraceEnabled()) {
-        long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
+        long callTime = EnvironmentEdgeManager.currentTime() - startTime;
         LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms");
       }
       return val.getFirst();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
index e8eab93..b8461f2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
@@ -25,15 +25,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class DefaultEnvironmentEdge implements EnvironmentEdge {
-
-
   /**
    * {@inheritDoc}
    * <p/>
    * This implementation returns {@link System#currentTimeMillis()}
    */
   @Override
-  public long currentTimeMillis() {
+  public long currentTime() {
     return System.currentTimeMillis();
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java
index a43fa66..ee8c00a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java
@@ -28,11 +28,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public interface EnvironmentEdge {
-
   /**
-   * Returns the currentTimeMillis.
+   * Returns the currentTime.
    *
-   * @return currentTimeMillis.
+   * @return Current time.
    */
-  long currentTimeMillis();
-}
+  long currentTime();
+}
\ No newline at end of file
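
After the rename, EnvironmentEdge exposes a single currentTime() method; DefaultEnvironmentEdge
(above) keeps returning System.currentTimeMillis(). As an illustration only, a hypothetical
implementation that skews the clock by a fixed offset would now look like this:

  import org.apache.hadoop.hbase.util.EnvironmentEdge;

  // Hypothetical example, not part of this commit.
  public class OffsetEnvironmentEdge implements EnvironmentEdge {
    private final long offsetMs;

    public OffsetEnvironmentEdge(long offsetMs) {
      this.offsetMs = offsetMs;
    }

    @Override
    public long currentTime() {  // was currentTimeMillis() before this patch
      return System.currentTimeMillis() + offsetMs;
    }
  }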

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java
index c7d4b25..809bbe3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java
@@ -67,11 +67,11 @@ public class EnvironmentEdgeManager {
 
   /**
    * Defers to the delegate and calls the
-   * {@link EnvironmentEdge#currentTimeMillis()} method.
+   * {@link EnvironmentEdge#currentTime()} method.
    *
    * @return current time in millis according to the delegate.
    */
-  public static long currentTimeMillis() {
-    return getDelegate().currentTimeMillis();
+  public static long currentTime() {
+    return getDelegate().currentTime();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
index c1ce25c..18a258d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
@@ -50,7 +50,7 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge {
    * method is called. The first value is 1.
    */
   @Override
-  public synchronized long currentTimeMillis() {
+  public synchronized long currentTime() {
     return timeIncrement++;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java
index 13e42fb..bd9efb5 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java
@@ -58,11 +58,9 @@ public class TestEnvironmentEdgeManager {
     EnvironmentEdge mock = mock(EnvironmentEdge.class);
     EnvironmentEdgeManager.injectEdge(mock);
     long expectation = 3456;
-    when(mock.currentTimeMillis()).thenReturn(expectation);
-    long result = EnvironmentEdgeManager.currentTimeMillis();
-    verify(mock).currentTimeMillis();
+    when(mock.currentTime()).thenReturn(expectation);
+    long result = EnvironmentEdgeManager.currentTime();
+    verify(mock).currentTime();
     assertEquals(expectation, result);
   }
-
-}
-
+}
\ No newline at end of file
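
Besides the Mockito-based check above, test code can pin the clock deterministically through the
same injectEdge() hook by installing the IncrementingEnvironmentEdge touched by this patch. A
sketch, not part of this commit (it assumes EnvironmentEdgeManager.reset() restores the default
wall-clock edge afterwards):

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

  public class DeterministicClockExample {
    public static void main(String[] args) {
      EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
      try {
        long t1 = EnvironmentEdgeManager.currentTime();  // 1
        long t2 = EnvironmentEdgeManager.currentTime();  // 2
        System.out.println(t1 + " then " + t2);
      } finally {
        EnvironmentEdgeManager.reset();  // restore the default edge
      }
    }
  }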

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 1180249..8b6f975 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -98,7 +98,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
     @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION")
     public byte[] getData() {
       // try at most twice/minute
-      if (needSetup && EnvironmentEdgeManager.currentTimeMillis() > lastSetupTry + 30000) {
+      if (needSetup && EnvironmentEdgeManager.currentTime() > lastSetupTry + 30000) {
         synchronized (this) {
           // make sure only one thread tries to reconnect
           if (needSetup) {
@@ -112,7 +112,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
         try {
           LOG.debug("Connecting to ZK");
           // record this attempt
-          lastSetupTry = EnvironmentEdgeManager.currentTimeMillis();
+          lastSetupTry = EnvironmentEdgeManager.currentTime();
           if (zk.exists(node, false) != null) {
             data = zk.getData(node, this, null);
             LOG.debug("Read synchronously: "+(data == null ? "null" : Bytes.toLong(data)));
@@ -186,7 +186,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
     if (oldSI.getTtl() == Long.MAX_VALUE) {
       return null;
     }
-    long ttl = Math.max(EnvironmentEdgeManager.currentTimeMillis() -
+    long ttl = Math.max(EnvironmentEdgeManager.currentTime() -
         Bytes.toLong(data), oldSI.getTtl());
     return new ScanInfo(store.getFamily(), ttl,
         oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
index 4ab53c7..824910a 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
@@ -75,7 +75,7 @@ public class TestZooKeeperScanPolicyObserver {
     desc.addFamily(hcd);
     TEST_UTIL.getHBaseAdmin().createTable(desc);
     HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
 
     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "test", null);
     ZooKeeper zk = zkw.getRecoverableZooKeeper().getZooKeeper();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
index 377bbdd..a1e306d 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
@@ -235,7 +235,7 @@ public class IntegrationTestLazyCfLoading {
     writer.start(1, keysToWrite, WRITER_THREADS);
 
     // Now, do scans.
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     long timeLimit = now + (maxRuntime * 60000);
     boolean isWriterDone = false;
     while (now < timeLimit && !isWriterDone) {
@@ -255,7 +255,7 @@ public class IntegrationTestLazyCfLoading {
       // Not a strict lower bound - writer knows nothing about filters, so we report
       // this from generator. Writer might have generated the value but not put it yet.
       long onesGennedBeforeScan = dataGen.getExpectedNumberOfKeys();
-      long startTs = EnvironmentEdgeManager.currentTimeMillis();
+      long startTs = EnvironmentEdgeManager.currentTime();
       ResultScanner results = table.getScanner(scan);
       long resultCount = 0;
       Result result = null;
@@ -265,7 +265,7 @@ public class IntegrationTestLazyCfLoading {
         Assert.assertTrue("Failed to verify [" + Bytes.toString(result.getRow())+ "]", isOk);
         ++resultCount;
       }
-      long timeTaken = EnvironmentEdgeManager.currentTimeMillis() - startTs;
+      long timeTaken = EnvironmentEdgeManager.currentTime() - startTs;
       // Verify the result count.
       long onesGennedAfterScan = dataGen.getExpectedNumberOfKeys();
       Assert.assertTrue("Read " + resultCount + " keys when at most " + onesGennedAfterScan
@@ -280,7 +280,7 @@ public class IntegrationTestLazyCfLoading {
       LOG.info("Scan took " + timeTaken + "ms");
       if (!isWriterDone) {
         Thread.sleep(WAIT_BETWEEN_SCANS_MS);
-        now = EnvironmentEdgeManager.currentTimeMillis();
+        now = EnvironmentEdgeManager.currentTime();
       }
     }
     Assert.assertEquals("There are write failures", 0, writer.getNumWriteFailures());

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 1e2203a..f7f8727 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -246,7 +246,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
 
   private void runLinkedListMRJob(int iteration) throws Exception {
     String jobName =  IntegrationTestBulkLoad.class.getSimpleName() + " - " +
-        EnvironmentEdgeManager.currentTimeMillis();
+        EnvironmentEdgeManager.currentTime();
     Configuration conf = new Configuration(util.getConfiguration());
     Path p = util.getDataTestDirOnTestFS(getTablename() +  "-" + iteration);
     HTable table = new HTable(conf, getTablename());
@@ -639,7 +639,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
     LOG.info("Running check");
     Configuration conf = getConf();
-    String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTimeMillis();
+    String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
     Path p = util.getDataTestDirOnTestFS(jobName);
 
     Job job = new Job(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index f9cc60f..a04cb88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -255,7 +255,7 @@ public class HFileArchiver {
     }
 
     // do the actual archive
-    long start = EnvironmentEdgeManager.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     File file = new FileablePath(fs, storeFile);
     if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
       throw new IOException("Failed to archive/delete the file for region:"
@@ -280,7 +280,7 @@ public class HFileArchiver {
   private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir,
       Collection<File> toArchive) throws IOException {
     if (LOG.isTraceEnabled()) LOG.trace("Starting to archive " + toArchive);
-    long start = EnvironmentEdgeManager.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     List<File> failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start);
 
     // notify that some files were not archived.

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
index 79bfcde..ba25ac6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
@@ -63,7 +63,7 @@ public class TimeoutExceptionInjector {
           // mark the task is run, to avoid repeats
           TimeoutExceptionInjector.this.complete = true;
         }
-        long end = EnvironmentEdgeManager.currentTimeMillis();
+        long end = EnvironmentEdgeManager.currentTime();
         TimeoutException tee =  new TimeoutException(
             "Timeout caused Foreign Exception", start, end, maxTime);
         String source = "timer-" + timer;
@@ -107,7 +107,7 @@ public class TimeoutExceptionInjector {
     }
     LOG.debug("Scheduling process timer to run in: " + maxTime + " ms");
     timer.schedule(timerTask, maxTime);
-    this.start = EnvironmentEdgeManager.currentTimeMillis();
+    this.start = EnvironmentEdgeManager.currentTime();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 86c8944..2bb0b8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -918,7 +918,7 @@ public class BucketCache implements BlockCache, HeapSize {
    * cache
    */
   private void checkIOErrorIsTolerated() {
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     if (this.ioErrorStartTime > 0) {
       if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) {
         LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration +

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
index 5a290bf..3069dc3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
@@ -32,7 +32,7 @@ public class BucketCacheStats extends CacheStats {
   private final AtomicLong ioHitCount = new AtomicLong(0);
   private final AtomicLong ioHitTime = new AtomicLong(0);
   private final static int nanoTime = 1000000;
-  private long lastLogTime = EnvironmentEdgeManager.currentTimeMillis();
+  private long lastLogTime = EnvironmentEdgeManager.currentTime();
 
   BucketCacheStats() {
     super("BucketCache");
@@ -50,7 +50,7 @@ public class BucketCacheStats extends CacheStats {
   }
 
   public long getIOHitsPerSecond() {
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     long took = (now - lastLogTime) / 1000;
     lastLogTime = now;
     return took == 0? 0: ioHitCount.get() / took;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index feedfef..5ecbe98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -677,7 +677,7 @@ public class AssignmentManager {
    */
   boolean assign(final ServerName destination, final List<HRegionInfo> regions)
     throws InterruptedException {
-    long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       int regionCount = regions.size();
       if (regionCount == 0) {
@@ -829,7 +829,7 @@ public class AssignmentManager {
       LOG.debug("Bulk assigning done for " + destination);
       return true;
     } finally {
-      metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
+      metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime);
     }
   }
 
@@ -891,11 +891,11 @@ public class AssignmentManager {
           } else {
             if (maxWaitTime < 0) {
               maxWaitTime =
-                  EnvironmentEdgeManager.currentTimeMillis()
+                  EnvironmentEdgeManager.currentTime()
                       + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
                         DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
             }
-            long now = EnvironmentEdgeManager.currentTimeMillis();
+            long now = EnvironmentEdgeManager.currentTime();
             if (now < maxWaitTime) {
               LOG.debug("Region is already in transition; "
                 + "waiting up to " + (maxWaitTime - now) + "ms", t);
@@ -987,7 +987,7 @@ public class AssignmentManager {
    * @param forceNewPlan
    */
   private void assign(RegionState state, boolean forceNewPlan) {
-    long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       Configuration conf = server.getConfiguration();
       RegionPlan plan = null;
@@ -1090,18 +1090,18 @@ public class AssignmentManager {
 
             if (maxWaitTime < 0) {
               if (t instanceof RegionAlreadyInTransitionException) {
-                maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+                maxWaitTime = EnvironmentEdgeManager.currentTime()
                   + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
                     DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
               } else {
-                maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+                maxWaitTime = EnvironmentEdgeManager.currentTime()
                   + this.server.getConfiguration().getLong(
                     "hbase.regionserver.rpc.startup.waittime", 60000);
               }
             }
             try {
               needNewPlan = false;
-              long now = EnvironmentEdgeManager.currentTimeMillis();
+              long now = EnvironmentEdgeManager.currentTime();
               if (now < maxWaitTime) {
                 LOG.debug("Server is not yet up or region is already in transition; "
                   + "waiting up to " + (maxWaitTime - now) + "ms", t);
@@ -1182,7 +1182,7 @@ public class AssignmentManager {
       // Run out of attempts
       regionStates.updateRegionState(region, State.FAILED_OPEN);
     } finally {
-      metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
+      metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime);
     }
   }
 
@@ -1912,7 +1912,7 @@ public class AssignmentManager {
   public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
       throws InterruptedException {
     if (!regionStates.isRegionInTransition(hri)) return true;
-    long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
+    long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTime()
         + timeOut;
     // There is already a timeout monitor on regions in transition so I
     // should not have to have one here too?
@@ -1920,7 +1920,7 @@ public class AssignmentManager {
         " to leave regions-in-transition, timeOut=" + timeOut + " ms.");
     while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
       regionStates.waitForUpdate(100);
-      if (EnvironmentEdgeManager.currentTimeMillis() > end) {
+      if (EnvironmentEdgeManager.currentTime() > end) {
         LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
         return false;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 6fe190f..85ec8cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -139,7 +139,7 @@ public class ClusterStatusPublisher extends Chore {
       return;
     }
 
-    final long curTime = EnvironmentEdgeManager.currentTimeMillis();
+    final long curTime = EnvironmentEdgeManager.currentTime();
     if (lastMessageTime > curTime - messagePeriod) {
       // We already sent something less than 10 second ago. Done.
       return;
@@ -177,7 +177,7 @@ public class ClusterStatusPublisher extends Chore {
    */
   protected List<ServerName> generateDeadServersListToSend() {
     // We're getting the message sent since last time, and add them to the list
-    long since = EnvironmentEdgeManager.currentTimeMillis() - messagePeriod * 2;
+    long since = EnvironmentEdgeManager.currentTime() - messagePeriod * 2;
     for (Pair<ServerName, Long> dead : getDeadServers(since)) {
       lastSent.putIfAbsent(dead.getFirst(), 0);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index 48b6ccf..5c232a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -110,7 +110,7 @@ public class DeadServer {
   public synchronized void add(ServerName sn) {
     this.numProcessing++;
     if (!deadServers.containsKey(sn)){
-      deadServers.put(sn, EnvironmentEdgeManager.currentTimeMillis());
+      deadServers.put(sn, EnvironmentEdgeManager.currentTime());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 456447a..be702ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -376,9 +376,9 @@ public class MasterFileSystem {
     List<Path> logDirs = getLogDirs(serverNames);
 
     splitLogManager.handleDeadWorkers(serverNames);
-    splitTime = EnvironmentEdgeManager.currentTimeMillis();
+    splitTime = EnvironmentEdgeManager.currentTime();
     splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
-    splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;
+    splitTime = EnvironmentEdgeManager.currentTime() - splitTime;
 
     if (this.metricsMasterFilesystem != null) {
       if (filter == META_FILTER) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 012e9a0..f684024 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -234,7 +234,7 @@ public class SplitLogManager {
     LOG.debug("Scheduling batch of logs to split");
     SplitLogCounters.tot_mgr_log_split_batch_start.incrementAndGet();
     LOG.info("started splitting " + logfiles.length + " logs in " + logDirs);
-    long t = EnvironmentEdgeManager.currentTimeMillis();
+    long t = EnvironmentEdgeManager.currentTime();
     long totalSize = 0;
     TaskBatch batch = new TaskBatch();
     Boolean isMetaRecovery = (filter == null) ? null : false;
@@ -288,7 +288,7 @@ public class SplitLogManager {
     String msg =
         "finished splitting (more than or equal to) " + totalSize + " bytes in " + batch.installed
             + " log files in " + logDirs + " in "
-            + (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms";
+            + (EnvironmentEdgeManager.currentTime() - t) + "ms";
     status.markComplete(msg);
     LOG.info(msg);
     return totalSize;
@@ -301,7 +301,7 @@ public class SplitLogManager {
    * @return true if a new entry is created, false if it is already there.
    */
   boolean enqueueSplitTask(String taskname, TaskBatch batch) {
-    lastTaskCreateTime = EnvironmentEdgeManager.currentTimeMillis();
+    lastTaskCreateTime = EnvironmentEdgeManager.currentTime();
     String task =
         ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
             .getSplitLogManagerCoordination().prepareTask(taskname);
@@ -709,7 +709,7 @@ public class SplitLogManager {
         }
       }
       if (tot > 0) {
-        long now = EnvironmentEdgeManager.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now > lastLog + 5000) {
           lastLog = now;
           LOG.info("total tasks = " + tot + " unassigned = " + unassigned + " tasks=" + tasks);
@@ -729,7 +729,7 @@ public class SplitLogManager {
       // that there is always one worker in the system
       if (tot > 0
           && !found_assigned_task
-          && ((EnvironmentEdgeManager.currentTimeMillis() - lastTaskCreateTime) > unassignedTimeout)) {
+          && ((EnvironmentEdgeManager.currentTime() - lastTaskCreateTime) > unassignedTimeout)) {
         for (Map.Entry<String, Task> e : tasks.entrySet()) {
           String key = e.getKey();
           Task task = e.getValue();
@@ -764,7 +764,7 @@ public class SplitLogManager {
 
       // Garbage collect left-over
       long timeInterval =
-          EnvironmentEdgeManager.currentTimeMillis()
+          EnvironmentEdgeManager.currentTime()
               - ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
                   .getSplitLogManagerCoordination().getLastRecoveryTime();
       if (!failedRecoveringRegionDeletions.isEmpty()

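All of SplitLogManager's time-based decisions above (the 5-second logging throttle, the unassignedTimeout check, the recovery garbage collection) go through the edge, which is what makes them controllable in tests. The sketch below swaps in an incrementing clock and restores the default afterwards; it assumes EnvironmentEdgeManager.injectEdge/reset and IncrementingEnvironmentEdge behave as their names suggest.

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

  public class InjectedClockExample {
    public static void main(String[] args) {
      // Replace the default System-backed edge with one that advances on every read.
      EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
      try {
        long t1 = EnvironmentEdgeManager.currentTime();
        long t2 = EnvironmentEdgeManager.currentTime();
        // With the incrementing edge, t2 > t1 even though no wall-clock time passed.
        System.out.println("delta=" + (t2 - t1));
      } finally {
        // Restore the default edge so later code sees real time again.
        EnvironmentEdgeManager.reset();
      }
    }
  }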
http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
index faa11e3..f111f4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
@@ -326,7 +326,7 @@ public abstract class TableLockManager {
           .setThreadId(Thread.currentThread().getId())
           .setPurpose(purpose)
           .setIsShared(isShared)
-          .setCreateTime(EnvironmentEdgeManager.currentTimeMillis()).build();
+          .setCreateTime(EnvironmentEdgeManager.currentTime()).build();
         byte[] lockMetadata = toBytes(data);
 
         InterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(zkWatcher, tableLockZNode,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 99d794d..6a83dc3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -90,10 +90,10 @@ public class TableNamespaceManager {
       // Wait for the namespace table to be assigned.
       // If timed out, we will move ahead without initializing it.
       // So that it should be initialized later on lazily.
-      long startTime = EnvironmentEdgeManager.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT);
       while (!isTableAssigned()) {
-        if (EnvironmentEdgeManager.currentTimeMillis() - startTime + 100 > timeout) {
+        if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) {
           LOG.warn("Timedout waiting for namespace table to be assigned.");
           return;
         }

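The TableNamespaceManager change above sits inside a bounded polling loop: keep checking a condition, and give up once the elapsed time plus the next sleep interval would exceed the configured timeout. A standalone sketch of that shape follows; conditionIsMet() and the 100ms granularity are illustrative placeholders, not HBase APIs.

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class BoundedWaitExample {
    static boolean conditionIsMet() {
      return false; // placeholder for a check like isTableAssigned()
    }

    public static void main(String[] args) throws InterruptedException {
      long timeoutMs = 1000;
      long start = EnvironmentEdgeManager.currentTime();
      while (!conditionIsMet()) {
        // Give up if the next 100ms sleep would push us past the deadline.
        if (EnvironmentEdgeManager.currentTime() - start + 100 > timeoutMs) {
          System.out.println("timed out waiting");
          return;
        }
        Thread.sleep(100);
      }
      System.out.println("condition met");
    }
  }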
http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index ae59f26..79fd21e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -144,7 +144,7 @@ public class FavoredNodeAssignmentHelper {
       put = MetaTableAccessor.makePutFromRegionInfo(regionInfo);
       byte[] favoredNodes = getFavoredNodes(favoredNodeList);
       put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
-          EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
+          EnvironmentEdgeManager.currentTime(), favoredNodes);
       LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
           " with favored nodes " + Bytes.toString(favoredNodes));
     }

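The FavoredNodeAssignmentHelper hunk stamps the favored-nodes cell with an explicit timestamp taken from the edge instead of letting the server assign one. A small sketch of the same pattern on an ordinary client Put is below; the row, family and qualifier names are made-up examples.

  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class TimestampedPutExample {
    public static Put buildPut() {
      Put put = new Put(Bytes.toBytes("example-row"));
      // One timestamp from the pluggable clock, applied explicitly to the cell.
      long ts = EnvironmentEdgeManager.currentTime();
      put.addImmutable(Bytes.toBytes("f"), Bytes.toBytes("q"), ts, Bytes.toBytes("value"));
      return put;
    }
  }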
http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index e353316..8f6314f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -220,7 +220,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
       return null;
     }
 
-    long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
 
     initCosts(cluster);
 
@@ -259,13 +259,13 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
         updateCostsWithAction(cluster, undoAction);
       }
 
-      if (EnvironmentEdgeManager.currentTimeMillis() - startTime >
+      if (EnvironmentEdgeManager.currentTime() - startTime >
           maxRunningTime) {
         break;
       }
     }
 
-    long endTime = EnvironmentEdgeManager.currentTimeMillis();
+    long endTime = EnvironmentEdgeManager.currentTime();
 
     metricsBalancer.balanceCluster(endTime - startTime);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
index b65fbf9..46fe971 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
@@ -46,7 +46,7 @@ public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate {
 
   @Override
   public boolean isFileDeletable(FileStatus fStat) {
-    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     long time = fStat.getModificationTime();
     long life = currentTime - time;
     if (LOG.isTraceEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
index 66b0423..f021954 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
@@ -37,7 +37,7 @@ public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate {
 
   @Override
   public boolean isLogDeletable(FileStatus fStat) {
-    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     long time = fStat.getModificationTime();
     long life = currentTime - time;
     

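The two cleaners above apply the same age test: a file becomes deletable once currentTime minus its modification time exceeds the configured TTL. A minimal sketch of that check, with the TTL value chosen arbitrarily for illustration:

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class TtlCheckExample {
    // True if a file whose modification time is modTimeMs has outlived ttlMs.
    static boolean isExpired(long modTimeMs, long ttlMs) {
      long life = EnvironmentEdgeManager.currentTime() - modTimeMs;
      // A negative life means the timestamp is in the future; treat it as not expired.
      return life >= 0 && life > ttlMs;
    }

    public static void main(String[] args) {
      long anHourAgo = EnvironmentEdgeManager.currentTime() - 3600_000L;
      System.out.println(isExpired(anHourAgo, 600_000L)); // true: older than a 10-minute TTL
    }
  }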
http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
index d09ea64..53f4108 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
@@ -92,7 +92,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
               .getEncodedName()) + " is not online now");
       return;
     }
-    long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     boolean onSameRS = region_a_location.equals(region_b_location);
 
     // Make sure regions are on the same regionserver before send merge
@@ -134,7 +134,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
             // RegionInTransition any more
             break;
           }
-          if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) break;
+          if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) break;
         } catch (InterruptedException e) {
           InterruptedIOException iioe = new InterruptedIOException();
           iioe.initCause(e);
@@ -144,7 +144,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
     }
 
     if (onSameRS) {
-      startTime = EnvironmentEdgeManager.currentTimeMillis();
+      startTime = EnvironmentEdgeManager.currentTime();
       while (!masterServices.isStopped()) {
         try {
           masterServices.getServerManager().sendRegionsMerge(region_a_location,
@@ -153,7 +153,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
             region_a.getEncodedName() + "," + region_b.getEncodedName() + ", forcible=" + forcible);
           break;
         } catch (RegionOpeningException roe) {
-          if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) {
+          if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) {
             LOG.warn("Failed sending merge to " + region_a_location + " after " + timeout + "ms",
               roe);
             break;
@@ -170,7 +170,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
       LOG.info("Cancel merging regions " + region_a.getRegionNameAsString()
           + ", " + region_b.getRegionNameAsString()
           + ", because can't move them together after "
-          + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
+          + (EnvironmentEdgeManager.currentTime() - startTime) + "ms");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 3bf704a..31ec098 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -866,7 +866,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param sentinels map of sentinels to clean
    */
   private synchronized void cleanupSentinels(final Map<TableName, SnapshotSentinel> sentinels) {
-    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     Iterator<Map.Entry<TableName, SnapshotSentinel>> it =
         sentinels.entrySet().iterator();
     while (it.hasNext()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index d9ddba3..f2c6db7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -474,9 +474,9 @@ public class CompactSplitThread implements CompactionRequestor {
       try {
         // Note: please don't put single-compaction logic here;
         //       put it into region/store/etc. This is CST logic.
-        long start = EnvironmentEdgeManager.currentTimeMillis();
+        long start = EnvironmentEdgeManager.currentTime();
         boolean completed = region.compact(compaction, store);
-        long now = EnvironmentEdgeManager.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " +
               this + "; duration=" + StringUtils.formatTimeDiff(now, start));
         if (completed) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 5a4c904..4417bd9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -360,7 +360,7 @@ public class CompactionTool extends Configured implements Tool {
     Path stagingDir = JobUtil.getStagingDir(conf);
     try {
       // Create input file with the store dirs
-      Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTimeMillis());
+      Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime());
       CompactionInputFormat.createInputFile(fs, inputPath, toCompactDirs);
       CompactionInputFormat.addInputPath(job, inputPath);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index d90357b..759f842 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -150,7 +150,7 @@ public class DefaultMemStore implements MemStore {
       LOG.warn("Snapshot called again without clearing previous. " +
           "Doing nothing. Another ongoing flush or did we fail last attempt?");
     } else {
-      this.snapshotId = EnvironmentEdgeManager.currentTimeMillis();
+      this.snapshotId = EnvironmentEdgeManager.currentTime();
       this.snapshotSize = keySize();
       if (!this.kvset.isEmpty()) {
         this.snapshot = this.kvset;
@@ -239,7 +239,7 @@ public class DefaultMemStore implements MemStore {
 
   void setOldestEditTimeToNow() {
     if (timeOfOldestEdit == Long.MAX_VALUE) {
-      timeOfOldestEdit = EnvironmentEdgeManager.currentTimeMillis();
+      timeOfOldestEdit = EnvironmentEdgeManager.currentTime();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index cc78751..57377ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -752,7 +752,7 @@ public class HRegion implements HeapSize { // , Writable{
     // Initialize split policy
     this.splitPolicy = RegionSplitPolicy.create(this, conf);
 
-    this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
+    this.lastFlushTime = EnvironmentEdgeManager.currentTime();
     // Use maximum of log sequenceid or that which was found in stores
     // (particularly if no recovered edits, seqid will be -1).
     long nextSeqid = maxSeqId + 1;
@@ -1683,7 +1683,7 @@ public class HRegion implements HeapSize { // , Writable{
     if (flushCheckInterval <= 0) { //disabled
       return false;
     }
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     //if we flushed in the recent past, we don't need to do again now
     if ((now - getLastFlushTime() < flushCheckInterval)) {
       return false;
@@ -1734,7 +1734,7 @@ public class HRegion implements HeapSize { // , Writable{
       // Don't flush when server aborting, it's unsafe
       throw new IOException("Aborting flush because server is aborted...");
     }
-    final long startTime = EnvironmentEdgeManager.currentTimeMillis();
+    final long startTime = EnvironmentEdgeManager.currentTime();
     // If nothing to flush, return, but we need to safely update the region sequence id
     if (this.memstoreSize.get() <= 0) {
       // Take an update lock because am about to change the sequence id and we want the sequence id
@@ -1960,7 +1960,7 @@ public class HRegion implements HeapSize { // , Writable{
     }
 
     // Record latest flush time
-    this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
+    this.lastFlushTime = EnvironmentEdgeManager.currentTime();
 
     // Update the last flushed sequence id for region. TODO: This is dup'd inside the WAL/FSHlog.
     this.lastFlushSeqId = flushSeqId;
@@ -1971,7 +1971,7 @@ public class HRegion implements HeapSize { // , Writable{
       notifyAll(); // FindBugs NN_NAKED_NOTIFY
     }
 
-    long time = EnvironmentEdgeManager.currentTimeMillis() - startTime;
+    long time = EnvironmentEdgeManager.currentTime() - startTime;
     long memstoresize = this.memstoreSize.get();
     String msg = "Finished memstore flush of ~" +
       StringUtils.byteDesc(totalFlushableSize) + "/" + totalFlushableSize +
@@ -2515,7 +2515,7 @@ public class HRegion implements HeapSize { // , Writable{
       // we acquire at least one.
       // ----------------------------------
       int numReadyToWrite = 0;
-      long now = EnvironmentEdgeManager.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       while (lastIndexExclusive < batchOp.operations.length) {
         Mutation mutation = batchOp.getMutation(lastIndexExclusive);
         boolean isPutMutation = mutation instanceof Put;
@@ -2600,7 +2600,7 @@ public class HRegion implements HeapSize { // , Writable{
 
       // we should record the timestamp only after we have acquired the rowLock,
       // otherwise, newer puts/deletes are not guaranteed to have a newer timestamp
-      now = EnvironmentEdgeManager.currentTimeMillis();
+      now = EnvironmentEdgeManager.currentTime();
       byte[] byteNow = Bytes.toBytes(now);
 
       // Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily?
@@ -3370,7 +3370,7 @@ public class HRegion implements HeapSize { // , Writable{
             2000);
         // How often to send a progress report (default 1/2 master timeout)
         int period = this.conf.getInt("hbase.hstore.report.period", 300000);
-        long lastReport = EnvironmentEdgeManager.currentTimeMillis();
+        long lastReport = EnvironmentEdgeManager.currentTime();
 
         while ((entry = reader.next()) != null) {
           HLogKey key = entry.getKey();
@@ -3385,7 +3385,7 @@ public class HRegion implements HeapSize { // , Writable{
             if (intervalEdits >= interval) {
               // Number of edits interval reached
               intervalEdits = 0;
-              long cur = EnvironmentEdgeManager.currentTimeMillis();
+              long cur = EnvironmentEdgeManager.currentTime();
               if (lastReport + period <= cur) {
                 status.setStatus("Replaying edits..." +
                     " skipped=" + skippedEdits +
@@ -4726,7 +4726,7 @@ public class HRegion implements HeapSize { // , Writable{
     meta.checkResources();
     // The row key is the region name
     byte[] row = r.getRegionName();
-    final long now = EnvironmentEdgeManager.currentTimeMillis();
+    final long now = EnvironmentEdgeManager.currentTime();
     final List<Cell> cells = new ArrayList<Cell>(2);
     cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
       HConstants.REGIONINFO_QUALIFIER, now,
@@ -5025,7 +5025,7 @@ public class HRegion implements HeapSize { // , Writable{
     // Short circuit the read only case
     if (processor.readOnly()) {
       try {
-        long now = EnvironmentEdgeManager.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         doProcessRowWithTimeout(
             processor, now, this, null, null, timeout);
         processor.postProcess(this, walEdit, true);
@@ -5060,7 +5060,7 @@ public class HRegion implements HeapSize { // , Writable{
       // Get a mvcc write number
       mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId);
 
-      long now = EnvironmentEdgeManager.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       try {
         // 4. Let the processor scan the rows, generate mutations and add
         //    waledits
@@ -5261,7 +5261,7 @@ public class HRegion implements HeapSize { // , Writable{
           // now start my own transaction
           mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId);
           w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum);
-          long now = EnvironmentEdgeManager.currentTimeMillis();
+          long now = EnvironmentEdgeManager.currentTime();
           // Process each family
           for (Map.Entry<byte[], List<Cell>> family : append.getFamilyCellMap().entrySet()) {
 
@@ -5478,7 +5478,7 @@ public class HRegion implements HeapSize { // , Writable{
           // now start my own transaction
           mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId);
           w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum);
-          long now = EnvironmentEdgeManager.currentTimeMillis();
+          long now = EnvironmentEdgeManager.currentTime();
           // Process each family
           for (Map.Entry<byte [], List<Cell>> family:
               increment.getFamilyCellMap().entrySet()) {

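One detail worth noting in the HRegion hunks: in the batch mutate path the clock is re-read only after the row locks are held, so a writer that locks later also stamps later (the in-line comment above spells out why). The sketch below shows that ordering in isolation, with a plain ReentrantLock standing in for HBase's row lock.

  import java.util.concurrent.locks.ReentrantLock;
  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class LockThenTimestampExample {
    private final ReentrantLock rowLock = new ReentrantLock(); // stand-in for a row lock

    public long writeWithTimestamp() {
      rowLock.lock();
      try {
        // Read the clock only while holding the lock: whoever locks later stamps later.
        long now = EnvironmentEdgeManager.currentTime();
        // ... apply the mutation using 'now' as the cell timestamp ...
        return now;
      } finally {
        rowLock.unlock();
      }
    }
  }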
http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b8746a0..eab29e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2042,7 +2042,7 @@ public class HRegionServer extends HasThread implements
       rpcServices.requestCount.set(0);
       LOG.info("reportForDuty to master=" + masterServerName + " with port="
         + rpcServices.isa.getPort() + ", startcode=" + this.startcode);
-      long now = EnvironmentEdgeManager.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       int port = rpcServices.isa.getPort();
       RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
       request.setPort(port);
@@ -2719,7 +2719,7 @@ public class HRegionServer extends HasThread implements
     public MovedRegionInfo(ServerName serverName, long closeSeqNum) {
       this.serverName = serverName;
       this.seqNum = closeSeqNum;
-      ts = EnvironmentEdgeManager.currentTimeMillis();
+      ts = EnvironmentEdgeManager.currentTime();
      }
 
     public ServerName getServerName() {
@@ -2761,7 +2761,7 @@ public class HRegionServer extends HasThread implements
   private MovedRegionInfo getMovedRegion(final String encodedRegionName) {
     MovedRegionInfo dest = movedRegions.get(encodedRegionName);
 
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     if (dest != null) {
       if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) {
         return dest;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index a74a463..d746fa6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -774,7 +774,7 @@ public class HStore implements Store {
         + " into store " + this + " (new location: " + dstPath + ")");
     if (LOG.isTraceEnabled()) {
       String traceMessage = "BULK LOAD time,size,store size,store files ["
-          + EnvironmentEdgeManager.currentTimeMillis() + "," + r.length() + "," + storeSize
+          + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize
           + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
       LOG.trace(traceMessage);
     }
@@ -1018,7 +1018,7 @@ public class HStore implements Store {
         totalSize += sf.getReader().length();
       }
       String traceMessage = "FLUSH time,count,size,store size,store files ["
-          + EnvironmentEdgeManager.currentTimeMillis() + "," + sfs.size() + "," + totalSize
+          + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize
           + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
       LOG.trace(traceMessage);
     }
@@ -1147,7 +1147,7 @@ public class HStore implements Store {
         + " into tmpdir=" + fs.getTempDir() + ", totalSize="
         + StringUtils.humanReadableInt(cr.getSize()));
 
-    long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis();
+    long compactionStartTime = EnvironmentEdgeManager.currentTime();
     List<StoreFile> sfs = null;
     try {
       // Commence the compaction.
@@ -1251,7 +1251,7 @@ public class HStore implements Store {
    */
   private void logCompactionEndMessage(
       CompactionRequest cr, List<StoreFile> sfs, long compactionStartTime) {
-    long now = EnvironmentEdgeManager.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     StringBuilder message = new StringBuilder(
       "Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
       + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
@@ -1523,7 +1523,7 @@ public class HStore implements Store {
         long cfTtl = getStoreFileTtl();
         if (cfTtl != Long.MAX_VALUE) {
           delSfs = storeEngine.getStoreFileManager().getUnneededFiles(
-              EnvironmentEdgeManager.currentTimeMillis() - cfTtl, filesCompacting);
+              EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
           addToCompactingFiles(delSfs);
         }
       }
@@ -2022,7 +2022,7 @@ public class HStore implements Store {
 
     this.lock.readLock().lock();
     try {
-      long now = EnvironmentEdgeManager.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
 
       return this.memstore.updateColumnValue(row,
           f,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3bfbd062/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
index 9ff7741..4673d0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
@@ -290,7 +290,7 @@ public class Leases extends HasThread {
     }
 
     public long getDelay(TimeUnit unit) {
-      return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTimeMillis(),
+      return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTime(),
           TimeUnit.MILLISECONDS);
     }
 
@@ -305,7 +305,7 @@ public class Leases extends HasThread {
      * Resets the expiration time of the lease.
      */
     public void resetExpirationTime() {
-      this.expirationTime = EnvironmentEdgeManager.currentTimeMillis() + this.leaseTimeoutPeriod;
+      this.expirationTime = EnvironmentEdgeManager.currentTime() + this.leaseTimeoutPeriod;
     }
   }
 }
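The Leases change keeps the Delayed contract intact: getDelay() converts "expiration minus now" into the caller's unit, and resetExpirationTime() pushes the deadline forward by the lease timeout. A self-contained sketch of that arithmetic, with an arbitrary one-minute lease period:

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class LeaseDelayExample {
    private final long leaseTimeoutPeriod = 60_000L; // example: one-minute leases
    private long expirationTime = EnvironmentEdgeManager.currentTime() + leaseTimeoutPeriod;

    // Remaining delay in the requested unit; goes negative once the lease has expired.
    public long getDelay(TimeUnit unit) {
      return unit.convert(expirationTime - EnvironmentEdgeManager.currentTime(),
          TimeUnit.MILLISECONDS);
    }

    // Renew the lease from "now", mirroring resetExpirationTime() above.
    public void renew() {
      expirationTime = EnvironmentEdgeManager.currentTime() + leaseTimeoutPeriod;
    }
  }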