You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by br...@apache.org on 2014/05/27 22:26:12 UTC
svn commit: r1597870 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
Author: brandonli
Date: Tue May 27 20:26:12 2014
New Revision: 1597870
URL: http://svn.apache.org/r1597870
Log:
HDFS-6416. Merging change r1597868 from trunk
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1597870&r1=1597869&r2=1597870&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Tue May 27 20:26:12 2014
@@ -54,6 +54,7 @@ import org.apache.hadoop.nfs.nfs3.respon
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
import org.jboss.netty.channel.Channel;
import com.google.common.annotations.VisibleForTesting;
@@ -136,7 +137,7 @@ class OpenFileCtx {
this.channel = channel;
this.xid = xid;
this.preOpAttr = preOpAttr;
- this.startTime = System.currentTimeMillis();
+ this.startTime = Time.monotonicNow();
}
@Override
@@ -158,11 +159,11 @@ class OpenFileCtx {
private Daemon dumpThread;
private void updateLastAccessTime() {
- lastAccessTime = System.currentTimeMillis();
+ lastAccessTime = Time.monotonicNow();
}
private boolean checkStreamTimeout(long streamTimeout) {
- return System.currentTimeMillis() - lastAccessTime > streamTimeout;
+ return Time.monotonicNow() - lastAccessTime > streamTimeout;
}
long getLastAccessTime() {
@@ -698,7 +699,7 @@ class OpenFileCtx {
+ " updating the mtime, then return success");
Nfs3FileAttributes postOpAttr = null;
try {
- dfsClient.setTimes(path, System.currentTimeMillis(), -1);
+ dfsClient.setTimes(path, Time.monotonicNow(), -1);
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
} catch (IOException e) {
LOG.info("Got error when processing perfect overwrite, path=" + path
@@ -1007,7 +1008,7 @@ class OpenFileCtx {
if (LOG.isDebugEnabled()) {
LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
- + (System.currentTimeMillis() - commit.getStartTime())
+ + (Time.monotonicNow() - commit.getStartTime())
+ "ms. Sent response for commit:" + commit);
}
entry = pendingCommits.firstEntry();
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java?rev=1597870&r1=1597869&r2=1597870&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java Tue May 27 20:26:12 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -99,7 +100,7 @@ class OpenFileCtxCache {
LOG.warn("No eviction candidate. All streams have pending work.");
return null;
} else {
- long idleTime = System.currentTimeMillis()
+ long idleTime = Time.monotonicNow()
- idlest.getValue().getLastAccessTime();
if (idleTime < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
if (LOG.isDebugEnabled()) {
@@ -250,7 +251,7 @@ class OpenFileCtxCache {
// Check if it can sleep
try {
- long workedTime = System.currentTimeMillis() - lastWakeupTime;
+ long workedTime = Time.monotonicNow() - lastWakeupTime;
if (workedTime < rotation) {
if (LOG.isTraceEnabled()) {
LOG.trace("StreamMonitor can still have a sleep:"
@@ -258,7 +259,7 @@ class OpenFileCtxCache {
}
Thread.sleep(rotation - workedTime);
}
- lastWakeupTime = System.currentTimeMillis();
+ lastWakeupTime = Time.monotonicNow();
} catch (InterruptedException e) {
LOG.info("StreamMonitor got interrupted");
@@ -267,4 +268,4 @@ class OpenFileCtxCache {
}
}
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1597870&r1=1597869&r2=1597870&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue May 27 20:26:12 2014
@@ -129,6 +129,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6110 adding more slow action log in critical write path
(Liang Xie via stack)
+ HDFS-6416. Use Time#monotonicNow in OpenFileCtx and OpenFileCtxCache to
+ avoid system clock bugs (Abhiraj Butala via brandonli)
+
OPTIMIZATIONS
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)