Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/01/29 19:06:14 UTC
[31/34] hadoop git commit: HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)
HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/840d2143
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/840d2143
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/840d2143
Branch: refs/heads/HDFS-EC
Commit: 840d21438a7b8a4a677a61ca641ac04af096f7ba
Parents: 8ced72c
Author: yliu <yl...@apache.org>
Authored: Thu Jan 29 04:56:04 2015 +0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Jan 29 10:05:27 2015 -0800
----------------------------------------------------------------------
.../hadoop/hdfs/nfs/nfs3/AsyncDataService.java | 16 +--
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 78 +++++------
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 138 +++++++++----------
.../hadoop/hdfs/nfs/nfs3/WriteManager.java | 26 ++--
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 10 +-
6 files changed, 136 insertions(+), 135 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 429a457..ee3f90a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -22,12 +22,11 @@ import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
- * This class is a thread pool to easily schedule async data operations.Current
+ * This class is a thread pool to easily schedule async data operations. Current
* async data operation is write back operation. In the future, we could use it
* for readahead operations too.
*/
@@ -69,8 +68,8 @@ public class AsyncDataService {
}
if (LOG.isDebugEnabled()) {
LOG.debug("Current active thread number: " + executor.getActiveCount()
- + " queue size:" + executor.getQueue().size()
- + " scheduled task number:" + executor.getTaskCount());
+ + " queue size: " + executor.getQueue().size()
+ + " scheduled task number: " + executor.getTaskCount());
}
executor.execute(task);
}
@@ -105,10 +104,9 @@ public class AsyncDataService {
}
/**
- * A task for write data back to HDFS for a file. Since only one thread can
- * write for a file, any time there should be only one task(in queue or
- * executing) for one file existing, and this should be guaranteed by the
- * caller.
+ * A task to write data back to HDFS for a file. Since only one thread can
+ * write to a file, there should only be one task at any time for a file
+ * (in queue or executing), and this should be guaranteed by the caller.
*/
static class WriteBackTask implements Runnable {
@@ -135,7 +133,7 @@ public class AsyncDataService {
try {
openFileCtx.executeWriteBack();
} catch (Throwable t) {
- LOG.error("Asyn data service got error:", t);
+ LOG.error("Async data service got error: ", t);
}
}
}
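
[Editorial aside, not part of the commit] The javadoc touched above describes AsyncDataService as a daemon thread pool that schedules write-back tasks, with the "at most one task per file" invariant left to the caller. A minimal, hypothetical sketch of that shape, using only the java.util.concurrent types imported in the hunk (all other names are made up for illustration):

    // Illustrative sketch only. Mirrors the pattern the javadoc above
    // describes: daemon worker threads run write-back tasks; the caller
    // must guarantee at most one queued-or-running task per file.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class WriteBackPoolSketch {
      private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
          1, 4, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
          new ThreadFactory() {            // daemon threads, as in the real service
            @Override
            public Thread newThread(Runnable r) {
              Thread t = new Thread(r, "write-back");
              t.setDaemon(true);
              return t;
            }
          });

      // Caller's invariant: one task per file, in queue or executing.
      public void scheduleWriteBack(final Runnable writeBack) {
        executor.execute(new Runnable() {
          @Override
          public void run() {
            try {
              writeBack.run();             // e.g. openFileCtx.executeWriteBack()
            } catch (Throwable t) {
              System.err.println("Async data service got error: " + t);
            }
          }
        });
      }
    }
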
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index a06d1c5..9610f48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -211,7 +211,7 @@ class OpenFileCtx {
private long updateNonSequentialWriteInMemory(long count) {
long newValue = nonSequentialWriteInMemory.addAndGet(count);
if (LOG.isDebugEnabled()) {
- LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value:"
+ LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
+ newValue);
}
@@ -312,7 +312,7 @@ class OpenFileCtx {
private void dump() {
// Create dump outputstream for the first time
if (dumpOut == null) {
- LOG.info("Create dump file:" + dumpFilePath);
+ LOG.info("Create dump file: " + dumpFilePath);
File dumpFile = new File(dumpFilePath);
try {
synchronized (this) {
@@ -367,8 +367,8 @@ class OpenFileCtx {
updateNonSequentialWriteInMemory(-dumpedDataSize);
}
} catch (IOException e) {
- LOG.error("Dump data failed:" + writeCtx + " with error:" + e
- + " OpenFileCtx state:" + activeState);
+ LOG.error("Dump data failed: " + writeCtx + " with error: " + e
+ + " OpenFileCtx state: " + activeState);
// Disable dump
enabledDump = false;
return;
@@ -428,8 +428,8 @@ class OpenFileCtx {
return null;
} else {
if (xid != writeCtx.getXid()) {
- LOG.warn("Got a repeated request, same range, with a different xid:"
- + xid + " xid in old request:" + writeCtx.getXid());
+ LOG.warn("Got a repeated request, same range, with a different xid: "
+ + xid + " xid in old request: " + writeCtx.getXid());
//TODO: better handling.
}
return writeCtx;
@@ -441,7 +441,7 @@ class OpenFileCtx {
IdMappingServiceProvider iug) {
if (!activeState) {
- LOG.info("OpenFileCtx is inactive, fileId:"
+ LOG.info("OpenFileCtx is inactive, fileId: "
+ request.getHandle().getFileId());
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
@@ -523,7 +523,7 @@ class OpenFileCtx {
int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
if (LOG.isDebugEnabled()) {
- LOG.debug("requesed offset=" + offset + " and current offset="
+ LOG.debug("requested offset=" + offset + " and current offset="
+ cachedOffset);
}
@@ -556,7 +556,7 @@ class OpenFileCtx {
// Fail non-append call
if (offset < cachedOffset) {
- LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + ","
+ LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
+ nextOffset + ")");
return null;
} else {
@@ -568,7 +568,7 @@ class OpenFileCtx {
dataState);
if (LOG.isDebugEnabled()) {
LOG.debug("Add new write to the list with nextOffset " + cachedOffset
- + " and requesed offset=" + offset);
+ + " and requested offset=" + offset);
}
if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
// update the memory size
@@ -584,7 +584,7 @@ class OpenFileCtx {
+ pendingWrites.size());
}
} else {
- LOG.warn("Got a repeated request, same range, with xid:" + xid
+ LOG.warn("Got a repeated request, same range, with xid: " + xid
+ " nextOffset " + +cachedOffset + " req offset=" + offset);
}
return writeCtx;
@@ -662,7 +662,7 @@ class OpenFileCtx {
// offset < nextOffset
processOverWrite(dfsClient, request, channel, xid, iug);
} else {
- // The writes is added to pendingWrites.
+ // The write is added to pendingWrites.
// Check and start writing back if necessary
boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
if (!startWriting) {
@@ -674,7 +674,7 @@ class OpenFileCtx {
// responses of the previous batch. So here send response immediately
// for unstable non-sequential write
if (stableHow != WriteStableHow.UNSTABLE) {
- LOG.info("Have to change stable write to unstable write:"
+ LOG.info("Have to change stable write to unstable write: "
+ request.getStableHow());
stableHow = WriteStableHow.UNSTABLE;
}
@@ -719,7 +719,7 @@ class OpenFileCtx {
+ "Continue processing the perfect overwrite.");
} catch (IOException e) {
LOG.info("hsync failed when processing possible perfect overwrite, path="
- + path + " error:" + e);
+ + path + " error: " + e);
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
@@ -728,7 +728,7 @@ class OpenFileCtx {
fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
readCount = fis.read(offset, readbuffer, 0, count);
if (readCount < count) {
- LOG.error("Can't read back " + count + " bytes, partial read size:"
+ LOG.error("Can't read back " + count + " bytes, partial read size: "
+ readCount);
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
@@ -757,7 +757,7 @@ class OpenFileCtx {
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
} catch (IOException e) {
LOG.info("Got error when processing perfect overwrite, path=" + path
- + " error:" + e);
+ + " error: " + e);
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
@@ -808,7 +808,7 @@ class OpenFileCtx {
ret = COMMIT_STATUS.COMMIT_ERROR;
}
} catch (IOException e) {
- LOG.error("Got stream error during data sync:" + e);
+ LOG.error("Got stream error during data sync: " + e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
// status = Nfs3Status.NFS3ERR_IO;
ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -972,7 +972,7 @@ class OpenFileCtx {
// Check the stream timeout
if (checkStreamTimeout(streamTimeout)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("stream can be closed for fileId:" + fileId);
+ LOG.debug("stream can be closed for fileId: " + fileId);
}
flag = true;
}
@@ -988,7 +988,7 @@ class OpenFileCtx {
private synchronized WriteCtx offerNextToWrite() {
if (pendingWrites.isEmpty()) {
if (LOG.isDebugEnabled()) {
- LOG.debug("The asyn write task has no pending writes, fileId: "
+ LOG.debug("The async write task has no pending writes, fileId: "
+ latestAttr.getFileId());
}
// process pending commit again to handle this race: a commit is added
@@ -1021,7 +1021,7 @@ class OpenFileCtx {
this.asyncStatus = false;
} else if (range.getMin() < offset && range.getMax() > offset) {
// shouldn't happen since we do sync for overlapped concurrent writers
- LOG.warn("Got a overlapping write (" + range.getMin() + ","
+ LOG.warn("Got an overlapping write (" + range.getMin() + ", "
+ range.getMax() + "), nextOffset=" + offset
+ ". Silently drop it now");
pendingWrites.remove(range);
@@ -1044,10 +1044,10 @@ class OpenFileCtx {
return null;
}
- /** Invoked by AsynDataService to write back to HDFS */
+ /** Invoked by AsyncDataService to write back to HDFS */
void executeWriteBack() {
Preconditions.checkState(asyncStatus,
- "openFileCtx has false asyncStatus, fileId:" + latestAttr.getFileId());
+ "openFileCtx has false asyncStatus, fileId: " + latestAttr.getFileId());
final long startOffset = asyncWriteBackStartOffset;
try {
while (activeState) {
@@ -1072,10 +1072,10 @@ class OpenFileCtx {
if (startOffset == asyncWriteBackStartOffset) {
asyncStatus = false;
} else {
- LOG.info("Another asyn task is already started before this one"
- + " is finalized. fileId:" + latestAttr.getFileId()
- + " asyncStatus:" + asyncStatus + " original startOffset:"
- + startOffset + " new startOffset:" + asyncWriteBackStartOffset
+ LOG.info("Another async task is already started before this one"
+ + " is finalized. fileId: " + latestAttr.getFileId()
+ + " asyncStatus: " + asyncStatus + " original startOffset: "
+ + startOffset + " new startOffset: " + asyncWriteBackStartOffset
+ ". Won't change asyncStatus here.");
}
}
@@ -1104,7 +1104,7 @@ class OpenFileCtx {
}
status = Nfs3Status.NFS3ERR_IO;
} catch (IOException e) {
- LOG.error("Got stream error during data sync:", e);
+ LOG.error("Got stream error during data sync: ", e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
status = Nfs3Status.NFS3ERR_IO;
}
@@ -1139,9 +1139,9 @@ class OpenFileCtx {
new VerifierNone()), commit.getXid());
if (LOG.isDebugEnabled()) {
- LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
+ LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
+ Nfs3Utils.getElapsedTime(commit.startTime)
- + "ns. Sent response for commit:" + commit);
+ + "ns. Sent response for commit: " + commit);
}
entry = pendingCommits.firstEntry();
}
@@ -1158,7 +1158,7 @@ class OpenFileCtx {
FileHandle handle = writeCtx.getHandle();
if (LOG.isDebugEnabled()) {
LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
- + offset + " length:" + count + " stableHow:" + stableHow.name());
+ + offset + " length: " + count + " stableHow: " + stableHow.name());
}
try {
@@ -1183,7 +1183,7 @@ class OpenFileCtx {
updateNonSequentialWriteInMemory(-count);
if (LOG.isDebugEnabled()) {
LOG.debug("After writing " + handle.getFileId() + " at offset "
- + offset + ", updated the memory count, new value:"
+ + offset + ", updated the memory count, new value: "
+ nonSequentialWriteInMemory.get());
}
}
@@ -1192,18 +1192,18 @@ class OpenFileCtx {
if (!writeCtx.getReplied()) {
if (stableHow != WriteStableHow.UNSTABLE) {
- LOG.info("Do sync for stable write:" + writeCtx);
+ LOG.info("Do sync for stable write: " + writeCtx);
try {
if (stableHow == WriteStableHow.DATA_SYNC) {
fos.hsync();
} else {
Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
- "Unknown WriteStableHow:" + stableHow);
+ "Unknown WriteStableHow: " + stableHow);
// Sync file data and length
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
} catch (IOException e) {
- LOG.error("hsync failed with writeCtx:" + writeCtx, e);
+ LOG.error("hsync failed with writeCtx: " + writeCtx, e);
throw e;
}
}
@@ -1211,8 +1211,8 @@ class OpenFileCtx {
WccAttr preOpAttr = latestAttr.getWccAttr();
WccData fileWcc = new WccData(preOpAttr, latestAttr);
if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
- LOG.warn("Return original count:" + writeCtx.getOriginalCount()
- + " instead of real data count:" + count);
+ LOG.warn("Return original count: " + writeCtx.getOriginalCount()
+ + " instead of real data count: " + count);
count = writeCtx.getOriginalCount();
}
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1263,8 +1263,8 @@ class OpenFileCtx {
fos.close();
}
} catch (IOException e) {
- LOG.info("Can't close stream for fileId:" + latestAttr.getFileId()
- + ", error:" + e);
+ LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
+ + ", error: " + e);
}
// Reply error for pending writes
@@ -1272,7 +1272,7 @@ class OpenFileCtx {
WccAttr preOpAttr = latestAttr.getWccAttr();
while (!pendingWrites.isEmpty()) {
OffsetRange key = pendingWrites.firstKey();
- LOG.info("Fail pending write: (" + key.getMin() + "," + key.getMax()
+ LOG.info("Fail pending write: (" + key.getMin() + ", " + key.getMax()
+ "), nextOffset=" + nextOffset.get());
WriteCtx writeCtx = pendingWrites.remove(key);
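
[Editorial aside, not part of the commit] Nearly every OpenFileCtx hunk above inserts a missing space after a colon in a concatenated log message. This class of typo is hard to avoid when the message is assembled from many string fragments. As a hedged aside (the file itself uses commons-logging), a parameterized style such as SLF4J's keeps the whole message in one template literal, where a missing separator is visible at a glance, and builds no string when the level is disabled, so the isDebugEnabled() guard becomes unnecessary for cheap arguments:

    // Hedged illustration: the "do write" message from the hunks above,
    // rewritten with SLF4J parameterized logging. One template string,
    // so "fileId:" vs "fileId: " cannot diverge per argument.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ParameterizedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      void logWrite(long fileId, long offset, int count, String stableHow) {
        LOG.debug("do write, fileId: {} offset: {} length: {} stableHow: {}",
            fileId, offset, count, stableHow);
      }
    }
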
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 9204c4d..7ca21e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -160,7 +160,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
private final long blockSize;
private final int bufferSize;
private final boolean aixCompatMode;
- private Statistics statistics;
private String writeDumpDir; // The dir save dump files
private final RpcCallCache rpcCallCache;
@@ -245,7 +244,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
pauseMonitor.start();
metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
}
- writeManager.startAsyncDataSerivce();
+ writeManager.startAsyncDataService();
try {
infoServer.start();
} catch (IOException e) {
@@ -331,7 +330,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client:"
+ LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -356,7 +355,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
return response;
}
if (attrs == null) {
- LOG.error("Can't get path for fileId:" + handle.getFileId());
+ LOG.error("Can't get path for fileId: " + handle.getFileId());
response.setStatus(Nfs3Status.NFS3ERR_STALE);
return response;
}
@@ -372,7 +371,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (setMode && updateFields.contains(SetAttrField.MODE)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("set new mode:" + newAttr.getMode());
+ LOG.debug("set new mode: " + newAttr.getMode());
}
dfsClient.setPermission(fileIdPath,
new FsPermission((short) (newAttr.getMode())));
@@ -392,7 +391,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
.getMilliSeconds() : -1;
if (atime != -1 || mtime != -1) {
if (LOG.isDebugEnabled()) {
- LOG.debug("set atime:" + +atime + " mtime:" + mtime);
+ LOG.debug("set atime: " + +atime + " mtime: " + mtime);
}
dfsClient.setTimes(fileIdPath, mtime, atime);
}
@@ -424,7 +423,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -440,7 +439,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
if (preOpAttr == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
response.setStatus(Nfs3Status.NFS3ERR_STALE);
return response;
}
@@ -511,7 +510,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String fileName = request.getName();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
- + fileName + " client:" + remoteAddress);
+ + fileName + " client: " + remoteAddress);
}
try {
@@ -520,7 +519,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dirHandle, fileName);
if (postOpObjAttr == null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name:"
+ LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
+ fileName + " does not exist");
}
Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
@@ -532,7 +531,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
dirFileIdPath, iug);
if (postOpDirAttr == null) {
- LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+ LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
}
FileHandle fileHandle = new FileHandle(postOpObjAttr.getFileId());
@@ -579,7 +578,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes attrs;
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -588,7 +587,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
attrs = writeManager.getFileAttr(dfsClient, handle, iug);
if (attrs == null) {
- LOG.error("Can't get path for fileId:" + handle.getFileId());
+ LOG.error("Can't get path for fileId: " + handle.getFileId());
return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
}
int access = Nfs3Utils.getAccessRightsForUserGroup(
@@ -646,7 +645,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -657,15 +656,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
fileIdPath, iug);
if (postOpAttr == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
}
if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
- LOG.error("Not a symlink, fileId:" + handle.getFileId());
+ LOG.error("Not a symlink, fileId: " + handle.getFileId());
return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
}
if (target == null) {
- LOG.error("Symlink target should not be null, fileId:"
+ LOG.error("Symlink target should not be null, fileId: "
+ handle.getFileId());
return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
}
@@ -726,7 +725,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
- + " count: " + count + " client:" + remoteAddress);
+ + " count: " + count + " client: " + remoteAddress);
}
Nfs3FileAttributes attrs;
@@ -739,13 +738,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3Utils.getFileIdPath(handle), iug);
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Get error accessing file, fileId:" + handle.getFileId(), e);
+ LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
}
return new READ3Response(Nfs3Status.NFS3ERR_IO);
}
if (attrs == null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Can't get path for fileId:" + handle.getFileId());
+ LOG.debug("Can't get path for fileId: " + handle.getFileId());
}
return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
}
@@ -806,8 +805,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
iug);
if (readCount < count) {
- LOG.info("Partical read. Asked offset:" + offset + " count:" + count
- + " and read back:" + readCount + "file size:" + attrs.getSize());
+ LOG.info("Partical read. Asked offset: " + offset + " count: " + count
+ + " and read back: " + readCount + " file size: "
+ + attrs.getSize());
}
// HDFS returns -1 for read beyond file size.
if (readCount < 0) {
@@ -866,15 +866,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: "
- + offset + " length:" + count + " stableHow:" + stableHow.getValue()
- + " xid:" + xid + " client:" + remoteAddress);
+ + offset + " length: " + count + " stableHow: " + stableHow.getValue()
+ + " xid: " + xid + " client: " + remoteAddress);
}
Nfs3FileAttributes preOpAttr = null;
try {
preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
if (preOpAttr == null) {
- LOG.error("Can't get path for fileId:" + handle.getFileId());
+ LOG.error("Can't get path for fileId: " + handle.getFileId());
return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -885,7 +885,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
if (LOG.isDebugEnabled()) {
- LOG.debug("requesed offset=" + offset + " and current filesize="
+ LOG.debug("requested offset=" + offset + " and current filesize="
+ preOpAttr.getSize());
}
@@ -940,7 +940,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String fileName = request.getName();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
- + " filename: " + fileName + " client:" + remoteAddress);
+ + " filename: " + fileName + " client: " + remoteAddress);
}
int createMode = request.getMode();
@@ -948,7 +948,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
&& request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
&& request.getObjAttr().getSize() != 0) {
LOG.error("Setting file size is not supported when creating file: "
- + fileName + " dir fileId:" + dirHandle.getFileId());
+ + fileName + " dir fileId: " + dirHandle.getFileId());
return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
}
@@ -961,7 +961,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
- LOG.error("Can't get path for dirHandle:" + dirHandle);
+ LOG.error("Can't get path for dirHandle: " + dirHandle);
return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -985,7 +985,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
fos = dfsClient.createWrappedOutputStream(
dfsClient.create(fileIdPath, permission, flag, false, replication,
blockSize, null, bufferSize, null),
- statistics);
+ null);
if ((createMode == Nfs3Constant.CREATE_UNCHECKED)
|| (createMode == Nfs3Constant.CREATE_GUARDED)) {
@@ -1013,7 +1013,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
fos = null;
} else {
if (LOG.isDebugEnabled()) {
- LOG.debug("Opened stream for file:" + fileName + ", fileId:"
+ LOG.debug("Opened stream for file: " + fileName + ", fileId: "
+ fileHandle.getFileId());
}
}
@@ -1024,7 +1024,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
fos.close();
} catch (IOException e1) {
- LOG.error("Can't close stream for dirFileId:" + dirHandle.getFileId()
+ LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId()
+ " filename: " + fileName, e1);
}
}
@@ -1033,7 +1033,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
dfsClient, dirFileIdPath, iug);
} catch (IOException e1) {
- LOG.error("Can't get postOpDirAttr for dirFileId:"
+ LOG.error("Can't get postOpDirAttr for dirFileId: "
+ dirHandle.getFileId(), e1);
}
}
@@ -1073,7 +1073,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String fileName = request.getName();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS MKDIR dirId: " + dirHandle.getFileId() + " filename: "
- + fileName + " client:" + remoteAddress);
+ + fileName + " client: " + remoteAddress);
}
if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
@@ -1090,7 +1090,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
- LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+ LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -1173,7 +1173,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String fileName = request.getName();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId()
- + " fileName: " + fileName + " client:" + remoteAddress);
+ + " fileName: " + fileName + " client: " + remoteAddress);
}
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
@@ -1182,7 +1182,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
- LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+ LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -1254,7 +1254,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (LOG.isDebugEnabled()) {
LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId()
- + " fileName: " + fileName + " client:" + remoteAddress);
+ + " fileName: " + fileName + " client: " + remoteAddress);
}
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
@@ -1263,7 +1263,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
- LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
+ LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -1341,7 +1341,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String toName = request.getToName();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS RENAME from: " + fromHandle.getFileId() + "/" + fromName
- + " to: " + toHandle.getFileId() + "/" + toName + " client:"
+ + " to: " + toHandle.getFileId() + "/" + toName + " client: "
+ remoteAddress);
}
@@ -1354,14 +1354,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug);
if (fromPreOpAttr == null) {
- LOG.info("Can't get path for fromHandle fileId:"
+ LOG.info("Can't get path for fromHandle fileId: "
+ fromHandle.getFileId());
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
}
toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug);
if (toPreOpAttr == null) {
- LOG.info("Can't get path for toHandle fileId:" + toHandle.getFileId());
+ LOG.info("Can't get path for toHandle fileId: " + toHandle.getFileId());
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -1441,7 +1441,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String linkIdPath = linkDirIdPath + "/" + name;
if (LOG.isDebugEnabled()) {
LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath
- + " client:" + remoteAddress);
+ + " client: " + remoteAddress);
}
try {
@@ -1463,7 +1463,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
objAttr.getFileId()), objAttr, dirWcc);
} catch (IOException e) {
- LOG.warn("Exception:" + e);
+ LOG.warn("Exception: " + e);
int status = mapErrorStatus(e);
response.setStatus(status);
return response;
@@ -1529,18 +1529,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
long cookie = request.getCookie();
if (cookie < 0) {
- LOG.error("Invalid READDIR request, with negitve cookie:" + cookie);
+ LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
}
long count = request.getCount();
if (count <= 0) {
- LOG.info("Nonpositive count in invalid READDIR request:" + count);
+ LOG.info("Nonpositive count in invalid READDIR request: " + count);
return new READDIR3Response(Nfs3Status.NFS3_OK);
}
if (LOG.isDebugEnabled()) {
LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
- + cookie + " count: " + count + " client:" + remoteAddress);
+ + cookie + " count: " + count + " client: " + remoteAddress);
}
HdfsFileStatus dirStatus;
@@ -1551,11 +1551,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
dirStatus = dfsClient.getFileInfo(dirFileIdPath);
if (dirStatus == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!dirStatus.isDir()) {
- LOG.error("Can't readdir for regular file, fileId:"
+ LOG.error("Can't readdir for regular file, fileId: "
+ handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
}
@@ -1588,7 +1588,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (dotdotStatus == null) {
// This should not happen
- throw new IOException("Can't get path for handle path:"
+ throw new IOException("Can't get path for handle path: "
+ dotdotFileIdPath);
}
dotdotFileId = dotdotStatus.getFileId();
@@ -1606,7 +1606,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpAttr == null) {
- LOG.error("Can't get path for fileId:" + handle.getFileId());
+ LOG.error("Can't get path for fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
} catch (IOException e) {
@@ -1687,24 +1687,24 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
long cookie = request.getCookie();
if (cookie < 0) {
- LOG.error("Invalid READDIRPLUS request, with negitve cookie:" + cookie);
+ LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
long dirCount = request.getDirCount();
if (dirCount <= 0) {
- LOG.info("Nonpositive dircount in invalid READDIRPLUS request:" + dirCount);
+ LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
int maxCount = request.getMaxCount();
if (maxCount <= 0) {
- LOG.info("Nonpositive maxcount in invalid READDIRPLUS request:" + maxCount);
+ LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
if (LOG.isDebugEnabled()) {
LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
+ cookie + " dirCount: " + dirCount + " maxCount: " + maxCount
- + " client:" + remoteAddress);
+ + " client: " + remoteAddress);
}
HdfsFileStatus dirStatus;
@@ -1716,11 +1716,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
dirStatus = dfsClient.getFileInfo(dirFileIdPath);
if (dirStatus == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!dirStatus.isDir()) {
- LOG.error("Can't readdirplus for regular file, fileId:"
+ LOG.error("Can't readdirplus for regular file, fileId: "
+ handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
}
@@ -1751,7 +1751,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (dotdotStatus == null) {
// This should not happen
- throw new IOException("Can't get path for handle path:"
+ throw new IOException("Can't get path for handle path: "
+ dotdotFileIdPath);
}
dotdotFileId = dotdotStatus.getFileId();
@@ -1769,7 +1769,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpDirAttr == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
}
} catch (IOException e) {
@@ -1801,7 +1801,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
} catch (IOException e) {
- LOG.error("Can't get file attributes for fileId:" + fileId, e);
+ LOG.error("Can't get file attributes for fileId: " + fileId, e);
continue;
}
entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1818,7 +1818,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
} catch (IOException e) {
- LOG.error("Can't get file attributes for fileId:" + fileId, e);
+ LOG.error("Can't get file attributes for fileId: " + fileId, e);
continue;
}
entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1863,7 +1863,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -1875,7 +1875,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle,
iug);
if (attrs == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -1938,7 +1938,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -1956,7 +1956,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
Nfs3Utils.getFileIdPath(handle), iug);
if (attrs == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -2005,7 +2005,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
Nfs3FileAttributes attrs;
if (LOG.isDebugEnabled()) {
- LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client:"
+ LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: "
+ remoteAddress);
}
@@ -2013,7 +2013,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
iug);
if (attrs == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
}
@@ -2057,7 +2057,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset="
- + request.getOffset() + " count=" + request.getCount() + " client:"
+ + request.getOffset() + " count=" + request.getCount() + " client: "
+ remoteAddress);
}
@@ -2066,7 +2066,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
try {
preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
if (preOpAttr == null) {
- LOG.info("Can't get path for fileId:" + handle.getFileId());
+ LOG.info("Can't get path for fileId: " + handle.getFileId());
return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
}
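
[Editorial aside, not part of the commit] Two hunks in RpcProgramNfs3 are not message fixes: the `private Statistics statistics;` field is deleted and the `createWrappedOutputStream` call passes `null` where the field was used. The email does not state why, but the change only preserves behavior if the field was never assigned, i.e. was always null. A reduced, hypothetical illustration of that reading (Statistics and createWrappedOutputStream are simplified stand-ins for FileSystem.Statistics and the DFSClient method):

    // Reduced, hypothetical illustration of the statistics hunks above;
    // signatures simplified, names are stand-ins.
    class StatisticsHunkSketch {
      static class Statistics {}
      static void createWrappedOutputStream(Object out, Statistics stats) {}

      private Statistics statistics;  // before: declared but never assigned -> null

      void before(Object out) {
        createWrappedOutputStream(out, statistics);  // always passed null
      }

      void after(Object out) {
        createWrappedOutputStream(out, null);        // dead field gone, null explicit
      }
    }
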
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index 52c75ed..7810ce2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -101,7 +101,7 @@ public class WriteManager {
this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
- void startAsyncDataSerivce() {
+ void startAsyncDataService() {
if (asyncDataServiceStarted) {
return;
}
@@ -139,7 +139,7 @@ public class WriteManager {
FileHandle fileHandle = request.getHandle();
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx == null) {
- LOG.info("No opened stream for fileId:" + fileHandle.getFileId());
+ LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
HdfsDataOutputStream fos = null;
@@ -156,14 +156,14 @@ public class WriteManager {
} catch (RemoteException e) {
IOException io = e.unwrapRemoteException();
if (io instanceof AlreadyBeingCreatedException) {
- LOG.warn("Can't append file:" + fileIdPath
- + ". Possibly the file is being closed. Drop the request:"
+ LOG.warn("Can't append file: " + fileIdPath
+ + ". Possibly the file is being closed. Drop the request: "
+ request + ", wait for the client to retry...");
return;
}
throw e;
} catch (IOException e) {
- LOG.error("Can't apapend to file:" + fileIdPath, e);
+ LOG.error("Can't append to file: " + fileIdPath, e);
if (fos != null) {
fos.close();
}
@@ -188,7 +188,7 @@ public class WriteManager {
try {
fos.close();
} catch (IOException e) {
- LOG.error("Can't close stream for fileId:" + handle.getFileId(), e);
+ LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
}
// Notify client to retry
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
@@ -201,7 +201,7 @@ public class WriteManager {
}
if (LOG.isDebugEnabled()) {
- LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
+ LOG.debug("Opened stream for appending file: " + fileHandle.getFileId());
}
}
@@ -220,7 +220,7 @@ public class WriteManager {
if (openFileCtx == null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
+ LOG.debug("No opened stream for fileId: " + fileHandle.getFileId()
+ " commitOffset=" + commitOffset
+ ". Return success in this case.");
}
@@ -254,8 +254,8 @@ public class WriteManager {
status = Nfs3Status.NFS3_OK;
break;
default:
- LOG.error("Should not get commit return code:" + ret.name());
- throw new RuntimeException("Should not get commit return code:"
+ LOG.error("Should not get commit return code: " + ret.name());
+ throw new RuntimeException("Should not get commit return code: "
+ ret.name());
}
}
@@ -269,7 +269,7 @@ public class WriteManager {
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx == null) {
- LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
+ LOG.info("No opened stream for fileId: " + fileHandle.getFileId()
+ " commitOffset=" + commitOffset + ". Return success in this case.");
status = Nfs3Status.NFS3_OK;
@@ -295,8 +295,8 @@ public class WriteManager {
status = Nfs3Status.NFS3_OK;
break;
default:
- LOG.error("Should not get commit return code:" + ret.name());
- throw new RuntimeException("Should not get commit return code:"
+ LOG.error("Should not get commit return code: " + ret.name());
+ throw new RuntimeException("Should not get commit return code: "
+ ret.name());
}
}
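
[Editorial aside, not part of the commit] The two identical WriteManager hunks fix the same message inside a switch that maps the internal commit status to an NFS3 status code, with a defensive default that throws. A minimal hypothetical sketch of that shape (the real OpenFileCtx.COMMIT_STATUS enum has more members than shown; the integer constants stand in for org.apache.hadoop.nfs.nfs3.Nfs3Status and follow RFC 1813):

    // Hypothetical sketch of the commit-status mapping in the hunks above.
    class CommitStatusSketch {
      enum COMMIT_STATUS { COMMIT_FINISHED, COMMIT_ERROR }

      static final int NFS3_OK = 0;      // stand-ins for Nfs3Status constants
      static final int NFS3ERR_IO = 5;

      static int toNfs3Status(COMMIT_STATUS ret) {
        switch (ret) {
        case COMMIT_FINISHED:
          return NFS3_OK;
        case COMMIT_ERROR:
          return NFS3ERR_IO;
        default:
          // Same wording as the fixed hunks: space after the colon.
          throw new RuntimeException("Should not get commit return code: "
              + ret.name());
        }
      }
    }
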
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd29408..4932c80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -813,6 +813,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7611. deleteSnapshot and delete of a file can leave orphaned blocks
in the blocksMap on NameNode restart. (jing9 and Byron Wong)
+ HDFS-7423. various typos and message formatting fixes in nfs daemon and
+ doc. (Charles Lamb via yliu)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d2143/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 8cebda1..71bf0d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -357,9 +357,9 @@ public class DFSOutputStream extends FSOutputSummer
@Override
public String toString() {
- return "packet seqno:" + this.seqno +
- " offsetInBlock:" + this.offsetInBlock +
- " lastPacketInBlock:" + this.lastPacketInBlock +
+ return "packet seqno: " + this.seqno +
+ " offsetInBlock: " + this.offsetInBlock +
+ " lastPacketInBlock: " + this.lastPacketInBlock +
" lastByteOffsetInBlock: " + this.getLastByteOffsetBlock();
}
}
@@ -2007,7 +2007,7 @@ public class DFSOutputStream extends FSOutputSummer
// bytesCurBlock potentially incremented if there was buffered data
if (DFSClient.LOG.isDebugEnabled()) {
- DFSClient.LOG.debug("DFSClient flush():"
+ DFSClient.LOG.debug("DFSClient flush(): "
+ " bytesCurBlock=" + bytesCurBlock
+ " lastFlushOffset=" + lastFlushOffset
+ " createNewBlock=" + endBlock);
@@ -2103,7 +2103,7 @@ public class DFSOutputStream extends FSOutputSummer
DFSClient.LOG.warn("Error while syncing", e);
synchronized (this) {
if (!isClosed()) {
- lastException.set(new IOException("IOException flush:" + e));
+ lastException.set(new IOException("IOException flush: " + e));
closeThreads(true);
}
}