You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/10/17 22:48:40 UTC
svn commit: r1533259 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs:
CHANGES.txt src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
Author: suresh
Date: Thu Oct 17 20:48:39 2013
New Revision: 1533259
URL: http://svn.apache.org/r1533259
Log:
HDFS-5374. Merge 1533258 from trunk.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1533259&r1=1533258&r2=1533259&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 17 20:48:39 2013
@@ -83,6 +83,8 @@ Release 2.3.0 - UNRELEASED
HDFS-5130. Add test for snapshot related FsShell and DFSAdmin commands.
(Binglin Chang via jing9)
+ HDFS-5374. Remove deadcode in DFSOutputStream. (suresh)
+
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1533259&r1=1533258&r2=1533259&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Thu Oct 17 20:48:39 2013
@@ -47,7 +47,6 @@ import org.apache.hadoop.fs.FSOutputSumm
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Syncable;
-import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
@@ -138,7 +137,7 @@ public class DFSOutputStream extends FSO
private long currentSeqno = 0;
private long lastQueuedSeqno = -1;
private long lastAckedSeqno = -1;
- private long bytesCurBlock = 0; // bytes writen in current block
+ private long bytesCurBlock = 0; // bytes written in current block
private int packetSize = 0; // write packet size, not including the header.
private int chunksPerPacket = 0;
private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();
@@ -466,8 +465,7 @@ public class DFSOutputStream extends FSO
}
}
- Packet one = null;
-
+ Packet one;
try {
// process datanode IO errors if any
boolean doSleep = false;
@@ -511,7 +509,7 @@ public class DFSOutputStream extends FSO
if(DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Allocating new block");
}
- nodes = nextBlockOutputStream(src);
+ nodes = nextBlockOutputStream();
initDataStreaming();
} else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
if(DFSClient.LOG.isDebugEnabled()) {
@@ -575,9 +573,6 @@ public class DFSOutputStream extends FSO
}
lastPacket = Time.now();
- if (one.isHeartbeatPacket()) { //heartbeat packet
- }
-
// update bytesSent
long tmpBytesSent = one.getLastByteOffsetBlock();
if (bytesSent < tmpBytesSent) {
@@ -695,7 +690,7 @@ public class DFSOutputStream extends FSO
}
//
- // Processes reponses from the datanodes. A packet is removed
+ // Processes responses from the datanodes. A packet is removed
// from the ackQueue when its response arrives.
//
private class ResponseProcessor extends Daemon {
@@ -737,18 +732,18 @@ public class DFSOutputStream extends FSO
}
assert seqno != PipelineAck.UNKOWN_SEQNO :
- "Ack for unkown seqno should be a failed ack: " + ack;
+ "Ack for unknown seqno should be a failed ack: " + ack;
if (seqno == Packet.HEART_BEAT_SEQNO) { // a heartbeat ack
continue;
}
// a success ack for a data packet
- Packet one = null;
+ Packet one;
synchronized (dataQueue) {
one = ackQueue.getFirst();
}
if (one.seqno != seqno) {
- throw new IOException("Responseprocessor: Expecting seqno " +
+ throw new IOException("ResponseProcessor: Expecting seqno " +
" for block " + block +
one.seqno + " but received " + seqno);
}
@@ -1057,7 +1052,7 @@ public class DFSOutputStream extends FSO
* Must get block ID and the IDs of the destinations from the namenode.
* Returns the list of target datanodes.
*/
- private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
+ private DatanodeInfo[] nextBlockOutputStream() throws IOException {
LocatedBlock lb = null;
DatanodeInfo[] nodes = null;
int count = dfsClient.getConf().nBlockWriteRetry;
@@ -1215,8 +1210,7 @@ public class DFSOutputStream extends FSO
}
private LocatedBlock locateFollowingBlock(long start,
- DatanodeInfo[] excludedNodes)
- throws IOException, UnresolvedLinkException {
+ DatanodeInfo[] excludedNodes) throws IOException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 400;
while (true) {
@@ -1287,7 +1281,7 @@ public class DFSOutputStream extends FSO
* Create a socket for a write pipeline
* @param first the first datanode
* @param length the pipeline length
- * @param client
+ * @param client client
* @return the socket connected to the first datanode
*/
static Socket createSocketForPipeline(final DatanodeInfo first,
@@ -1479,7 +1473,7 @@ public class DFSOutputStream extends FSO
//
// Rather than wait around for space in the queue, we should instead try to
// return to the caller as soon as possible, even though we slightly overrun
- // the MAX_PACKETS iength.
+ // the MAX_PACKETS length.
Thread.currentThread().interrupt();
break;
}
@@ -1705,7 +1699,7 @@ public class DFSOutputStream extends FSO
}
}
// If 1) any new blocks were allocated since the last flush, or 2) to
- // update length in NN is requried, then persist block locations on
+ // update length in NN is required, then persist block locations on
// namenode.
if (persistBlocks.getAndSet(false) || updateLength) {
try {