Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/07/14 22:16:03 UTC
svn commit: r1146881 - in /hadoop/common/branches/HDFS-1073/hdfs: ./
src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/
src/java/org/apache/hadoop/hdfs/
src/java/org/apache/hadoop/hdfs/server/balancer/
src/java/org/apache/hadoop/hdfs/server/blockmanagem...
Author: todd
Date: Thu Jul 14 20:16:02 2011
New Revision: 1146881
URL: http://svn.apache.org/viewvc?rev=1146881&view=rev
Log:
Merge trunk into HDFS-1073
Added:
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
- copied unchanged from r1146865, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
- copied unchanged from r1146865, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
- copied unchanged from r1146865, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
- copied unchanged from r1146865, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
- copied unchanged from r1146865, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
Removed:
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java
Modified:
hadoop/common/branches/HDFS-1073/hdfs/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/CHANGES.txt
hadoop/common/branches/HDFS-1073/hdfs/src/c++/libhdfs/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/contrib/hdfsproxy/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/java/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
hadoop/common/branches/HDFS-1073/hdfs/src/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-1073/hdfs/src/webapps/secondary/ (props changed)
Propchange: hadoop/common/branches/HDFS-1073/hdfs/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Thu Jul 14 20:16:02 2011
@@ -4,5 +4,6 @@ build.properties
logs
.classpath
.externalToolBuilders
+.launches
.project
.settings
Propchange: hadoop/common/branches/HDFS-1073/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs:1134994-1143516
+/hadoop/common/trunk/hdfs:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified: hadoop/common/branches/HDFS-1073/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/CHANGES.txt?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/CHANGES.txt Thu Jul 14 20:16:02 2011
@@ -540,6 +540,20 @@ Trunk (unreleased changes)
HDFS-2109. Store uMask as member variable to DFSClient.Conf. (Bharath
Mundlapudi via szetszwo)
+ HDFS-2111. Add tests for ensuring that the DN will start with a few bad
+ data directories. (Harsh J Chouraria via todd)
+
+ HDFS-2134. Move DecommissionManager to the blockmanagement package.
+ (szetszwo)
+
+ HDFS-1977. Stop using StringUtils.stringifyException().
+ (Bharath Mundlapudi via jitendra)
+
+ HDFS-2131. Add new tests for the -overwrite/-f option in put and
+ copyFromLocal by HADOOP-7361. (Uma Maheswara Rao G via szetszwo)
+
+ HDFS-2140. Move Host2NodesMap to the blockmanagement package. (szetszwo)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -805,9 +819,18 @@ Trunk (unreleased changes)
HDFS-2053. Bug in INodeDirectory#computeContentSummary warning.
(Michael Noll via eli)
- HDFS-1990. Fix resource leaks in BlockReceiver.close(). (Uma Maheswara
+ HDFS-1990. Fix resource leaks in BlockReceiver.close(). (Uma Maheswara
Rao G via szetszwo)
+ HDFS-2034. Length in DFSInputStream.getBlockRange(..) becomes -ve when
+ reading only from a currently being written block. (John George via
+ szetszwo)
+
+ HDFS-2132. Potential resource leak in EditLogFileOutputStream.close. (atm)
+
+ HDFS-2120. on reconnect, DN can connect to NN even with different source
+ versions. (John George via atm)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
@@ -1066,6 +1089,9 @@ Release 0.22.0 - Unreleased
UnsupportedActionException("register") instead of "journal".
(Ching-Shen Chen via shv)
+ HDFS-2054 BlockSender.sendChunk() prints ERROR for connection closures
+ encountered during transferToFully() (Kihwal Lee via stack)
+
OPTIMIZATIONS
HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
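
[Editorial note: many of the hunks below are mechanical applications of the
HDFS-1977 cleanup listed above. Instead of flattening the stack trace into the
message with StringUtils.stringifyException(), the Throwable is passed to the
logger as a second argument and the logging framework renders the trace. A
minimal sketch of the idiom; the class is illustrative and not from this
commit, though the message is taken from the DFSClient hunk:]

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class LoggingIdiomSketch {
      private static final Log LOG = LogFactory.getLog(LoggingIdiomSketch.class);

      void handle(IOException e) {
        // Old style, removed throughout this merge:
        //   LOG.warn("Problem getting block size: " +
        //       StringUtils.stringifyException(e));

        // New style: hand the Throwable to the logger and let it
        // format the stack trace itself.
        LOG.warn("Problem getting block size", e);
      }
    }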
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/c++/libhdfs:1134994-1143516
+/hadoop/common/trunk/hdfs/src/c++/libhdfs:1134994-1146865
/hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
/hadoop/core/trunk/src/c++/libhdfs:776175-784663
/hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/contrib/hdfsproxy:1134994-1143516
+/hadoop/common/trunk/hdfs/src/contrib/hdfsproxy:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/contrib/hdfsproxy:713112
/hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
/hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/java:1134994-1143516
+/hadoop/common/trunk/hdfs/src/java:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java Thu Jul 14 20:16:02 2011
@@ -101,7 +101,6 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
/********************************************************
* DFSClient can connect to a Hadoop Filesystem and
@@ -489,8 +488,7 @@ public class DFSClient implements FSCons
try {
return namenode.getPreferredBlockSize(f);
} catch (IOException ie) {
- LOG.warn("Problem getting block size: " +
- StringUtils.stringifyException(ie));
+ LOG.warn("Problem getting block size", ie);
throw ie;
}
}
@@ -1578,9 +1576,8 @@ public class DFSClient implements FSCons
try {
reportBadBlocks(lblocks);
} catch (IOException ie) {
- LOG.info("Found corruption while reading " + file
- + ". Error repairing corrupt blocks. Bad blocks remain. "
- + StringUtils.stringifyException(ie));
+ LOG.info("Found corruption while reading " + file
+ + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu Jul 14 20:16:02 2011
@@ -47,7 +47,6 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.StringUtils;
/****************************************************************
* DFSInputStream provides bytes from a named file. It handles
@@ -294,8 +293,8 @@ public class DFSInputStream extends FSIn
/**
* Get blocks in the specified range.
- * Fetch them from the namenode if not cached.
- *
+ * Fetch them from the namenode if not cached. This function
+ * will not get a read request beyond the EOF.
* @param offset
* @param length
* @return consequent segment of located blocks
@@ -304,28 +303,31 @@ public class DFSInputStream extends FSIn
private synchronized List<LocatedBlock> getBlockRange(long offset,
long length)
throws IOException {
+ // getFileLength(): returns total file length
+ // locatedBlocks.getFileLength(): returns length of completed blocks
+ if (offset >= getFileLength()) {
+ throw new IOException("Offset: " + offset +
+ " exceeds file length: " + getFileLength());
+ }
+
final List<LocatedBlock> blocks;
- if (locatedBlocks.isLastBlockComplete()) {
- blocks = getFinalizedBlockRange(offset, length);
+ final long lengthOfCompleteBlk = locatedBlocks.getFileLength();
+ final boolean readOffsetWithinCompleteBlk = offset < lengthOfCompleteBlk;
+ final boolean readLengthPastCompleteBlk = offset + length > lengthOfCompleteBlk;
+
+ if (readOffsetWithinCompleteBlk) {
+ //get the blocks of finalized (completed) block range
+ blocks = getFinalizedBlockRange(offset,
+ Math.min(length, lengthOfCompleteBlk - offset));
+ } else {
+ blocks = new ArrayList<LocatedBlock>(1);
}
- else {
- final boolean readPastEnd = offset + length > locatedBlocks.getFileLength();
- /* if requested length is greater than current file length
- * then, it could possibly be from the current block being
- * written to. First get the finalized block range and then
- * if necessary, get the length of last block being written
- * to.
- */
- if (readPastEnd)
- length = locatedBlocks.getFileLength() - offset;
-
- blocks = getFinalizedBlockRange(offset, length);
- /* requested length is greater than what finalized blocks
- * have.
- */
- if (readPastEnd)
- blocks.add(locatedBlocks.getLastLocatedBlock());
+
+ // get the blocks from incomplete block range
+ if (readLengthPastCompleteBlk) {
+ blocks.add(locatedBlocks.getLastLocatedBlock());
}
+
return blocks;
}
@@ -496,7 +498,7 @@ public class DFSInputStream extends FSIn
if (!retryCurrentNode) {
DFSClient.LOG.warn("Exception while reading from "
+ getCurrentBlock() + " of " + src + " from "
- + currentNode + ": " + StringUtils.stringifyException(e));
+ + currentNode, e);
}
ioe = e;
}
@@ -554,7 +556,7 @@ public class DFSInputStream extends FSIn
throw ce;
} catch (IOException e) {
if (retries == 1) {
- DFSClient.LOG.warn("DFS Read: " + StringUtils.stringifyException(e));
+ DFSClient.LOG.warn("DFS Read", e);
}
blockEnd = -1;
if (currentNode != null) { addToDeadNodes(currentNode); }
@@ -928,9 +930,8 @@ public class DFSInputStream extends FSIn
} catch (IOException e) {//make following read to retry
if(DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Exception while seek to " + targetPos
- + " from " + getCurrentBlock() + " of " + src
- + " from " + currentNode + ": "
- + StringUtils.stringifyException(e));
+ + " from " + getCurrentBlock() + " of " + src + " from "
+ + currentNode, e);
}
}
}
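
[Editorial note: the getBlockRange(..) rewrite above (HDFS-2034) splits a read
into a finalized block range plus, when needed, the last block still being
written, and now fails fast on reads at or beyond the file length. A sketch of
the resulting case analysis, with illustrative names: fileLength stands for the
total length including the in-progress block, completedLength for
locatedBlocks.getFileLength(), i.e. finalized blocks only.]

    import java.io.IOException;

    class BlockRangeSketch {
      /** Mirrors the branching in the new getBlockRange(..). */
      static String classify(long offset, long length,
          long fileLength, long completedLength) throws IOException {
        if (offset >= fileLength) {
          // New fast-fail: reads past EOF are rejected up front.
          throw new IOException("Offset: " + offset
              + " exceeds file length: " + fileLength);
        }
        final boolean withinCompleted = offset < completedLength;
        final boolean pastCompleted = offset + length > completedLength;
        if (withinCompleted && pastCompleted) {
          return "finalized blocks plus the last in-progress block";
        } else if (withinCompleted) {
          return "finalized blocks only";
        } else {
          return "the last in-progress block only";
        }
      }
    }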
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Thu Jul 14 20:16:02 2011
@@ -75,8 +75,6 @@ import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;
-
/****************************************************************
@@ -548,8 +546,7 @@ class DFSOutputStream extends FSOutputSu
Thread.sleep(artificialSlowdown);
}
} catch (Throwable e) {
- DFSClient.LOG.warn("DataStreamer Exception: " +
- StringUtils.stringifyException(e));
+ DFSClient.LOG.warn("DataStreamer Exception", e);
if (e instanceof IOException) {
setLastException((IOException)e);
}
@@ -698,9 +695,8 @@ class DFSOutputStream extends FSOutputSu
synchronized (dataQueue) {
dataQueue.notifyAll();
}
- DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception " +
- " for block " + block +
- StringUtils.stringifyException(e));
+ DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
+ + " for block " + block, e);
responderClosed = true;
}
}
@@ -1101,7 +1097,7 @@ class DFSOutputStream extends FSOutputSu
throw e;
} else {
--retries;
- DFSClient.LOG.info(StringUtils.stringifyException(e));
+ DFSClient.LOG.info("Exception while adding a block", e);
if (System.currentTimeMillis() - localstart > 5000) {
DFSClient.LOG.info("Waiting for replication for "
+ (System.currentTimeMillis() - localstart) / 1000
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Thu Jul 14 20:16:02 2011
@@ -729,7 +729,7 @@ public class Balancer {
blocksToReceive -= getBlockList();
continue;
} catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
+ LOG.warn("Exception while getting block list", e);
return;
}
}
@@ -1553,7 +1553,7 @@ public class Balancer {
try {
System.exit(ToolRunner.run(null, new Cli(), args));
} catch (Throwable e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exiting balancer due an exception", e);
System.exit(-1);
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Thu Jul 14 20:16:02 2011
@@ -52,7 +52,6 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;
/**
* The class provides utilities for {@link Balancer} to access a NameNode
@@ -222,7 +221,7 @@ class NameNodeConnector {
try {
blockTokenSecretManager.setKeys(namenode.getBlockKeys());
} catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Failed to set keys", e);
}
try {
Thread.sleep(keyUpdaterInterval);
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu Jul 14 20:16:02 2011
@@ -99,6 +99,8 @@ public class BlockManager {
*/
public final BlocksMap blocksMap;
+ private final DatanodeManager datanodeManager;
+
//
// Store blocks-->datanodedescriptor(s) map of corrupt replicas
//
@@ -164,6 +166,7 @@ public class BlockManager {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
setConfigurationParameters(conf);
blocksMap = new BlocksMap(capacity, DEFAULT_MAP_LOAD_FACTOR);
+ datanodeManager = new DatanodeManager(fsn);
}
void setConfigurationParameters(Configuration conf) throws IOException {
@@ -207,13 +210,20 @@ public class BlockManager {
FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
}
- public void activate() {
+ public void activate(Configuration conf) {
pendingReplications.start();
+ datanodeManager.activate(conf);
}
public void close() {
if (pendingReplications != null) pendingReplications.stop();
blocksMap.close();
+ datanodeManager.close();
+ }
+
+ /** @return the datanodeManager */
+ public DatanodeManager getDatanodeManager() {
+ return datanodeManager;
}
public void metaSave(PrintWriter out) {
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java Thu Jul 14 20:16:02 2011
@@ -45,4 +45,11 @@ public class IncorrectVersionException e
+ versionReported + ". Expecting = " + versionExpected + ".");
}
+ public IncorrectVersionException(String versionReported,
+ String ofWhat,
+ String versionExpected) {
+ super("Unexpected version "
+ + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+ + versionReported + ". Expecting = " + versionExpected + ".");
+ }
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Thu Jul 14 20:16:02 2011
@@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.StringUtils;
/**
* Performs two types of scanning:
@@ -434,9 +433,8 @@ class BlockPoolSliceScanner {
return;
}
- LOG.warn((second ? "Second " : "First ") +
- "Verification failed for " + block + ". Exception : " +
- StringUtils.stringifyException(e));
+ LOG.warn((second ? "Second " : "First ") + "Verification failed for "
+ + block, e);
if (second) {
totalScanErrors++;
@@ -512,8 +510,7 @@ class BlockPoolSliceScanner {
logReader[1] = log.getPreviousFileReader();
}
} catch (IOException e) {
- LOG.warn("Could not read previous verification times : " +
- StringUtils.stringifyException(e));
+ LOG.warn("Could not read previous verification times", e);
}
try {
@@ -645,8 +642,7 @@ class BlockPoolSliceScanner {
}
}
} catch (RuntimeException e) {
- LOG.warn("RuntimeException during BlockPoolScanner.scan() : " +
- StringUtils.stringifyException(e));
+ LOG.warn("RuntimeException during BlockPoolScanner.scan()", e);
throw e;
} finally {
cleanUp();
@@ -910,8 +906,7 @@ class BlockPoolSliceScanner {
try {
readNext();
} catch (IOException e) {
- LOG.info("Could not reade next line in LogHandler : " +
- StringUtils.stringifyException(e));
+ LOG.info("Could not read next line in LogHandler", e);
}
return curLine;
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Thu Jul 14 20:16:02 2011
@@ -49,7 +49,6 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;
/** A class that receives a block and writes to its own disk, meanwhile
* may copies it to another site. If a throttler is provided,
@@ -276,9 +275,8 @@ class BlockReceiver implements Closeable
*/
private void handleMirrorOutError(IOException ioe) throws IOException {
String bpid = block.getBlockPoolId();
- LOG.info(datanode.getDNRegistrationForBP(bpid) + ":Exception writing block " +
- block + " to mirror " + mirrorAddr + "\n" +
- StringUtils.stringifyException(ioe));
+ LOG.info(datanode.getDNRegistrationForBP(bpid)
+ + ":Exception writing block " + block + " to mirror " + mirrorAddr, ioe);
if (Thread.interrupted()) { // shut down if the thread is interrupted
throw ioe;
} else { // encounter an error while writing to mirror
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Thu Jul 14 20:16:02 2011
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.util.DataT
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.StringUtils;
/**
* Reads a block from the disk and sends it to a recipient.
@@ -328,9 +327,8 @@ class BlockSender implements java.io.Clo
try {
checksumIn.readFully(buf, checksumOff, checksumLen);
} catch (IOException e) {
- LOG.warn(" Could not read or failed to veirfy checksum for data" +
- " at offset " + offset + " for block " + block + " got : "
- + StringUtils.stringifyException(e));
+ LOG.warn(" Could not read or failed to veirfy checksum for data"
+ + " at offset " + offset + " for block " + block, e);
IOUtils.closeStream(checksumIn);
checksumIn = null;
if (corruptChecksumOk) {
@@ -401,10 +399,19 @@ class BlockSender implements java.io.Clo
}
} catch (IOException e) {
- /* exception while writing to the client (well, with transferTo(),
- * it could also be while reading from the local file).
+ /* Exception while writing to the client. Connection closure from
+ * the other end is mostly the case and we do not care much about
+ * it. But other things can go wrong, especially in transferTo(),
+ * which we do not want to ignore.
+ *
+ * The message parsing below should not be considered a good coding
+ * example. NEVER do it to drive program logic. NEVER. It was done
+ * here because NIO throws an IOException for EPIPE.
*/
- LOG.error("BlockSender.sendChunks() exception: " + StringUtils.stringifyException(e));
+ String ioem = e.getMessage();
+ if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
+ LOG.error("BlockSender.sendChunks() exception: ", e);
+ }
throw ioeToSocketException(e);
}
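
[Editorial note: as the new comment above concedes, NIO reports EPIPE and
ECONNRESET only as a generic IOException, so the only available signal is the
message text. A condensed sketch of the guard; the message prefixes are the
ones matched in the patch, and the sketch additionally null-checks
getMessage(), which the patch does not.]

    import java.io.IOException;

    import org.apache.commons.logging.Log;

    class DisconnectGuardSketch {
      /** Logs e unless it looks like a routine client disconnect. */
      static void logUnlessClientDisconnect(Log log, IOException e) {
        String msg = e.getMessage();
        // "Broken pipe" (EPIPE) and "Connection reset" (ECONNRESET)
        // mean the reader went away; anything else is a real error.
        if (msg == null
            || (!msg.startsWith("Broken pipe")
                && !msg.startsWith("Connection reset"))) {
          log.error("BlockSender.sendChunks() exception: ", e);
        }
      }
    }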
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Jul 14 20:16:02 2011
@@ -861,8 +861,8 @@ public class DataNode extends Configured
/* One common reason is that NameNode could be in safe mode.
* Should we keep on retrying in that case?
*/
- LOG.warn("Failed to report bad block " + block + " to namenode : " +
- " Exception : " + StringUtils.stringifyException(e));
+ LOG.warn("Failed to report bad block " + block + " to namenode : "
+ + " Exception", e);
}
}
@@ -1111,12 +1111,11 @@ public class DataNode extends Configured
if (UnregisteredNodeException.class.getName().equals(reClass) ||
DisallowedDatanodeException.class.getName().equals(reClass) ||
IncorrectVersionException.class.getName().equals(reClass)) {
- LOG.warn("blockpool " + blockPoolId + " is shutting down: " +
- StringUtils.stringifyException(re));
+ LOG.warn("blockpool " + blockPoolId + " is shutting down", re);
shouldServiceRun = false;
return;
}
- LOG.warn(StringUtils.stringifyException(re));
+ LOG.warn("RemoteException in offerService", re);
try {
long sleepTime = Math.min(1000, heartBeatInterval);
Thread.sleep(sleepTime);
@@ -1124,7 +1123,7 @@ public class DataNode extends Configured
Thread.currentThread().interrupt();
}
} catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
+ LOG.warn("IOException in offerService", e);
}
} // while (shouldRun && shouldServiceRun)
} // offerService
@@ -1144,7 +1143,26 @@ public class DataNode extends Configured
void register() throws IOException {
LOG.info("in register: sid=" + bpRegistration.getStorageID() + ";SI="
+ bpRegistration.storageInfo);
-
+
+ // build and layout versions should match
+ String nsBuildVer = bpNamenode.versionRequest().getBuildVersion();
+ String stBuildVer = Storage.getBuildVersion();
+
+ if (!nsBuildVer.equals(stBuildVer)) {
+ LOG.warn("Data-node and name-node Build versions must be " +
+ "the same. Namenode build version: " + nsBuildVer + "Datanode " +
+ "build version: " + stBuildVer);
+ throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
+ }
+
+ if (FSConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) {
+ LOG.warn("Data-node and name-node layout versions must be " +
+ "the same. Expected: "+ FSConstants.LAYOUT_VERSION +
+ " actual "+ bpNSInfo.getLayoutVersion());
+ throw new IncorrectVersionException
+ (bpNSInfo.getLayoutVersion(), "namenode");
+ }
+
while(shouldRun && shouldServiceRun) {
try {
// Use returned registration from namenode with updated machine name.
@@ -1241,18 +1259,18 @@ public class DataNode extends Configured
startDistributedUpgradeIfNeeded();
offerService();
} catch (Exception ex) {
- LOG.error("Exception: " + StringUtils.stringifyException(ex));
+ LOG.error("Exception in BPOfferService", ex);
if (shouldRun && shouldServiceRun) {
try {
Thread.sleep(5000);
} catch (InterruptedException ie) {
- LOG.warn("Received exception: ", ie);
+ LOG.warn("Received exception", ie);
}
}
}
}
} catch (Throwable ex) {
- LOG.warn("Unexpected exception ", ex);
+ LOG.warn("Unexpected exception", ex);
} finally {
LOG.warn(bpRegistration + " ending block pool service for: "
+ blockPoolId);
@@ -1737,8 +1755,7 @@ public class DataNode extends Configured
try {
nn.errorReport(bpos.bpRegistration, dpError, errMsgr);
} catch(IOException e) {
- LOG.warn("Error reporting disk failure to NameNode: " +
- StringUtils.stringifyException(e));
+ LOG.warn("Error reporting disk failure to NameNode", e);
}
}
@@ -2007,8 +2024,9 @@ public class DataNode extends Configured
}
}
} catch (IOException ie) {
- LOG.warn(bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
- + " got " + StringUtils.stringifyException(ie));
+ LOG.warn(
+ bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
+ + " got ", ie);
// check if there are any disk problem
checkDiskError();
@@ -2279,7 +2297,7 @@ public class DataNode extends Configured
if (datanode != null)
datanode.join();
} catch (Throwable e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exception in secureMain", e);
System.exit(-1);
} finally {
// We need to add System.exit here because either shutdown was called or
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Thu Jul 14 20:16:02 2011
@@ -173,8 +173,8 @@ public class DataStorage extends Storage
}
} catch (IOException ioe) {
sd.unlock();
- LOG.warn("Ignoring storage directory "+ dataDir
- + " due to an exception: " + StringUtils.stringifyException(ioe));
+ LOG.warn("Ignoring storage directory " + dataDir
+ + " due to an exception", ioe);
//continue with other good dirs
continue;
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Thu Jul 14 20:16:02 2011
@@ -62,7 +62,6 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.ByteString;
@@ -268,10 +267,8 @@ class DataXceiver extends Receiver imple
/* What exactly should we do here?
* Earlier version shutdown() datanode if there is disk error.
*/
- LOG.warn(dnR + ":Got exception while serving " +
- block + " to " +
- remoteAddress + ":\n" +
- StringUtils.stringifyException(ioe) );
+ LOG.warn(dnR + ":Got exception while serving " + block + " to "
+ + remoteAddress, ioe);
throw ioe;
} finally {
IOUtils.closeStream(blockSender);
@@ -424,8 +421,7 @@ class DataXceiver extends Receiver imple
} else {
LOG.info(datanode + ":Exception transfering block " +
block + " to mirror " + mirrorNode +
- ". continuing without the mirror.\n" +
- StringUtils.stringifyException(e));
+ ". continuing without the mirror.", e);
}
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Thu Jul 14 20:16:02 2011
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.server.bal
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;
/**
@@ -157,7 +156,7 @@ class DataXceiverServer implements Runna
ss.close();
} catch (IOException ie) {
LOG.warn(datanode.getMachineName()
- + ":DataXceiverServer: Close exception due to: ", ie);
+ + " :DataXceiverServer: close exception", ie);
}
}
@@ -167,8 +166,7 @@ class DataXceiverServer implements Runna
try {
this.ss.close();
} catch (IOException ie) {
- LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): "
- + StringUtils.stringifyException(ie));
+ LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
}
// close all the sockets that were accepted earlier
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Thu Jul 14 20:16:02 2011
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
@@ -2377,8 +2376,7 @@ public class FSDataset implements FSCons
try {
datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock));
} catch (IOException e) {
- DataNode.LOG.warn("Failed to repot bad block " + corruptBlock
- + "Exception:" + StringUtils.stringifyException(e));
+ DataNode.LOG.warn("Failed to repot bad block " + corruptBlock, e);
}
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java Thu Jul 14 20:16:02 2011
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
import java.net.SocketTimeoutException;
@@ -110,7 +109,7 @@ public abstract class UpgradeObjectDatan
try {
doUpgrade();
} catch(Exception e) {
- DataNode.LOG.error(StringUtils.stringifyException(e));
+ DataNode.LOG.error("Exception in doUpgrade", e);
}
break;
}
@@ -129,7 +128,7 @@ public abstract class UpgradeObjectDatan
if(upgradeManager != null)
upgradeManager.completeUpgrade();
} catch(IOException e) {
- DataNode.LOG.error(StringUtils.stringifyException(e));
+ DataNode.LOG.error("Exception in completeUpgrade", e);
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java Thu Jul 14 20:16:02 2011
@@ -33,6 +33,8 @@ import org.apache.hadoop.io.DataOutputBu
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* An implementation of the abstract class {@link EditLogOutputStream}, which
* stores edits in a local file.
@@ -137,32 +139,41 @@ class EditLogFileOutputStream extends Ed
throw new IOException("Trying to use aborted output stream");
}
- // close should have been called after all pending transactions
- // have been flushed & synced.
- // if already closed, just skip
- if(bufCurrent != null)
- {
- int bufSize = bufCurrent.size();
- if (bufSize != 0) {
- throw new IOException("FSEditStream has " + bufSize
- + " bytes still to be flushed and cannot " + "be closed.");
- }
- bufCurrent.close();
- bufCurrent = null;
- }
-
- if(bufReady != null) {
- bufReady.close();
- bufReady = null;
- }
-
- // remove the last INVALID marker from transaction log.
- if (fc != null && fc.isOpen()) {
- fc.truncate(fc.position());
- fc.close();
- }
- if (fp != null) {
- fp.close();
+ try {
+ // close should have been called after all pending transactions
+ // have been flushed & synced.
+ // if already closed, just skip
+ if(bufCurrent != null)
+ {
+ int bufSize = bufCurrent.size();
+ if (bufSize != 0) {
+ throw new IOException("FSEditStream has " + bufSize
+ + " bytes still to be flushed and cannot " + "be closed.");
+ }
+ bufCurrent.close();
+ bufCurrent = null;
+ }
+
+ if(bufReady != null) {
+ bufReady.close();
+ bufReady = null;
+ }
+
+ // remove the last INVALID marker from transaction log.
+ if (fc != null && fc.isOpen()) {
+ fc.truncate(fc.position());
+ fc.close();
+ fc = null;
+ }
+ if (fp != null) {
+ fp.close();
+ fp = null;
+ }
+ } finally {
+ IOUtils.cleanup(FSNamesystem.LOG, bufCurrent, bufReady, fc, fp);
+ bufCurrent = bufReady = null;
+ fc = null;
+ fp = null;
}
fp = null;
}
@@ -263,4 +274,14 @@ class EditLogFileOutputStream extends Ed
public boolean isOpen() {
return fp != null;
}
+
+ @VisibleForTesting
+ public void setFileChannelForTesting(FileChannel fc) {
+ this.fc = fc;
+ }
+
+ @VisibleForTesting
+ public FileChannel getFileChannelForTesting() {
+ return fc;
+ }
}
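
[Editorial note: the HDFS-2132 hunk above fixes the resource leak by wrapping
the orderly shutdown in try/finally. Each resource is nulled out as it is
closed, and whatever an early exception (for example the "bytes still to be
flushed" IOException) leaves open is closed quietly by IOUtils.cleanup in the
finally block. A stripped-down sketch of the pattern; the fields and the log
argument are placeholders, not the class's actual members.]

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.channels.FileChannel;

    import org.apache.commons.logging.Log;
    import org.apache.hadoop.io.IOUtils;

    class CloseSketch {
      private FileChannel fc;
      private FileOutputStream fp;

      void close(Log log) throws IOException {
        try {
          if (fc != null && fc.isOpen()) {
            fc.truncate(fc.position());  // drop the trailing INVALID marker
            fc.close();
            fc = null;                   // mark as closed on the happy path
          }
          if (fp != null) {
            fp.close();
            fp = null;
          }
        } finally {
          // Closes (quietly) anything the try block left open.
          IOUtils.cleanup(log, fc, fp);
          fc = null;
          fp = null;
        }
      }
    }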
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul 14 20:16:02 2011
@@ -268,7 +268,7 @@ public class FSNamesystem implements FSC
* <p>
* Mapping: StorageID -> DatanodeDescriptor
*/
- NavigableMap<String, DatanodeDescriptor> datanodeMap =
+ public final NavigableMap<String, DatanodeDescriptor> datanodeMap =
new TreeMap<String, DatanodeDescriptor>();
Random r = new Random();
@@ -319,14 +319,12 @@ public class FSNamesystem implements FSC
ReplaceDatanodeOnFailure.DEFAULT;
private volatile SafeModeInfo safeMode; // safe mode information
- private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
/** datanode network toplogy */
public NetworkTopology clusterMap = new NetworkTopology();
private DNSToSwitchMapping dnsToSwitchMapping;
private HostsFileReader hostsReader;
- private Daemon dnthread = null;
private long maxFsObjects = 0; // maximum number of fs objects
@@ -405,7 +403,7 @@ public class FSNamesystem implements FSC
*/
void activate(Configuration conf) throws IOException {
setBlockTotal();
- blockManager.activate();
+ blockManager.activate(conf);
this.hbthread = new Daemon(new HeartbeatMonitor());
this.lmthread = new Daemon(leaseManager.new Monitor());
this.replthread = new Daemon(new ReplicationMonitor());
@@ -416,13 +414,6 @@ public class FSNamesystem implements FSC
this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
nnrmthread.start();
- this.dnthread = new Daemon(new DecommissionManager(this).new Monitor(
- conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
- DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT),
- conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY,
- DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT)));
- dnthread.start();
-
this.dnsToSwitchMapping = ReflectionUtils.newInstance(
conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
ScriptBasedMapping.class,
@@ -638,7 +629,6 @@ public class FSNamesystem implements FSC
if (blockManager != null) blockManager.close();
if (hbthread != null) hbthread.interrupt();
if (replthread != null) replthread.interrupt();
- if (dnthread != null) dnthread.interrupt();
if (smmthread != null) smmthread.interrupt();
if (dtSecretManager != null) dtSecretManager.stopThreads();
if (nnrmthread != null) nnrmthread.interrupt();
@@ -663,7 +653,7 @@ public class FSNamesystem implements FSC
}
/** Is this name system running? */
- boolean isRunning() {
+ public boolean isRunning() {
return fsRunning;
}
@@ -889,8 +879,8 @@ public class FSNamesystem implements FSC
LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true);
if (blocks != null) {
//sort the blocks
- DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost(
- clientMachine);
+ final DatanodeDescriptor client =
+ blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
for (LocatedBlock b : blocks.getLocatedBlocks()) {
clusterMap.pseudoSortByDistance(client, b.getLocations());
@@ -1501,8 +1491,8 @@ public class FSNamesystem implements FSC
}
}
- DatanodeDescriptor clientNode =
- host2DataNodeMap.getDatanodeByHost(clientMachine);
+ final DatanodeDescriptor clientNode =
+ blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
if (append && myFile != null) {
//
@@ -2853,7 +2843,8 @@ public class FSNamesystem implements FSC
+ " storage " + nodeReg.getStorageID());
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
- DatanodeDescriptor nodeN = host2DataNodeMap.getDatanodeByName(nodeReg.getName());
+ DatanodeDescriptor nodeN =
+ blockManager.getDatanodeManager().getDatanodeByHost(nodeReg.getName());
if (nodeN != null && nodeN != nodeS) {
NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
@@ -2862,7 +2853,7 @@ public class FSNamesystem implements FSC
// which is not served by anybody anymore.
removeDatanode(nodeN);
// physically remove node from datanodeMap
- wipeDatanode(nodeN);
+ blockManager.getDatanodeManager().wipeDatanode(nodeN);
nodeN = null;
}
@@ -2929,7 +2920,7 @@ public class FSNamesystem implements FSC
DatanodeDescriptor nodeDescr
= new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
resolveNetworkLocation(nodeDescr);
- unprotectedAddDatanode(nodeDescr);
+ blockManager.getDatanodeManager().addDatanode(nodeDescr);
clusterMap.add(nodeDescr);
checkDecommissioning(nodeDescr, dnAddress);
@@ -3227,7 +3218,7 @@ public class FSNamesystem implements FSC
lastBlockKeyUpdate = now;
}
} catch (Exception e) {
- FSNamesystem.LOG.error(StringUtils.stringifyException(e));
+ FSNamesystem.LOG.error("Exception while checking heartbeat", e);
}
try {
Thread.sleep(5000); // 5 seconds
@@ -3367,44 +3358,6 @@ public class FSNamesystem implements FSC
+ nodeDescr.getName() + " is out of service now.");
}
}
-
- void unprotectedAddDatanode(DatanodeDescriptor nodeDescr) {
- assert hasWriteLock();
- // To keep host2DataNodeMap consistent with datanodeMap,
- // remove from host2DataNodeMap the datanodeDescriptor removed
- // from datanodeMap before adding nodeDescr to host2DataNodeMap.
- synchronized (datanodeMap) {
- host2DataNodeMap.remove(
- datanodeMap.put(nodeDescr.getStorageID(), nodeDescr));
- }
- host2DataNodeMap.add(nodeDescr);
-
- if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug(
- "BLOCK* NameSystem.unprotectedAddDatanode: "
- + "node " + nodeDescr.getName() + " is added to datanodeMap.");
- }
- }
-
- /**
- * Physically remove node from datanodeMap.
- *
- * @param nodeID node
- * @throws IOException
- */
- void wipeDatanode(DatanodeID nodeID) throws IOException {
- assert hasWriteLock();
- String key = nodeID.getStorageID();
- synchronized (datanodeMap) {
- host2DataNodeMap.remove(datanodeMap.remove(key));
- }
- if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug(
- "BLOCK* NameSystem.wipeDatanode: "
- + nodeID.getName() + " storage " + key
- + " is removed from datanodeMap.");
- }
- }
FSImage getFSImage() {
return dir.fsImage;
@@ -3990,7 +3943,7 @@ public class FSNamesystem implements FSC
* Change, if appropriate, the admin state of a datanode to
* decommission completed. Return true if decommission is complete.
*/
- boolean checkDecommissionStateInternal(DatanodeDescriptor node) {
+ public boolean checkDecommissionStateInternal(DatanodeDescriptor node) {
assert hasWriteLock();
//
// Check to see if all blocks in this decommissioned
@@ -4305,7 +4258,7 @@ public class FSNamesystem implements FSC
try {
needUpgrade = startDistributedUpgradeIfNeeded();
} catch(IOException e) {
- FSNamesystem.LOG.error(StringUtils.stringifyException(e));
+ FSNamesystem.LOG.error("IOException in startDistributedUpgradeIfNeeded", e);
}
if(needUpgrade) {
// switch to manual safe mode
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Jul 14 20:16:02 2011
@@ -692,7 +692,7 @@ public class NameNode implements Namenod
try {
if (httpServer != null) httpServer.stop();
} catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exception while stopping httpserver", e);
}
if(namesystem != null) namesystem.close();
if(emptier != null) emptier.interrupt();
@@ -1668,7 +1668,7 @@ public class NameNode implements Namenod
if (namenode != null)
namenode.join();
} catch (Throwable e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exception in namenode join", e);
System.exit(-1);
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Jul 14 20:16:02 2011
@@ -320,7 +320,7 @@ public class SecondaryNameNode implement
try {
if (checkpointImage != null) checkpointImage.close();
} catch(IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
+ LOG.warn("Exception while closing CheckpointStorage", e);
}
}
@@ -330,7 +330,7 @@ public class SecondaryNameNode implement
try {
ugi = UserGroupInformation.getLoginUser();
} catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exception while getting login user", e);
e.printStackTrace();
Runtime.getRuntime().exit(-1);
}
@@ -378,12 +378,10 @@ public class SecondaryNameNode implement
lastCheckpointTime = now;
}
} catch (IOException e) {
- LOG.error("Exception in doCheckpoint: ");
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Exception in doCheckpoint", e);
e.printStackTrace();
} catch (Throwable e) {
- LOG.error("Throwable Exception in doCheckpoint: ");
- LOG.error(StringUtils.stringifyException(e));
+ LOG.error("Throwable Exception in doCheckpoint", e);
e.printStackTrace();
Runtime.getRuntime().exit(-1);
}
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/test/hdfs:1134994-1143516
+/hadoop/common/trunk/hdfs/src/test/hdfs:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml Thu Jul 14 20:16:02 2011
@@ -2671,6 +2671,24 @@
</test>
<test> <!-- TESTED -->
+ <description>cp: putting file into an already existing destination with -f option(absolute path)</description>
+ <test-commands>
+ <command>-fs NAMENODE -touchz /user/file0</command>
+ <command>-fs NAMENODE -cp -f CLITEST_DATA/data120bytes /user/file0</command>
+ <command>-fs NAMENODE -cat /user/file0</command>
+ </test-commands>
+ <cleanup-commands>
+ <command>-fs NAMENODE -rm -r /user</command>
+ </cleanup-commands>
+ <comparators>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>12345678901234</expected-output>
+ </comparator>
+ </comparators>
+ </test>
+
+ <test> <!-- TESTED -->
<description>cp: copying directory to directory in hdfs:// path</description>
<test-commands>
<command>-fs NAMENODE -mkdir hdfs:///dir0</command>
@@ -4077,6 +4095,24 @@
</test>
<test> <!-- TESTED -->
+ <description>put: putting file into an already existing destination with -f option(absolute path)</description>
+ <test-commands>
+ <command>-fs NAMENODE -touchz /user/file0</command>
+ <command>-fs NAMENODE -put -f CLITEST_DATA/data120bytes /user/file0</command>
+ <command>-fs NAMENODE -cat /user/file0</command>
+ </test-commands>
+ <cleanup-commands>
+ <command>-fs NAMENODE -rm -r /user</command>
+ </cleanup-commands>
+ <comparators>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>12345678901234</expected-output>
+ </comparator>
+ </comparators>
+ </test>
+
+ <test> <!-- TESTED -->
<description>put: putting file into an already existing destination(relative path)</description>
<test-commands>
<command>-fs NAMENODE -touchz file0</command>
@@ -4593,6 +4629,25 @@
</test>
<test> <!-- TESTED -->
+ <description>copyFromLocal: copying file into an already existing destination with -f option(absolute path)</description>
+ <test-commands>
+ <command>-fs NAMENODE -touchz /user/file0</command>
+ <command>-fs NAMENODE -copyFromLocal -f CLITEST_DATA/data120bytes /user/file0</command>
+ <command>-fs NAMENODE -cat /user/file0</command>
+ </test-commands>
+ <cleanup-commands>
+ <command>-fs NAMENODE -rm -r /user</command>
+ </cleanup-commands>
+ <comparators>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>12345678901234</expected-output>
+ </comparator>
+ </comparators>
+ </test>
+
+ <test> <!-- TESTED -->
+
<description>copyFromLocal: copying file into an already existing destination(relative path)</description>
<test-commands>
<command>-fs NAMENODE -touchz file0</command>
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java Thu Jul 14 20:16:02 2011
@@ -1354,4 +1354,73 @@ public class TestDFSShell extends TestCa
int res = admin.run(new String[] {"-refreshNodes"});
assertEquals("expected to fail -1", res , -1);
}
+
+ // the force (overwrite) option for the copy commands is -f
+ public void testCopyCommandsWithForceOption() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .format(true).build();
+ FsShell shell = null;
+ FileSystem fs = null;
+ File localFile = new File("testFileForPut");
+ Path hdfsTestDir = new Path("ForceTestDir");
+ try {
+ fs = cluster.getFileSystem();
+ fs.mkdirs(hdfsTestDir);
+ localFile.createNewFile();
+ writeFile(fs, new Path("testFileForPut"));
+ shell = new FsShell();
+
+ // Tests for put
+ String[] argv = new String[] { "-put", "-f", localFile.getName(),
+ "ForceTestDir" };
+ int res = ToolRunner.run(shell, argv);
+ int SUCCESS = 0;
+ int ERROR = 1;
+ assertEquals("put -f is not working", SUCCESS, res);
+
+ argv = new String[] { "-put", localFile.getName(), "ForceTestDir" };
+ res = ToolRunner.run(shell, argv);
+ assertEquals("put command itself is able to overwrite the file", ERROR,
+ res);
+
+ // Tests for copyFromLocal
+ argv = new String[] { "-copyFromLocal", "-f", localFile.getName(),
+ "ForceTestDir" };
+ res = ToolRunner.run(shell, argv);
+ assertEquals("copyFromLocal -f is not working", SUCCESS, res);
+
+ argv = new String[] { "-copyFromLocal", localFile.getName(),
+ "ForceTestDir" };
+ res = ToolRunner.run(shell, argv);
+ assertEquals(
+ "copyFromLocal command itself is able to overwrite the file", ERROR,
+ res);
+
+ // Tests for cp
+ argv = new String[] { "-cp", "-f", localFile.getName(), "ForceTestDir" };
+ res = ToolRunner.run(shell, argv);
+ assertEquals("cp -f is not working", SUCCESS, res);
+
+ argv = new String[] { "-cp", localFile.getName(),
+ "ForceTestDir" };
+ res = ToolRunner.run(shell, argv);
+ assertEquals("cp command itself is able to overwrite the file", ERROR,
+ res);
+ } finally {
+ if (null != shell)
+ shell.close();
+
+ if (localFile.exists())
+ localFile.delete();
+
+ if (null != fs) {
+ fs.delete(hdfsTestDir, true);
+ fs.close();
+ }
+ cluster.shutdown();
+ }
+
+ }
+
}
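testCopyCommandsWithForceOption calls a pre-existing writeFile helper that is
not part of this hunk; it only needs to leave some bytes at the given HDFS path
so that -cp has a source to copy. A plausible stand-in, hypothetical rather
than the actual helper:

    // Hypothetical equivalent of TestDFSShell's writeFile helper:
    // create (or overwrite) the file and write a short payload.
    static void writeFile(FileSystem fs, Path p) throws IOException {
      FSDataOutputStream out = fs.create(p, true);
      try {
        out.write("0123456789".getBytes());
      } finally {
        out.close();
      }
    }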
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java Thu Jul 14 20:16:02 2011
@@ -44,6 +44,7 @@ public class TestWriteRead {
private static final int BUFFER_SIZE = 8192 * 100;
private static final String ROOT_DIR = "/tmp/";
+ private static final long blockSize = 1024 * 100;
// command-line options. Different defaults for unit test vs real cluster
String filenameOption = ROOT_DIR + "fileX1";
@@ -69,8 +70,8 @@ public class TestWriteRead {
LOG.info("initJunitModeTest");
conf = new HdfsConfiguration();
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 100); // 100K
- // blocksize
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+ // 100K blocksize
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
@@ -99,15 +100,14 @@ public class TestWriteRead {
}
/** Junit Test reading while writing. */
-
@Test
public void testWriteReadSeq() throws IOException {
useFCOption = false;
positionReadOption = false;
String fname = filenameOption;
-
+ long rdBeginPos = 0;
// need to run long enough to fail: takes 25 to 35 sec on Mac
- int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
+ int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
LOG.info("Summary status from test1: status= " + stat);
Assert.assertEquals(0, stat);
}
@@ -117,14 +117,27 @@ public class TestWriteRead {
public void testWriteReadPos() throws IOException {
String fname = filenameOption;
positionReadOption = true; // position read
- int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
+ long rdBeginPos = 0;
+ int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
Assert.assertEquals(0, stat);
}
+ /** Junit Test position read of the current block being written. */
+ @Test
+ public void testReadPosCurrentBlock() throws IOException {
+ String fname = filenameOption;
+ positionReadOption = true; // position read
+ int wrChunkSize = (int) blockSize + (int) (blockSize / 2);
+ long rdBeginPos = blockSize + 1;
+ int numTimes = 5;
+ int stat = testWriteAndRead(fname, numTimes, wrChunkSize, rdBeginPos);
+ Assert.assertEquals(0, stat);
+ }
// equivalent of TestWriteRead1
private int clusterTestWriteRead1() throws IOException {
- int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption);
+ long rdBeginPos = 0;
+ int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption, rdBeginPos);
return stat;
}
@@ -133,10 +146,9 @@ public class TestWriteRead {
* Return number of bytes read.
* Support both sequential read and position read.
*/
- private long readData(String fname, byte[] buffer, long byteExpected)
+ private long readData(String fname, byte[] buffer, long byteExpected, long beginPosition)
throws IOException {
long totalByteRead = 0;
- long beginPosition = 0;
Path path = getFullyQualifiedPath(fname);
FSDataInputStream in = null;
@@ -263,7 +275,7 @@ public class TestWriteRead {
* After each iteration of write, do a read of the file from begin to end.
* Return 0 on success, else number of failure.
*/
- private int testWriteAndRead(String fname, int loopN, int chunkSize)
+ private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
throws IOException {
int countOfFailures = 0;
@@ -324,7 +336,7 @@ public class TestWriteRead {
+ ". TotalByteVisible = " + totalByteVisible + " to file "
+ fname);
}
- byteVisibleToRead = readData(fname, inBuffer, totalByteVisible);
+ byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg = "Written=" + totalByteWritten + " ; Expected Visible="
+ totalByteVisible + " ; Got Visible=" + byteVisibleToRead
@@ -353,7 +365,7 @@ public class TestWriteRead {
out.close();
- byteVisibleToRead = readData(fname, inBuffer, totalByteVisible);
+ byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible="
+ totalByteVisible + " ; Got Visible=" + byteVisibleToRead
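The readData changes above thread a beginPosition parameter through so that the
new testReadPosCurrentBlock can start reading past the first block boundary
while the file is still open for write. The difference between the two read
styles it supports, sketched with standard FSDataInputStream calls —
illustrative, not the patch's exact code:

    // Sequential read: moves the stream's own cursor first.
    FSDataInputStream in = fs.open(path);
    in.seek(beginPosition);
    int n = in.read(buffer, 0, buffer.length);

    // Positional read: stateless; the offset is passed per call, so it
    // can probe the block currently under construction without seeking.
    int m = in.read(beginPosition, buffer, 0, buffer.length);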
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Thu Jul 14 20:16:02 2011
@@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -95,6 +94,54 @@ public class TestDataNodeVolumeFailureTo
/**
* Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
+ * option, i.e. the DN tolerates volumes that fail to come up during
+ * its start-up, within the configured limit.
+ */
+ @Test
+ public void testValidVolumesAtStartup() throws Exception {
+ assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
+
+ // Make sure no DNs are running.
+ cluster.shutdownDataNodes();
+
+ // Bring up a datanode with two default data dirs, but with one bad one.
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
+
+ // We use subdirectories 0 and 1 in order to have only a single
+ // data dir's parent inject a failure.
+ File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
+ File dataDir1 = new File(tld, "data1");
+ File dataDir1Actual = new File(dataDir1, "1");
+ dataDir1Actual.mkdirs();
+ // Force an IOE to occur on one of the dfs.data.dir.
+ File dataDir2 = new File(tld, "data2");
+ prepareDirToFail(dataDir2);
+ File dataDir2Actual = new File(dataDir2, "2");
+
+ // Start one DN, with manually managed DN dir
+ conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
+ cluster.startDataNodes(conf, 1, false, null, null);
+ cluster.waitActive();
+
+ try {
+ assertTrue("The DN should have started up fine.",
+ cluster.isDataNodeUp());
+ DataNode dn = cluster.getDataNodes().get(0);
+ String si = dn.getFSDataset().getStorageInfo();
+ assertTrue("The DN should have started with this directory",
+ si.contains(dataDir1Actual.getPath()));
+ assertFalse("The DN shouldn't have a bad directory.",
+ si.contains(dataDir2Actual.getPath()));
+ } finally {
+ cluster.shutdownDataNodes();
+ FileUtil.chmod(dataDir2.toString(), "755");
+ }
+
+ }
+
+ /**
+ * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
* option, i.e. the DN shuts itself down when the number of failures
* experienced exceeds the tolerated amount.
*/
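testValidVolumesAtStartup depends on an existing prepareDirToFail helper that
is not shown in this hunk; the finally block's chmod back to "755" suggests it
works by revoking permissions. A plausible sketch under that assumption,
hypothetical rather than the actual helper:

    // Make the directory unusable by removing all permissions, so the
    // DN fails to create its storage subdirectory underneath it.
    private void prepareDirToFail(File dir) throws Exception {
      dir.mkdirs();
      FileUtil.chmod(dir.toString(), "000");
    }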
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1146881&r1=1146880&r2=1146881&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Thu Jul 14 20:16:02 2011
@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
+import java.nio.channels.FileChannel;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.conf.Configuration;
@@ -35,6 +37,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mockito;
public class TestEditLogFileOutputStream {
@@ -82,6 +85,30 @@ public class TestEditLogFileOutputStream
assertTrue("Edit log disk space used should be at least 257 blocks",
256 * 4096 <= new DU(editLog, conf).getUsed());
}
+
+ @Test
+ public void testClose() throws IOException {
+ String errorMessage = "TESTING: fc.truncate() threw IOE";
+
+ File testDir = new File(System.getProperty("test.build.data", "/tmp"));
+ assertTrue("could not create test directory", testDir.exists() || testDir.mkdirs());
+ File f = new File(testDir, "edits");
+ assertTrue("could not create test file", f.createNewFile());
+ EditLogFileOutputStream elos = new EditLogFileOutputStream(f, 0);
+
+ FileChannel mockFc = Mockito.spy(elos.getFileChannelForTesting());
+ Mockito.doThrow(new IOException(errorMessage)).when(mockFc).truncate(Mockito.anyLong());
+ elos.setFileChannelForTesting(mockFc);
+
+ try {
+ elos.close();
+ fail("elos.close() succeeded, but should have thrown");
+ } catch (IOException e) {
+ assertEquals("wrong IOE thrown from elos.close()", e.getMessage(), errorMessage);
+ }
+
+ assertEquals("fc was not nulled when elos.close() failed", elos.getFileChannelForTesting(), null);
+ }
/**
* Tests EditLogFileOutputStream doesn't throw NullPointerException on
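testClose above uses Mockito's spy/doThrow pattern to make an otherwise real
FileChannel fail only on truncate(). The general shape of that pattern, with
illustrative names:

    // A spy wraps a real object; un-stubbed calls pass through to it.
    FileChannel spyFc = Mockito.spy(realFc);
    // For spies, prefer doThrow(...).when(spy).method(...): the
    // when(spy.method(...)) form would invoke the real method
    // while setting up the stub.
    Mockito.doThrow(new IOException("boom"))
        .when(spyFc).truncate(Mockito.anyLong());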
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/datanode:1134994-1143516
+/hadoop/common/trunk/hdfs/src/webapps/datanode:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
/hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/hdfs:1134994-1143516
+/hadoop/common/trunk/hdfs/src/webapps/hdfs:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
/hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512
Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jul 14 20:16:02 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/secondary:1134994-1143516
+/hadoop/common/trunk/hdfs/src/webapps/secondary:1134994-1146865
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
/hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512