Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 20:31:10 UTC
svn commit: r752590 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Author: dhruba
Date: Wed Mar 11 19:31:09 2009
New Revision: 752590
URL: http://svn.apache.org/viewvc?rev=752590&view=rev
Log:
HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
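For readers skimming the diff below, the failure mode being fixed is roughly this: when
the JVM shuts down, the client can be closed (presumably via FileSystem's shutdown hook)
while output streams are still open, and DFSClient could then throw during teardown. A
minimal sketch of the triggering pattern, assuming a configured HDFS client; the path and
class name here are illustrative, not from the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ShutdownRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Open a file and write, but deliberately never close the stream.
        FSDataOutputStream out = fs.create(new Path("/tmp/shutdown-repro"));
        out.write("something".getBytes());
        // Closing the filesystem (or letting the JVM shutdown hook do it)
        // while the stream is still open is the path this commit hardens.
        fs.close();
      }
    }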
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=752590&r1=752589&r2=752590&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Mar 11 19:31:09 2009
@@ -1000,6 +1000,8 @@
dfs.support.append is set to true. (dhruba)
HADOOP-5333. libhdfs supports appending to files. (dhruba)
+
+ HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
Release 0.19.1 - Unreleased
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=752590&r1=752589&r2=752590&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Wed Mar 11 19:31:09 2009
@@ -205,8 +205,8 @@
*/
public synchronized void close() throws IOException {
if(clientRunning) {
- clientRunning = false;
leasechecker.close();
+ clientRunning = false;
// close connections to the namenode
RPC.stopProxy(rpcNamenode);
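The two swapped lines above are the heart of the fix: leasechecker.close() now runs while
clientRunning is still true, and the flag is flipped only once the lease checker has shut
down, presumably so the background thread's final iteration never observes a client that
already claims to be stopped while teardown is still in progress. A standalone sketch of
the same ordering, with illustrative names rather than the real DFSClient internals:

    import java.io.IOException;

    public class OrderedShutdown {
      private volatile boolean clientRunning = true;

      // Stand-in for the lease checker: loops while the client is running.
      private final Thread worker = new Thread(() -> {
        while (clientRunningNow()) {
          try { Thread.sleep(100); } catch (InterruptedException e) { return; }
        }
      });

      private boolean clientRunningNow() { return clientRunning; }

      public void start() { worker.start(); }

      public synchronized void close() throws IOException {
        if (clientRunning) {
          // Stop and join the worker first (analogous to leasechecker.close())...
          worker.interrupt();
          try { worker.join(); } catch (InterruptedException ignored) { }
          // ...and only then mark the client as stopped.
          clientRunning = false;
        }
      }
    }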
@@ -2133,7 +2133,6 @@
private volatile boolean closed = false;
public void run() {
-
while (!closed && clientRunning) {
// if the Responder encountered an error, shutdown Responder
@@ -2468,21 +2467,30 @@
// The original bad datanode is left in the list because it is
// conservative to remove only one datanode in one iteration.
for (int j = 0; j < nodes.length; j++) {
- if (nodes[j] == primaryNode) {
+ if (nodes[j].equals(primaryNode)) {
errorIndex = j; // forget original bad node.
}
}
+ // remove primary node from list
+ newnodes = new DatanodeInfo[nodes.length-1];
+ System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
+ System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
+ newnodes.length-errorIndex);
+ nodes = newnodes;
LOG.warn("Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Marking primary datanode as bad.");
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Marking primary datanode as bad.");
recoveryErrorCount = 0;
+ errorIndex = -1;
return true; // sleep when we return from here
}
String emsg = "Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Aborting...";
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Aborting...";
LOG.warn(emsg);
lastException = new IOException(emsg);
closed = true;
@@ -2492,7 +2500,8 @@
LOG.warn("Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Will retry...");
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Will retry...");
return true; // sleep when we return from here
} finally {
RPC.stopProxy(primary);
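Two things are going on in the recovery hunk above: the identity comparison
nodes[j] == primaryNode becomes an equals() call, presumably because DatanodeInfo
instances for the same node are not guaranteed to be the same object after a re-fetch
from the namenode, and the bad primary is now physically removed from the pipeline array
with a pair of System.arraycopy calls. A standalone sketch of that removal pattern, using
illustrative String elements in place of DatanodeInfo:

    import java.util.Arrays;

    public class RemoveAt {
      // Remove the element at errorIndex, closing the gap, exactly as the
      // two arraycopy calls in the hunk above do for the datanode pipeline.
      static String[] removeAt(String[] nodes, int errorIndex) {
        String[] newnodes = new String[nodes.length - 1];
        System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
        System.arraycopy(nodes, errorIndex + 1, newnodes, errorIndex,
                         newnodes.length - errorIndex);
        return newnodes;
      }

      public static void main(String[] args) {
        String[] pipeline = {"dn1", "dn2", "dn3"};
        // Drop the middle node: prints [dn1, dn3]
        System.out.println(Arrays.toString(removeAt(pipeline, 1)));
      }
    }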
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=752590&r1=752589&r2=752590&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Wed Mar 11 19:31:09 2009
@@ -56,6 +56,7 @@
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
@@ -703,4 +704,31 @@
System.out.println("testLeaseExpireHardLimit successful");
}
+
+ // test closing file system before all file handles are closed.
+ public void testFsClose() throws Exception {
+ System.out.println("test file system close start");
+ final int DATANODE_NUM = 3;
+
+ Configuration conf = new Configuration();
+
+ // create cluster
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+ DistributedFileSystem dfs = null;
+ try {
+ cluster.waitActive();
+ dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+ // create a new file.
+ final String f = DIR + "foofs";
+ final Path fpath = new Path(f);
+ FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+ out.write("something".getBytes());
+
+ // close file system without closing file
+ dfs.close();
+ } finally {
+ System.out.println("testFsClose successful");
+ }
+ }
}
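A note on the new test: its only assertion is implicit. The stream returned by
TestFileCreation.createFile is deliberately never closed, and the test passes as long as
dfs.close() returns without throwing, which is exactly the behavior the reordered
DFSClient.close() is meant to guarantee; the println in the finally block is a log
marker, not a check.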