Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 20:39:42 UTC
svn commit: r752591 - in /hadoop/core/branches/branch-0.20: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Author: dhruba
Date: Wed Mar 11 19:39:41 2009
New Revision: 752591
URL: http://svn.apache.org/viewvc?rev=752591&view=rev
Log:
HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
Modified:
hadoop/core/branches/branch-0.20/CHANGES.txt (contents, props changed)
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=752591&r1=752590&r2=752591&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Wed Mar 11 19:39:41 2009
@@ -724,6 +724,8 @@
dfs.support.append is set to true. (dhruba)
HADOOP-5333. libhdfs supports appending to files. (dhruba)
+
+ HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
Release 0.19.1 - Unreleased
Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 11 19:39:41 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.18/CHANGES.txt:727226
/hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514,752555
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514,752555,752590
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=752591&r1=752590&r2=752591&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Wed Mar 11 19:39:41 2009
@@ -206,8 +206,8 @@
*/
public synchronized void close() throws IOException {
if(clientRunning) {
- clientRunning = false;
leasechecker.close();
+ clientRunning = false;
// close connections to the namenode
RPC.stopProxy(rpcNamenode);
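[Editor's note: the close() hunk above swaps two statements so that leasechecker.close() runs before clientRunning is cleared. A plausible reading is that the lease checker may still need to issue RPCs while shutting down (for example, closing files left open at JVM exit), and those calls would be rejected once clientRunning is false. A minimal, self-contained sketch of the ordering pattern; the names Client and LeaseChecker are illustrative stand-ins, not the real DFSClient internals:

import java.io.IOException;

// Illustrative stand-ins, not the actual DFSClient classes.
class LeaseChecker {
  private Thread daemon;  // background thread that renews leases

  void close() {
    // Interrupt and wait for the background thread; it may issue
    // final RPCs on the way out.
    if (daemon != null) {
      daemon.interrupt();
      try { daemon.join(); } catch (InterruptedException ignored) { }
    }
  }
}

class Client {
  private volatile boolean clientRunning = true;
  private final LeaseChecker leasechecker = new LeaseChecker();

  public synchronized void close() throws IOException {
    if (clientRunning) {
      leasechecker.close();   // shut the checker down first...
      clientRunning = false;  // ...then mark the client as closed
    }
  }
}
]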
@@ -2180,7 +2180,6 @@
private volatile boolean closed = false;
public void run() {
-
while (!closed && clientRunning) {
// if the Responder encountered an error, shutdown Responder
@@ -2515,21 +2514,30 @@
// The original bad datanode is left in the list because it is
// conservative to remove only one datanode in one iteration.
for (int j = 0; j < nodes.length; j++) {
- if (nodes[j] == primaryNode) {
+ if (nodes[j].equals(primaryNode)) {
errorIndex = j; // forget original bad node.
}
}
+ // remove primary node from list
+ newnodes = new DatanodeInfo[nodes.length-1];
+ System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
+ System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
+ newnodes.length-errorIndex);
+ nodes = newnodes;
LOG.warn("Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Marking primary datanode as bad.");
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Marking primary datanode as bad.");
recoveryErrorCount = 0;
+ errorIndex = -1;
return true; // sleep when we return from here
}
String emsg = "Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Aborting...";
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Aborting...";
LOG.warn(emsg);
lastException = new IOException(emsg);
closed = true;
@@ -2539,7 +2547,8 @@
LOG.warn("Error Recovery for block " + block + " failed " +
" because recovery from primary datanode " +
primaryNode + " failed " + recoveryErrorCount +
- " times. Will retry...");
+ " times. " + " Pipeline was " + pipelineMsg +
+ ". Will retry...");
return true; // sleep when we return from here
} finally {
RPC.stopProxy(primary);
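[Editor's note: two things change in the recovery path above. First, the failed primary datanode is now located with equals() rather than ==, presumably because the pipeline entries and the primary reference can be distinct DatanodeInfo instances describing the same node. Second, the primary is actually removed from the pipeline by copying around its index with System.arraycopy, and errorIndex is reset afterward. A small self-contained sketch of that array-removal idiom, with String standing in for DatanodeInfo:

import java.util.Arrays;

public class RemoveFromPipeline {
  // Return a copy of nodes with the element at errorIndex dropped.
  static String[] removeAt(String[] nodes, int errorIndex) {
    String[] newnodes = new String[nodes.length - 1];
    System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
    System.arraycopy(nodes, errorIndex + 1, newnodes, errorIndex,
                     newnodes.length - errorIndex);
    return newnodes;
  }

  public static void main(String[] args) {
    String[] pipeline = { "dn1", "dn2", "dn3" };
    // Suppose dn2 (index 1) is the failed primary:
    System.out.println(Arrays.toString(removeAt(pipeline, 1)));  // [dn1, dn3]
  }
}
]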
Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=752591&r1=752590&r2=752591&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Wed Mar 11 19:39:41 2009
@@ -56,6 +56,7 @@
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
@@ -703,4 +704,31 @@
System.out.println("testLeaseExpireHardLimit successful");
}
+
+ // test closing file system before all file handles are closed.
+ public void testFsClose() throws Exception {
+ System.out.println("test file system close start");
+ final int DATANODE_NUM = 3;
+
+ Configuration conf = new Configuration();
+
+ // create cluster
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+ DistributedFileSystem dfs = null;
+ try {
+ cluster.waitActive();
+ dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+ // create a new file.
+ final String f = DIR + "foofs";
+ final Path fpath = new Path(f);
+ FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+ out.write("something".getBytes());
+
+ // close file system without closing file
+ dfs.close();
+ } finally {
+ System.out.println("testFsClose successful");
+ }
+ }
}
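[Editor's note: the new testFsClose case reproduces the reported failure mode: the file system is closed while a stream on it is still open, which is what happens when the JVM exits and the FileSystem cache's shutdown hook closes cached file systems. A hedged sketch of how application code could hit this; the URI, port, and path are hypothetical:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExitWithOpenStream {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:9000/"), conf);
    FSDataOutputStream out = fs.create(new Path("/tmp/unclosed"));
    out.write("something".getBytes());
    // Exit without calling out.close(); the shutdown hook then closes fs
    // while the stream is still open, the situation this patch handles.
    System.exit(0);
  }
}
]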