You are viewing a plain text version of this content. The canonical (HTML) version, including the original hyperlink, is available in the mailing-list archive.
Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/06/02 22:13:25 UTC
svn commit: r411288 - in /lucene/hadoop/trunk: CHANGES.txt
src/java/org/apache/hadoop/dfs/DataNode.java
Author: cutting
Date: Fri Jun 2 13:13:24 2006
New Revision: 411288
URL: http://svn.apache.org/viewvc?rev=411288&view=rev
Log:
HADOOP-270. Fix potential deadlock in datanode shutdown. Contributed by Hairong.
Modified:
lucene/hadoop/trunk/CHANGES.txt
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=411288&r1=411287&r2=411288&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Fri Jun 2 13:13:24 2006
@@ -97,8 +97,10 @@
26. HADOOP-265. Tasktracker now fails to start if it does not have a
writable local directory for temporary files. In this case, it
- logs a message to the JobTracker and exits. (Hairong Kuang via cutting)
+ logs a message to the JobTracker and exits. (Hairong Kuang via cutting)
+27. HADOOP-270. Fix potential deadlock in datanode shutdown.
+ (Hairong Kuang via cutting)
Release 0.2.1 - 2006-05-12
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=411288&r1=411287&r2=411288&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Fri Jun 2 13:13:24 2006
@@ -146,7 +146,6 @@
// initialize data node internal structure
this.data = new FSDataset(datadir, conf);
this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));
- this.dataXceiveServer.start();
long blockReportIntervalBasis =
conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
@@ -189,10 +188,6 @@
this.shouldRun = false;
((DataXceiveServer) this.dataXceiveServer.getRunnable()).kill();
try {
- this.dataXceiveServer.join();
- } catch (InterruptedException ie) {
- }
- try {
this.storage.close();
} catch (IOException ie) {
}
@@ -213,6 +208,9 @@
* forever calling remote NameNode functions.
*/
public void offerService() throws Exception {
+ // start dataXceiveServer
+ dataXceiveServer.start();
+
long lastHeartbeat = 0, lastBlockReport = 0;
LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec");
@@ -328,6 +326,12 @@
} // while (shouldRun)
} catch(DiskErrorException e) {
handleDiskError(e.getMessage());
+ }
+
+ // wait for dataXceiveServer to terminate
+ try {
+ this.dataXceiveServer.join();
+ } catch (InterruptedException ie) {
}
} // offerService