Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/04 05:56:10 UTC

svn commit: r1077794 - in /hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode: DataNode.java FSDataset.java

Author: omalley
Date: Fri Mar  4 04:56:09 2011
New Revision: 1077794

URL: http://svn.apache.org/viewvc?rev=1077794&view=rev
Log:
commit c3d4bb11701d645b88336dce4bdfc9b6e545022a
Author: Bharath Mundlapudi <bh...@yahoo-inc.com>
Date:   Wed Mar 2 00:17:27 2011 -0800

    HDFS-1592 https://issues.apache.org/jira/secure/attachment/12472397/HDFS-1592-rel20.patch
    
    +++ b/YAHOO-CHANGES.txt
    +Release 0.20.204.0 - unreleased
    +
    +    Bug:4318740  At startup, valid volumes required in FSDataset is not
    +    handled consistently with volumes tolerated. (bharathm)
    +

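In effect, dfs.datanode.failed.volumes.tolerated is now checked against the
number of directories configured in dfs.data.dir, not just against the
directories that happened to come up, and the FSDataset constructor throws a
DiskErrorException instead of only logging an error. A small illustration of
the interplay (directory names and values below are assumptions, not part of
the patch):

    // Illustrative only: four configured data directories, one tolerated
    // failure. Property names are the ones used by the patch.
    import org.apache.hadoop.conf.Configuration;

    public class TolerationExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.data.dir",
            "/d0/dfs/data,/d1/dfs/data,/d2/dfs/data,/d3/dfs/data");
        conf.setInt("dfs.datanode.failed.volumes.tolerated", 1);

        int volsConfigured = conf.getStrings("dfs.data.dir").length;            // 4
        int tolerated = conf.getInt("dfs.datanode.failed.volumes.tolerated", 0); // 1
        System.out.println("validVolsRequired = " + (volsConfigured - tolerated)); // 3
      }
    }

With this configuration the datanode still starts with one missing volume,
but two or more unusable directories at startup now abort FSDataset
construction.
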
Modified:
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1077794&r1=1077793&r2=1077794&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar  4 04:56:09 2011
@@ -519,12 +519,7 @@ public class DataNode extends Configured
         + nsInfo.getBuildVersion() + "; datanode BV = "
         + Storage.getBuildVersion();
       LOG.fatal( errorMsg );
-      try {
-        namenode.errorReport( dnRegistration,
-                              DatanodeProtocol.NOTIFY, errorMsg );
-      } catch( SocketTimeoutException e ) {  // namenode is busy
-        LOG.info("Problem connecting to server: " + getNameNodeAddr());
-      }
+      notifyNamenode(DatanodeProtocol.NOTIFY, errorMsg);
       throw new IOException( errorMsg );
     }
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
@@ -785,6 +780,17 @@ public class DataNode extends Configured
     }
   }
   
+  private void notifyNamenode(int dpCode, String msg) {
+    // inform NameNode
+    try {
+      namenode.errorReport(
+          dnRegistration, dpCode, msg);
+    } catch (SocketTimeoutException e) {  // namenode is busy
+      LOG.info("Problem connecting to server: " + getNameNodeAddr());
+    } catch (IOException ignored) {
+    }
+  }
+  
   private void handleDiskError(String errMsgr) {
     boolean hasEnoughResource = data.hasEnoughResource();
     LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResource);
@@ -796,13 +802,9 @@ public class DataNode extends Configured
       // DN will be shutdown and NN should remove it
       dp_error = DatanodeProtocol.FATAL_DISK_ERROR;
     }
+
     //inform NameNode
-    try {
-      namenode.errorReport(
-                           dnRegistration, dp_error, errMsgr);
-    } catch(IOException ignored) {              
-    }
-    
+    notifyNamenode(dp_error, errMsgr);
     
     if(hasEnoughResource) {
       scheduleBlockReport(0);
@@ -1089,9 +1091,7 @@ public class DataNode extends Configured
       // block does not exist or is under-construction
       String errStr = "Can't send invalid block " + block;
       LOG.info(errStr);
-      namenode.errorReport(dnRegistration, 
-                           DatanodeProtocol.INVALID_BLOCK, 
-                           errStr);
+      notifyNamenode(DatanodeProtocol.INVALID_BLOCK, errStr);
       return;
     }
 
@@ -1491,7 +1491,7 @@ public class DataNode extends Configured
       ", xmitsInProgress=" + xmitsInProgress.get() +
       "}";
   }
-  
+
   private static void printUsage() {
     System.err.println("Usage: java DataNode");
     System.err.println("           [-rollback]");

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1077794&r1=1077793&r2=1077794&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Mar  4 04:56:09 2011
@@ -764,12 +764,30 @@ public class FSDataset implements FSCons
     final int volFailuresTolerated =
       conf.getInt("dfs.datanode.failed.volumes.tolerated",
                   0);
-    this.validVolsRequired = storage.getNumStorageDirs() - volFailuresTolerated; 
+
+    String[] dataDirs = conf.getStrings(DataNode.DATA_DIR_KEY);
+
+    int volsConfigured = 0;
+
+    if (dataDirs != null) {
+      volsConfigured = dataDirs.length;
+    }
+
+    int volsFailed = volsConfigured - storage.getNumStorageDirs();
+
+    if (volsFailed < 0 || volsFailed > volFailuresTolerated) {
+      throw new DiskErrorException("Invalid value for volsFailed : "
+          + volsFailed + " , Volumes tolerated : " + volFailuresTolerated);
+    }
+
+    this.validVolsRequired = volsConfigured - volFailuresTolerated;
+
     if (validVolsRequired < 1 ||
         validVolsRequired > storage.getNumStorageDirs()) {
-      DataNode.LOG.error("Invalid value " + volFailuresTolerated + " for " +
-          "dfs.datanode.failed.volumes.tolerated");
+      throw new DiskErrorException("Invalid value for validVolsRequired : "
+          + validVolsRequired + " , Current valid volumes: " + storage.getNumStorageDirs());
     }
+
     FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
       volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
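
For readers skimming the hunk above: the new startup validation boils down to
two checks on volume counts. The following is a simplified sketch, not code
from the patch (the class, method name and parameters are hypothetical; the
checks mirror the lines added to the FSDataset constructor):

    // Hypothetical summary of the validation added above.
    //   volsConfigured       - entries listed in dfs.data.dir
    //   volsInService        - storage.getNumStorageDirs(), i.e. the
    //                          directories that survived storage analysis
    //   volFailuresTolerated - dfs.datanode.failed.volumes.tolerated
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class VolumeCheckSketch {
      static int validVolsRequired(int volsConfigured, int volsInService,
                                   int volFailuresTolerated)
          throws DiskErrorException {
        int volsFailed = volsConfigured - volsInService;
        if (volsFailed < 0 || volsFailed > volFailuresTolerated) {
          throw new DiskErrorException("Invalid value for volsFailed : "
              + volsFailed + " , Volumes tolerated : " + volFailuresTolerated);
        }
        int validVolsRequired = volsConfigured - volFailuresTolerated;
        if (validVolsRequired < 1 || validVolsRequired > volsInService) {
          throw new DiskErrorException("Invalid value for validVolsRequired : "
              + validVolsRequired + " , Current valid volumes: " + volsInService);
        }
        return validVolsRequired;
      }
    }

With the earlier example numbers (4 configured, 1 tolerated): 3 volumes in
service gives volsFailed = 1 and validVolsRequired = 3, so startup proceeds;
2 volumes in service gives volsFailed = 2 > 1 and the check throws.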