Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2007/09/13 00:09:11 UTC

svn commit: r575100 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/

Author: dhruba
Date: Wed Sep 12 15:09:09 2007
New Revision: 575100

URL: http://svn.apache.org/viewvc?rev=575100&view=rev
Log:
HADOOP-1846. DatanodeReport in ClientProtocol can report live
datanodes, dead datanodes or all datanodes. Client Protocol version
changed from 17 to 18.  (Hairong Kuang via dhruba)
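
For readers following the change, here is a minimal client-side sketch, not part of this
commit: same-package placement and the NameNode address (localhost:8020) are assumptions
for illustration; only the DatanodeReportType argument is new in this change.

    // Minimal sketch, not part of this commit. Same-package placement and the
    // NameNode address are illustrative assumptions.
    package org.apache.hadoop.dfs;

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;

    public class DatanodeReportExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DFSClient client =
            new DFSClient(new InetSocketAddress("localhost", 8020), conf);
        // The report-type argument is what this change adds:
        DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
        DatanodeInfo[] dead = client.datanodeReport(DatanodeReportType.DEAD);
        DatanodeInfo[] all  = client.datanodeReport(DatanodeReportType.ALL);
        System.out.println(live.length + " live / " + dead.length + " dead / "
            + all.length + " total datanodes");
        client.close();
      }
    }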


Added:
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java   (with props)
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestModTime.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Sep 12 15:09:09 2007
@@ -29,6 +29,10 @@
     with a 'final' tag may not be overridden by subsequently loaded
     configuration files, e.g., by jobs.  (Arun C. Murthy via cutting)
 
+    HADOOP-1846. DatanodeReport in ClientProtocol can report live 
+    datanodes, dead datanodes or all datanodes. Client Protocol version
+    changed from 17 to 18.  (Hairong Kuang via dhruba)
+
   NEW FEATURES
 
     HADOOP-1636.  Allow configuration of the number of jobs kept in

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ClientProtocol.java Wed Sep 12 15:09:09 2007
@@ -33,8 +33,9 @@
    * Compared to the previous version the following changes have been introduced:
    * 16 : removed deprecated obtainLock() and releaseLock(). 
    * 17 : getBlockSize replaced by getPreferredBlockSize
+   * 18 : getDatanodeReport returns dead, live or all datanodes.
    */
-  public static final long versionID = 17L;
+  public static final long versionID = 18L;
   
   ///////////////////////////////////////
   // File contents
@@ -233,15 +234,19 @@
-   * Right now, only two values are returned.
+   * Right now, three values are returned.
    * [0] contains the total storage capacity of the system,
    *     in bytes.
-   * [1] contains the available storage of the system, in bytes.
+   * [1] contains the total used space of the system, in bytes.
+   * [2] contains the available storage of the system, in bytes.
    */
   public long[] getStats() throws IOException;
 
   /**
-   * Get a full report on the system's current datanodes.
+   * Get a report on the system's current datanodes.
    * One DatanodeInfo object is returned for each DataNode.
+   * Return live datanodes if type is LIVE, dead datanodes if type is DEAD,
+   * or all datanodes if type is ALL.
    */
-  public DatanodeInfo[] getDatanodeReport() throws IOException;
+  public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
+  throws IOException;
 
   /**
    * Get the block size for the given file.

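The getStats() javadoc above now documents three slots. A hedged sketch of a caller
consuming them, assuming the NameNode fills the array as documented; the class and
method names here are illustrative only:

    // Illustrative only, not part of this commit. Same-package placement is an
    // assumption so the ClientProtocol handle is accessible.
    package org.apache.hadoop.dfs;

    import java.io.IOException;

    class FsStatsExample {
      // 'namenode' stands in for an RPC proxy implementing ClientProtocol.
      static void printStats(ClientProtocol namenode) throws IOException {
        long[] stats = namenode.getStats();
        long capacity  = stats[0]; // total storage capacity, in bytes
        long used      = stats[1]; // total used space, in bytes
        long remaining = stats[2]; // available storage, in bytes
        System.out.println("capacity=" + capacity + " used=" + used
            + " remaining=" + remaining);
      }
    }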
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Wed Sep 12 15:09:09 2007
@@ -450,8 +450,9 @@
     return rawNums[1];
   }
 
-  public DatanodeInfo[] datanodeReport() throws IOException {
-    return namenode.getDatanodeReport();
+  public DatanodeInfo[] datanodeReport(DatanodeReportType type)
+  throws IOException {
+    return namenode.getDatanodeReport(type);
   }
     
   /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Wed Sep 12 15:09:09 2007
@@ -24,6 +24,7 @@
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
 import org.apache.hadoop.util.*;
 
@@ -238,7 +239,7 @@
 
   /** Return statistics for each datanode. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
-    return dfs.datanodeReport();
+    return dfs.datanodeReport(DatanodeReportType.ALL);
   }
 
   /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java Wed Sep 12 15:09:09 2007
@@ -131,6 +131,9 @@
   // Startup options
   public enum StartupOption{ FORMAT, REGULAR, UPGRADE, ROLLBACK; }
 
+  // type of the datanode report
+  public static enum DatanodeReportType { ALL, LIVE, DEAD }
+  
   /**
    * Type of the node
    */

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Sep 12 15:09:09 2007
@@ -301,7 +301,8 @@
                             + maxReplication);
     this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2);
     long heartbeatInterval = conf.getLong("dfs.heartbeat.interval", 3) * 1000;
-    this.heartbeatRecheckInterval = 5 * 60 * 1000; // 5 minutes
+    this.heartbeatRecheckInterval = conf.getInt(
+        "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
       10 * heartbeatInterval;
     this.replicationRecheckInterval = 3 * 1000; //  3 second
@@ -2446,15 +2447,29 @@
     }
   }
 
-  public synchronized DatanodeInfo[] datanodeReport() {
-    DatanodeInfo results[] = null;
+  public synchronized DatanodeInfo[] datanodeReport(DatanodeReportType type) {
+    ArrayList<DatanodeInfo> results = new ArrayList<DatanodeInfo>();
     synchronized (datanodeMap) {
-      results = new DatanodeInfo[datanodeMap.size()];
-      int i = 0;
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();)
-        results[i++] = new DatanodeInfo(it.next());
+      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
+        DatanodeDescriptor tmp = it.next();
+        switch (type) {
+        case ALL: 
+          results.add(new DatanodeInfo(tmp));
+          break;
+        case DEAD: 
+          if(isDatanodeDead(tmp)) {
+            results.add(new DatanodeInfo(tmp));
+          }
+          break;
+        case LIVE:
+          if(!isDatanodeDead(tmp)) {
+            results.add(new DatanodeInfo(tmp));
+          }
+          break;
+        }
+      }
     }
-    return results;
+    return results.toArray(new DatanodeInfo[results.size()]);
   }
     
   /**

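The switch above delegates liveness to isDatanodeDead(), which this commit does not
touch; note also that the recheck interval driving dead-node detection is now read from
the heartbeat.recheck.interval key (see the constructor hunk), which is what lets the
new test shrink it to 500ms. A hedged sketch of the predicate's usual shape, with names
taken from the surrounding diff rather than copied from FSNamesystem:

    // Hedged sketch of the liveness predicate; the exact code in FSNamesystem
    // may differ. A node is dead once its last heartbeat is older than the
    // expiry window (2 * heartbeatRecheckInterval + 10 * heartbeatInterval,
    // per the constructor change above).
    private boolean isDatanodeDead(DatanodeDescriptor node) {
      return node.getLastUpdate() <
             (System.currentTimeMillis() - heartbeatExpireInterval);
    }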
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Wed Sep 12 15:09:09 2007
@@ -471,9 +471,10 @@
 
   /**
    */
-  public DatanodeInfo[] getDatanodeReport() throws IOException {
-    DatanodeInfo results[] = namesystem.datanodeReport();
-    if (results == null || results.length == 0) {
+  public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
+  throws IOException {
+    DatanodeInfo results[] = namesystem.datanodeReport(type);
+    if (results == null) {
       throw new IOException("Cannot find datanode report");
     }
     return results;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Wed Sep 12 15:09:09 2007
@@ -24,6 +24,7 @@
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -319,38 +320,15 @@
                                                    getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
 
-    //
-    // get initial state of datanodes
-    //  
-    DatanodeInfo[] oldinfo = client.datanodeReport();
-    while (oldinfo.length != dataNodes.size()) {
+    // make sure all datanodes are alive
+    while (client.datanodeReport(DatanodeReportType.LIVE).length
+        != dataNodes.size()) {
       try {
         Thread.sleep(500);
       } catch (Exception e) {
       }
-      oldinfo = client.datanodeReport();
     }
 
-    // 
-    // wait till all datanodes send at least yet another heartbeat
-    //
-    int numdead = 0;
-    while (numdead > 0) {
-      try {
-        Thread.sleep(500);
-      } catch (Exception e) {
-      }
-      DatanodeInfo[] info = client.datanodeReport();
-      if (info.length != dataNodes.size()) {
-        continue;
-      }
-      numdead = 0;
-      for (int i = 0; i < info.length; i++) {
-        if (oldinfo[i].getLastUpdate() >= info[i].getLastUpdate()) {
-          numdead++;
-        }
-      }
-    }
     client.close();
   }
 }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java Wed Sep 12 15:09:09 2007
@@ -30,6 +30,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.dfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -132,7 +133,7 @@
     DFSClient dfsClient = new DFSClient(
                  new InetSocketAddress("localhost", cluster.getNameNodePort()),
                  conf);                
-    datanode = dfsClient.datanodeReport()[0];
+    datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
     dnAddr = DataNode.createSocketAddr(datanode.getName());
     FileSystem fileSys = cluster.getFileSystem();
     

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java?rev=575100&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java Wed Sep 12 15:09:09 2007
@@ -0,0 +1,85 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.dfs;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+
+/**
+ * This test ensures that all types of datanode report work correctly.
+ */
+public class TestDatanodeReport extends TestCase {
+  final static private Configuration conf = new Configuration();
+  final static private int NUM_OF_DATANODES = 4;
+    
+  /**
+   * This test exercises the different types of datanode report.
+   */
+  public void testDatanodeReport() throws Exception {
+    conf.setInt(
+        "heartbeat.recheck.interval", 500); // 0.5s
+    MiniDFSCluster cluster = 
+      new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
+    try {
+      //wait until the cluster is up
+      cluster.waitActive();
+
+      InetSocketAddress addr = new InetSocketAddress("localhost",
+          cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+
+      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
+                   NUM_OF_DATANODES);
+      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
+                   NUM_OF_DATANODES);
+      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
+
+      // bring down one datanode
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      datanodes.remove(datanodes.size()-1).shutdown();
+
+      DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
+      while (nodeInfo.length != 1) {
+        try {
+          Thread.sleep(500);
+        } catch (Exception e) {
+        }
+        nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
+      }
+
+      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
+                   NUM_OF_DATANODES-1);
+      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
+                   NUM_OF_DATANODES);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDatanodeReport().testDatanodeReport();
+  }
+  
+}
+
+

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeReport.java
------------------------------------------------------------------------------
    svn:keywords = Id Revision HeadURL

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java Wed Sep 12 15:09:09 2007
@@ -26,6 +26,7 @@
 import java.net.*;
 import java.lang.InterruptedException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -169,7 +170,7 @@
                                   FileSystem localFileSys)
     throws IOException {
     DistributedFileSystem dfs = (DistributedFileSystem) filesys;
-    DatanodeInfo[] info = client.datanodeReport();
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
 
     //
     // pick one datanode randomly.
@@ -277,7 +278,7 @@
     InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
-    DatanodeInfo[] info = client.datanodeReport();
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     assertEquals("Number of Datanodes ", numDatanodes, info.length);
     FileSystem fileSys = cluster.getFileSystem();
     DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
@@ -302,7 +303,7 @@
         cleanupFile(localFileSys, dir);
       }
     } catch (IOException e) {
-      info = client.datanodeReport();
+      info = client.datanodeReport(DatanodeReportType.ALL);
       printDatanodeReport(info);
       throw e;
     } finally {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestModTime.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestModTime.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestModTime.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestModTime.java Wed Sep 12 15:09:09 2007
@@ -22,6 +22,7 @@
 import java.util.Random;
 import java.net.*;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -80,7 +81,7 @@
     InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
-    DatanodeInfo[] info = client.datanodeReport();
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     assertEquals("Number of Datanodes ", numDatanodes, info.length);
     FileSystem fileSys = cluster.getFileSystem();
     int replicas = numDatanodes - 1;
@@ -170,7 +171,7 @@
      cleanupFile(fileSys, dir1);
      cleanupFile(fileSys, dir2);
     } catch (IOException e) {
-      info = client.datanodeReport();
+      info = client.datanodeReport(DatanodeReportType.ALL);
       printDatanodeReport(info);
       throw e;
     } finally {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?rev=575100&r1=575099&r2=575100&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Wed Sep 12 15:09:09 2007
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -141,7 +142,7 @@
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     
-    DatanodeInfo[] info = client.datanodeReport();
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     assertEquals("Number of Datanodes ", numDatanodes, info.length);
     FileSystem fileSys = cluster.getFileSystem();
     try {