Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/06/28 16:06:46 UTC

svn commit: r417759 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/webapps/dfs/

Author: cutting
Date: Wed Jun 28 07:06:45 2006
New Revision: 417759

URL: http://svn.apache.org/viewvc?rev=417759&view=rev
Log:
Revert patch from HADOOP-321 applied in revision 417566, since a bug was discovered.

Removed:
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Jun 28 07:06:45 2006
@@ -70,11 +70,7 @@
     on a DFS datanode.  One may specify both the percentage free and
     the number of bytes.  (Johan Oskarson via cutting)
 
-17. HADOOP-321.  Refactor some DFS classes.  DataNodeReport nearly
-    duplicated DataNodeInfo.  The former is now deprecated, replaced
-    by the latter.  (Konstantin Shvachko via cutting)
-
-18. HADOOP-325.  Fix a problem initializing RPC parameter classes, and
+17. HADOOP-325.  Fix a problem initializing RPC parameter classes, and
     remove the workaround used to initialize classes.
     (omalley via cutting)
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java Wed Jun 28 07:06:45 2006
@@ -281,11 +281,11 @@
         System.out.println("Effective replication multiplier: " + (1.0 * rawUsed / used));
 
         System.out.println("-------------------------------------------------");
-        DatanodeInfo info[] = dfs.getDataNodeStats();
+        DataNodeReport info[] = dfs.getDataNodeStats();
         System.out.println("Datanodes available: " + info.length);
         System.out.println();
         for (int i = 0; i < info.length; i++) {
-          System.out.println(info[i].getDatanodeReport());
+          System.out.println(info[i]);
           System.out.println();
         }
       }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java Wed Jun 28 07:06:45 2006
@@ -1,12 +1,46 @@
 package org.apache.hadoop.dfs;
 
+import java.util.Date;
+
+import org.apache.hadoop.io.UTF8;
+
 /** A report on the status of a DataNode.
  *
  * @see DistributedFileSystem#getDataNodeStats
- * @deprecated Use {@link DatanodeInfo} instead.
  */
-public class DataNodeReport extends DatanodeInfo {
+public class DataNodeReport {
+  String name;
+  String host;
+  long capacity;
+  long remaining;
+  long lastUpdate;
+  
+  /** The name of the datanode. */
+  public String getName() { return name; }
+
+  /** The hostname of the datanode. */
+  public String getHost() { return host; }
+
+  /** The raw capacity. */
+  public long getCapacity() { return capacity; }
+
+  /** The raw free space. */
+  public long getRemaining() { return remaining; }
+
+  /** The time when this information was accurate. */
+  public long getLastUpdate() { return lastUpdate; }
+
   public String toString() {
-    return super.getDatanodeReport();
+    StringBuffer buffer = new StringBuffer();
+    long c = getCapacity();
+    long r = getRemaining();
+    long u = c - r;
+    buffer.append("Name: "+name+"\n");
+    buffer.append("Total raw bytes: "+c+" ("+DFSShell.byteDesc(c)+")"+"\n");
+    buffer.append("Used raw bytes: "+u+" ("+DFSShell.byteDesc(u)+")"+"\n");
+    buffer.append("% used: "+DFSShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
+    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
+    return buffer.toString();
   }
+
 }

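For reference, a minimal sketch of how the restored DataNodeReport is meant to print. The fields are package-private, so this assumes code living in org.apache.hadoop.dfs (as DistributedFileSystem does); the host:port and sizes are made-up sample values, not part of the patch:

    // Illustrative only -- assumes package org.apache.hadoop.dfs so the
    // package-private fields are reachable, as in DistributedFileSystem.
    DataNodeReport r = new DataNodeReport();
    r.name = "crawler1:50010";                  // hypothetical host:port
    r.host = "crawler1";                        // hypothetical hostname
    r.capacity  = 100L * 1024 * 1024 * 1024;    // 100 GB raw
    r.remaining =  40L * 1024 * 1024 * 1024;    //  40 GB free
    r.lastUpdate = System.currentTimeMillis();
    System.out.println(r);  // toString() prints the Name / Total raw bytes /
                            // Used raw bytes / % used / Last contact block
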
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java Wed Jun 28 07:06:45 2006
@@ -7,7 +7,7 @@
  * 
  * @author Konstantin Shvachko
  */
-public class DatanodeID {
+class DatanodeID {
 
   protected String name;      /// hostname:portNumber
   protected String storageID; /// unique per cluster storageID

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java Wed Jun 28 07:06:45 2006
@@ -15,95 +15,151 @@
  */
 package org.apache.hadoop.dfs;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Date;
-
-import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.*;
 
-/** 
- * DatanodeInfo represents the status of a DataNode.
+import java.io.*;
+import java.util.*;
+
+/**************************************************
+ * DatanodeInfo tracks stats on a given DataNode,
+ * such as available storage capacity, last update
+ * time, etc.
  *
  * @author Mike Cafarella
- * @author Konstantin Shvachko
- */
-public class DatanodeInfo extends DatanodeID implements Writable {
-  protected long capacity;
-  protected long remaining;
-  protected long lastUpdate;
-
-  DatanodeInfo() {
-    this( new String(), new String() );
-  }
-  
-  DatanodeInfo( String name, String storageID) {
-    super( name, storageID );
-    this.capacity = 0L;
-    this.remaining = 0L;
-    this.lastUpdate = 0L;
-  }
-  
-  /** The raw capacity. */
-  public long getCapacity() { return capacity; }
-
-  /** The raw free space. */
-  public long getRemaining() { return remaining; }
-
-  /** The time when this information was accurate. */
-  public long getLastUpdate() { return lastUpdate; }
-
-  /** @deprecated Use {@link #getLastUpdate()} instead. */
-  public long lastUpdate() { return getLastUpdate(); }
-
-  /** A formatted string for reporting the status of the DataNode. */
-  public String getDatanodeReport() {
-    StringBuffer buffer = new StringBuffer();
-    long c = getCapacity();
-    long r = getRemaining();
-    long u = c - r;
-    buffer.append("Name: "+name+"\n");
-    buffer.append("Total raw bytes: "+c+" ("+DFSShell.byteDesc(c)+")"+"\n");
-    buffer.append("Used raw bytes: "+u+" ("+DFSShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+DFSShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
-    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
-    return buffer.toString();
-  }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DatanodeInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DatanodeInfo(); }
-       });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    new UTF8( this.name ).write(out);
-    new UTF8( this.storageID ).write(out);
-    out.writeLong(capacity);
-    out.writeLong(remaining);
-    out.writeLong(lastUpdate);
-  }
+ **************************************************/
+public class DatanodeInfo extends DatanodeID implements Writable, Comparable {
 
-  /**
+    static {                                      // register a ctor
+      WritableFactories.setFactory
+        (DatanodeInfo.class,
+         new WritableFactory() {
+           public Writable newInstance() { return new DatanodeInfo(); }
+         });
+    }
+
+    private long capacityBytes, remainingBytes, lastUpdate;
+    private volatile TreeSet blocks;
+
+    /** Create an empty DatanodeInfo.
+     */
+    public DatanodeInfo() {
+        this(new String(), new String(), 0, 0);
+    }
+
+    public DatanodeInfo( DatanodeID nodeID ) {
+      this( nodeID.getName(), nodeID.getStorageID(), 0, 0);
+    }
+    
+   /**
+    * Create an empty DatanodeInfo.
+    */
+    public DatanodeInfo(DatanodeID nodeID, 
+                        long capacity, 
+                        long remaining) {
+      this( nodeID.getName(), nodeID.getStorageID(), capacity, remaining );
+    }
+
+   /**
+    * @param name hostname:portNumber as String object.
+    */
+    public DatanodeInfo(String name, 
+                        String storageID, 
+                        long capacity, 
+                        long remaining) {
+        super( name, storageID );
+        this.blocks = new TreeSet();
+        updateHeartbeat(capacity, remaining);
+    }
+
+   /**
+    */
+    public void updateBlocks(Block newBlocks[]) {
+        blocks.clear();
+        for (int i = 0; i < newBlocks.length; i++) {
+            blocks.add(newBlocks[i]);
+        }
+    }
+
+   /**
+    */
+    public void addBlock(Block b) {
+        blocks.add(b);
+    }
+
+    /**
+     */
+    public void updateHeartbeat(long capacity, long remaining) {
+        this.capacityBytes = capacity;
+        this.remainingBytes = remaining;
+        this.lastUpdate = System.currentTimeMillis();
+    }
+
+    public Block[] getBlocks() {
+        return (Block[]) blocks.toArray(new Block[blocks.size()]);
+    }
+    public Iterator getBlockIterator() {
+        return blocks.iterator();
+    }
+    public long getCapacity() {
+        return capacityBytes;
+    }
+    public long getRemaining() {
+        return remainingBytes;
+    }
+    public long lastUpdate() {
+        return lastUpdate;
+    }
+
+  /** Comparable.
+   * Basis of compare is the String name (host:portNumber) only.
+   * @param o
+   * @return as specified by Comparable.
    */
-  public void readFields(DataInput in) throws IOException {
-    UTF8 uStr = new UTF8();
-    uStr.readFields(in);
-    this.name = uStr.toString();
-    uStr.readFields(in);
-    this.storageID = uStr.toString();
-    this.capacity = in.readLong();
-    this.remaining = in.readLong();
-    this.lastUpdate = in.readLong();
-  }
+    public int compareTo(Object o) {
+        DatanodeInfo d = (DatanodeInfo) o;
+        return name.compareTo(d.getName());
+    }
+
+    /////////////////////////////////////////////////
+    // Writable
+    /////////////////////////////////////////////////
+    /**
+     */
+    public void write(DataOutput out) throws IOException {
+        new UTF8( this.name ).write(out);
+        new UTF8( this.storageID ).write(out);
+        out.writeLong(capacityBytes);
+        out.writeLong(remainingBytes);
+        out.writeLong(lastUpdate);
+
+        /**
+        out.writeInt(blocks.length);
+        for (int i = 0; i < blocks.length; i++) {
+            blocks[i].write(out);
+        }
+        **/
+    }
+
+    /**
+     */
+    public void readFields(DataInput in) throws IOException {
+        UTF8 uStr = new UTF8();
+        uStr.readFields(in);
+        this.name = uStr.toString();
+        uStr.readFields(in);
+        this.storageID = uStr.toString();
+        this.capacityBytes = in.readLong();
+        this.remainingBytes = in.readLong();
+        this.lastUpdate = in.readLong();
+
+        /**
+        int numBlocks = in.readInt();
+        this.blocks = new Block[numBlocks];
+        for (int i = 0; i < blocks.length; i++) {
+            blocks[i] = new Block();
+            blocks[i].readFields(in);
+        }
+        **/
+    }
 }
+

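The Writable methods restored above round-trip name, storageID, capacity, remaining and lastUpdate (the block list stays commented out). A minimal round-trip sketch; the stream plumbing and sample values are illustrative, not part of the patch:

    // Illustrative only: serialize a DatanodeInfo and read it back.
    // Needs java.io.* in addition to the imports above.
    static DatanodeInfo roundTrip() throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DatanodeInfo original =
            new DatanodeInfo("node1:50010", "DS-1234", 100L, 40L);
        original.write(new DataOutputStream(buf));   // name, storageID,
                                                     // capacity, remaining,
                                                     // lastUpdate
        DatanodeInfo copy = new DatanodeInfo();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        return copy;  // lastUpdate is the serialized heartbeat time,
                      // not a fresh timestamp
    }
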
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Wed Jun 28 07:06:45 2006
@@ -242,7 +242,18 @@
     }
 
     /** Return statistics for each datanode.*/
-    public DatanodeInfo[] getDataNodeStats() throws IOException {
-      return dfs.datanodeReport();
+    public DataNodeReport[] getDataNodeStats() throws IOException {
+      DatanodeInfo[]  dnReport = dfs.datanodeReport();
+      DataNodeReport[] reports = new DataNodeReport[dnReport.length];
+
+      for (int i = 0; i < dnReport.length; i++) {
+        reports[i] = new DataNodeReport();
+        reports[i].name = dnReport[i].getName();
+        reports[i].host = dnReport[i].getHost();
+        reports[i].capacity = dnReport[i].getCapacity();
+        reports[i].remaining = dnReport[i].getRemaining();
+        reports[i].lastUpdate = dnReport[i].lastUpdate();
+      }
+      return reports;
     }
 }

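A hedged usage sketch of the restored client-side signature; the Configuration/FileSystem.get lookup is an assumption for illustration, not part of this patch:

    // Illustrative only: fetch and print one report per live datanode,
    // the same pattern DFSShell's report() uses.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    DataNodeReport info[] = dfs.getDataNodeStats();
    for (int i = 0; i < info.length; i++) {
        System.out.println(info[i]);   // DataNodeReport.toString()
        System.out.println();
    }
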
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Jun 28 07:06:45 2006
@@ -49,7 +49,6 @@
     //
     // Stores the block-->datanode(s) map.  Updated only in response
     // to client-sent information.
-    // Mapping: Block -> TreeSet<DatanodeDescriptor>
     //
     TreeMap blocksMap = new TreeMap();
 
@@ -57,10 +56,10 @@
     // Stores the datanode-->block map.  Done by storing a 
     // set of datanode info objects, sorted by name.  Updated only in
     // response to client-sent information.
-    // Mapping: StorageID -> DatanodeDescriptor
     //
     TreeMap datanodeMap = new TreeMap();
 
+    
     //
     // Stores the set of dead datanodes
     TreeMap deaddatanodeMap = new TreeMap();
@@ -69,7 +68,6 @@
     // Keeps a Vector for every named machine.  The Vector contains
     // blocks that have recently been invalidated and are thought to live
     // on the machine in question.
-    // Mapping: StorageID -> Vector<Block>
     //
     TreeMap recentInvalidateSets = new TreeMap();
 
@@ -77,20 +75,19 @@
     // Keeps a TreeSet for every named node.  Each treeset contains
     // a list of the blocks that are "extra" at that location.  We'll
     // eventually remove these extras.
-    // Mapping: Block -> TreeSet<DatanodeDescriptor>
     //
     TreeMap excessReplicateMap = new TreeMap();
 
     //
     // Keeps track of files that are being created, plus the
     // blocks that make them up.
-    // Mapping: fileName -> FileUnderConstruction
+    //
+    // Maps file names to FileUnderConstruction objects
     //
     TreeMap pendingCreates = new TreeMap();
 
     //
     // Keeps track of the blocks that are part of those pending creates
-    // Set of: Block
     //
     TreeSet pendingCreateBlocks = new TreeSet();
 
@@ -111,14 +108,14 @@
     Random r = new Random();
 
     //
-    // Stores a set of DatanodeDescriptor objects, sorted by heartbeat
+    // Stores a set of datanode info objects, sorted by heartbeat
     //
     TreeSet heartbeats = new TreeSet(new Comparator() {
         public int compare(Object o1, Object o2) {
-            DatanodeDescriptor d1 = (DatanodeDescriptor) o1;
-            DatanodeDescriptor d2 = (DatanodeDescriptor) o2;            
-            long lu1 = d1.getLastUpdate();
-            long lu2 = d2.getLastUpdate();
+            DatanodeInfo d1 = (DatanodeInfo) o1;
+            DatanodeInfo d2 = (DatanodeInfo) o2;            
+            long lu1 = d1.lastUpdate();
+            long lu2 = d2.lastUpdate();
             if (lu1 < lu2) {
                 return -1;
             } else if (lu1 > lu2) {
@@ -132,17 +129,14 @@
     //
     // Store set of Blocks that need to be replicated 1 or more times.
     // We also store pending replication-orders.
-    // Set of: Block
     //
     private TreeSet neededReplications = new TreeSet();
     private TreeSet pendingReplications = new TreeSet();
 
     //
     // Used for handling lock-leases
-    // Mapping: leaseHolder -> Lease
     //
     private TreeMap leases = new TreeMap();
-    // Set of: Lease
     private TreeSet sortedLeases = new TreeSet();
 
     //
@@ -252,17 +246,17 @@
         Block blocks[] = dir.getFile(src);
         if (blocks != null) {
             results = new Object[2];
-            DatanodeDescriptor machineSets[][] = new DatanodeDescriptor[blocks.length][];
+            DatanodeInfo machineSets[][] = new DatanodeInfo[blocks.length][];
 
             for (int i = 0; i < blocks.length; i++) {
                 TreeSet containingNodes = (TreeSet) blocksMap.get(blocks[i]);
                 if (containingNodes == null) {
-                    machineSets[i] = new DatanodeDescriptor[0];
+                    machineSets[i] = new DatanodeInfo[0];
                 } else {
-                    machineSets[i] = new DatanodeDescriptor[containingNodes.size()];
+                    machineSets[i] = new DatanodeInfo[containingNodes.size()];
                     int j = 0;
                     for (Iterator it = containingNodes.iterator(); it.hasNext(); j++) {
-                        machineSets[i][j] = (DatanodeDescriptor) it.next();
+                        machineSets[i][j] = (DatanodeInfo) it.next();
                     }
                 }
             }
@@ -388,7 +382,7 @@
         }
 
         // Get the array of replication targets 
-        DatanodeDescriptor targets[] = chooseTargets(replication, null, 
+        DatanodeInfo targets[] = chooseTargets(replication, null, 
                                                clientMachine, blockSize);
         if (targets.length < this.minReplication) {
             throw new IOException("failed to create file "+src
@@ -470,7 +464,7 @@
         }
         
         // Get the array of replication targets 
-        DatanodeDescriptor targets[] = chooseTargets(pendingFile.getReplication(), 
+        DatanodeInfo targets[] = chooseTargets(pendingFile.getReplication(), 
             null, pendingFile.getClientMachine(), pendingFile.getBlockSize());
         if (targets.length < this.minReplication) {
           throw new IOException("File " + src + " could only be replicated to " +
@@ -574,7 +568,7 @@
         for (int i = 0; i < nrBlocks; i++) {
             Block b = (Block)pendingBlocks[i];
             TreeSet containingNodes = (TreeSet) blocksMap.get(b);
-            DatanodeDescriptor node = (DatanodeDescriptor) containingNodes.first();
+            DatanodeInfo node = (DatanodeInfo) containingNodes.first();
             for (Iterator it = node.getBlockIterator(); it.hasNext(); ) {
                 Block cur = (Block) it.next();
                 if (b.getBlockId() == cur.getBlockId()) {
@@ -706,7 +700,7 @@
                 TreeSet containingNodes = (TreeSet) blocksMap.get(b);
                 if (containingNodes != null) {
                     for (Iterator it = containingNodes.iterator(); it.hasNext(); ) {
-                        DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+                        DatanodeInfo node = (DatanodeInfo) it.next();
                         Vector invalidateSet = (Vector) recentInvalidateSets.get(node.getStorageID());
                         if (invalidateSet == null) {
                             invalidateSet = new Vector();
@@ -801,7 +795,7 @@
                 Vector v = new Vector();
                 if (containingNodes != null) {
                   for (Iterator it =containingNodes.iterator(); it.hasNext();) {
-                    DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
+                    DatanodeInfo cur = (DatanodeInfo) it.next();
                     v.add(new UTF8( cur.getHost() ));
                   }
                 }
@@ -1051,8 +1045,8 @@
           + " storage " + nodeReg.getStorageID() );
 
       nodeReg.registrationID = getRegistrationID();
-      DatanodeDescriptor nodeS = (DatanodeDescriptor)datanodeMap.get(nodeReg.getStorageID());
-      DatanodeDescriptor nodeN = getDatanodeByName( nodeReg.getName() );
+      DatanodeInfo nodeS = (DatanodeInfo)datanodeMap.get(nodeReg.getStorageID());
+      DatanodeInfo nodeN = getDatanodeByName( nodeReg.getName() );
       
       if( nodeN != null && nodeS != null && nodeN == nodeS ) {
         // The same datanode has been just restarted to serve the same data 
@@ -1084,7 +1078,7 @@
         }
         // register new datanode
         datanodeMap.put(nodeReg.getStorageID(), 
-                        new DatanodeDescriptor( nodeReg ) ) ;
+                        new DatanodeInfo( nodeReg ) ) ;
         NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             + "node registered." );
@@ -1143,13 +1137,13 @@
         synchronized (datanodeMap) {
           long capacityDiff = 0;
           long remainingDiff = 0;
-          DatanodeDescriptor nodeinfo = getDatanode( nodeID );
+          DatanodeInfo nodeinfo = getDatanode( nodeID );
           deaddatanodeMap.remove(nodeID.getName());
 
           if (nodeinfo == null) {
             NameNode.stateChangeLog.debug("BLOCK* NameSystem.gotHeartbeat: "
                     +"brand-new heartbeat from "+nodeID.getName() );
-            nodeinfo = new DatanodeDescriptor(nodeID, capacity, remaining);
+            nodeinfo = new DatanodeInfo(nodeID, capacity, remaining);
             datanodeMap.put(nodeinfo.getStorageID(), nodeinfo);
             capacityDiff = capacity;
             remainingDiff = remaining;
@@ -1185,12 +1179,12 @@
 
     /**
      * remove a datanode info
-     * @param nodeID datanode ID
+     * @param name: datanode name
      * @author hairong
      */
     synchronized public void removeDatanode( DatanodeID nodeID ) 
     throws IOException {
-      DatanodeDescriptor nodeInfo = getDatanode( nodeID );
+      DatanodeInfo nodeInfo = getDatanode( nodeID );
       if (nodeInfo != null) {
         removeDatanode( nodeInfo );
       } else {
@@ -1201,10 +1195,10 @@
   
   /**
    * remove a datanode info
-   * @param nodeInfo datanode info
+   * @param nodeInfo: datanode info
    * @author hairong
    */
-    private void removeDatanode( DatanodeDescriptor nodeInfo ) {
+    private void removeDatanode( DatanodeInfo nodeInfo ) {
       heartbeats.remove(nodeInfo);
       datanodeMap.remove(nodeInfo.getStorageID());
       deaddatanodeMap.put(nodeInfo.getName(), nodeInfo);
@@ -1225,19 +1219,17 @@
      */
     synchronized void heartbeatCheck() {
       synchronized (heartbeats) {
-        DatanodeDescriptor nodeInfo = null;
+        DatanodeInfo nodeInfo = null;
 
         while ((heartbeats.size() > 0) &&
-               ((nodeInfo = (DatanodeDescriptor) heartbeats.first()) != null) &&
-               (nodeInfo.isDead())) {
+               ((nodeInfo = (DatanodeInfo) heartbeats.first()) != null) &&
+               (nodeInfo.lastUpdate() < System.currentTimeMillis() - EXPIRE_INTERVAL)) {
           NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
               + "lost heartbeat from " + nodeInfo.getName());
           removeDatanode( nodeInfo );
-          /* SHV
           if (heartbeats.size() > 0) {
-              nodeInfo = (DatanodeDescriptor) heartbeats.first();
+              nodeInfo = (DatanodeInfo) heartbeats.first();
           }
-          */
         }
       }
     }
@@ -1251,7 +1243,7 @@
                                             ) throws IOException {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: "
           +"from "+nodeID.getName()+" "+newReport.length+" blocks" );
-        DatanodeDescriptor node = getDatanode( nodeID );
+        DatanodeInfo node = getDatanode( nodeID );
 
         //
         // Modify the (block-->datanode) map, according to the difference
@@ -1321,7 +1313,7 @@
      * Modify (block-->datanode) map.  Remove block from set of 
      * needed replications if this takes care of the problem.
      */
-    synchronized void addStoredBlock(Block block, DatanodeDescriptor node) {
+    synchronized void addStoredBlock(Block block, DatanodeInfo node) {
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
         if (containingNodes == null) {
             containingNodes = new TreeSet();
@@ -1377,7 +1369,7 @@
         return;
       Vector nonExcess = new Vector();
       for (Iterator it = containingNodes.iterator(); it.hasNext(); ) {
-          DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
+          DatanodeInfo cur = (DatanodeInfo) it.next();
           TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(cur.getStorageID());
           if (excessBlocks == null || ! excessBlocks.contains(block)) {
               nonExcess.add(cur);
@@ -1398,7 +1390,7 @@
     void chooseExcessReplicates(Vector nonExcess, Block b, short replication) {
         while (nonExcess.size() - replication > 0) {
             int chosenNode = r.nextInt(nonExcess.size());
-            DatanodeDescriptor cur = (DatanodeDescriptor) nonExcess.elementAt(chosenNode);
+            DatanodeInfo cur = (DatanodeInfo) nonExcess.elementAt(chosenNode);
             nonExcess.removeElementAt(chosenNode);
 
             TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(cur.getStorageID());
@@ -1434,7 +1426,7 @@
      * Modify (block-->datanode) map.  Possibly generate 
      * replication tasks, if the removed block is still valid.
      */
-    synchronized void removeStoredBlock(Block block, DatanodeDescriptor node) {
+    synchronized void removeStoredBlock(Block block, DatanodeInfo node) {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
                 +block.getBlockName() + " from "+node.getName() );
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
@@ -1479,7 +1471,7 @@
     public synchronized void blockReceived( DatanodeID nodeID,  
                                             Block block
                                           ) throws IOException {
-        DatanodeDescriptor node = getDatanode( nodeID );
+        DatanodeInfo node = getDatanode( nodeID );
         if (node == null) {
             NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: "
              + block.getBlockName() + " is received from an unrecorded node " 
@@ -1517,14 +1509,14 @@
 
     /**
      */
-    public DatanodeDescriptor[] datanodeReport() {
-        DatanodeDescriptor results[] = null;
+    public DatanodeInfo[] datanodeReport() {
+        DatanodeInfo results[] = null;
         synchronized (heartbeats) {
             synchronized (datanodeMap) {
-                results = new DatanodeDescriptor[datanodeMap.size()];
+                results = new DatanodeInfo[datanodeMap.size()];
                 int i = 0;
                 for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-                    DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
+                    DatanodeInfo cur = (DatanodeInfo) it.next();
                     results[i++] = cur;
                 }
             }
@@ -1545,9 +1537,9 @@
     }
     /** 
      */
-    public DatanodeDescriptor getDataNodeInfo(String name) {
+    public DatanodeInfo getDataNodeInfo(String name) {
         UTF8 src = new UTF8(name);
-        return (DatanodeDescriptor)datanodeMap.get(src);
+        return (DatanodeInfo)datanodeMap.get(src);
     }
     /** 
      */
@@ -1599,11 +1591,11 @@
      *
      * The Array that we return consists of two objects:
      * The 1st elt is an array of Blocks.
-     * The 2nd elt is a 2D array of DatanodeDescriptor objs, identifying the
+     * The 2nd elt is a 2D array of DatanodeInfo objs, identifying the
      *     target sequence for the Block at the appropriate index.
      *
      */
-    public synchronized Object[] pendingTransfers(DatanodeDescriptor srcNode,
+    public synchronized Object[] pendingTransfers(DatanodeInfo srcNode,
                                                   int xmitsInProgress) {
     synchronized (neededReplications) {
       Object results[] = null;
@@ -1638,7 +1630,7 @@
             // not be scheduled for removal on that node
             if (containingNodes.contains(srcNode)
                 && (excessBlocks == null || ! excessBlocks.contains(block))) {
-              DatanodeDescriptor targets[] = chooseTargets(
+              DatanodeInfo targets[] = chooseTargets(
                   Math.min( fileINode.getReplication() - containingNodes.size(),
                             this.maxReplicationStreams - xmitsInProgress), 
                   containingNodes, null, blockSize);
@@ -1662,8 +1654,8 @@
           int i = 0;
           for (Iterator it = replicateBlocks.iterator(); it.hasNext(); i++) {
             Block block = (Block) it.next();
-            DatanodeDescriptor targets[] = 
-                      (DatanodeDescriptor[]) replicateTargetSets.elementAt(i);
+            DatanodeInfo targets[] = 
+                      (DatanodeInfo[]) replicateTargetSets.elementAt(i);
             TreeSet containingNodes = (TreeSet) blocksMap.get(block);
 
             if (containingNodes.size() + targets.length >= 
@@ -1692,10 +1684,10 @@
           //
           // Build returned objects from above lists
           //
-          DatanodeDescriptor targetMatrix[][] = 
-                        new DatanodeDescriptor[replicateTargetSets.size()][];
+          DatanodeInfo targetMatrix[][] = 
+                        new DatanodeInfo[replicateTargetSets.size()][];
           for (i = 0; i < targetMatrix.length; i++) {
-            targetMatrix[i] = (DatanodeDescriptor[]) replicateTargetSets.elementAt(i);
+            targetMatrix[i] = (DatanodeInfo[]) replicateTargetSets.elementAt(i);
           }
 
           results = new Object[2];
@@ -1713,10 +1705,10 @@
      * @param desiredReplicates
      *          number of duplicates wanted.
      * @param forbiddenNodes
-     *          of DatanodeDescriptor instances that should not be considered targets.
-     * @return array of DatanodeDescriptor instances uses as targets.
+     *          of DatanodeInfo instances that should not be considered targets.
+     * @return array of DatanodeInfo instances uses as targets.
      */
-    DatanodeDescriptor[] chooseTargets(int desiredReplicates, TreeSet forbiddenNodes,
+    DatanodeInfo[] chooseTargets(int desiredReplicates, TreeSet forbiddenNodes,
                                  UTF8 clientMachine, long blockSize) {
         if (desiredReplicates > datanodeMap.size()) {
           LOG.warn("Replication requested of "+desiredReplicates
@@ -1729,14 +1721,14 @@
         Vector targets = new Vector();
 
         for (int i = 0; i < desiredReplicates; i++) {
-            DatanodeDescriptor target = chooseTarget(forbiddenNodes, alreadyChosen, 
+            DatanodeInfo target = chooseTarget(forbiddenNodes, alreadyChosen, 
                                                clientMachine, blockSize);
             if (target == null)
               break; // calling chooseTarget again won't help
             targets.add(target);
             alreadyChosen.add(target);
         }
-        return (DatanodeDescriptor[]) targets.toArray(new DatanodeDescriptor[targets.size()]);
+        return (DatanodeInfo[]) targets.toArray(new DatanodeInfo[targets.size()]);
     }
 
     /**
@@ -1746,12 +1738,12 @@
      * Right now it chooses randomly from available boxes.  In future could 
      * choose according to capacity and load-balancing needs (or even 
      * network-topology, to avoid inter-switch traffic).
-     * @param forbidden1 DatanodeDescriptor targets not allowed, null allowed.
-     * @param forbidden2 DatanodeDescriptor targets not allowed, null allowed.
-     * @return DatanodeDescriptor instance to use or null if something went wrong
+     * @param forbidden1 DatanodeInfo targets not allowed, null allowed.
+     * @param forbidden2 DatanodeInfo targets not allowed, null allowed.
+     * @return DatanodeInfo instance to use or null if something went wrong
      * (a log message is emitted if null is returned).
      */
-    DatanodeDescriptor chooseTarget(TreeSet forbidden1, TreeSet forbidden2, 
+    DatanodeInfo chooseTarget(TreeSet forbidden1, TreeSet forbidden2, 
                               UTF8 clientMachine, long blockSize) {
         //
         // Check if there are any available targets at all
@@ -1768,13 +1760,13 @@
         TreeSet forbiddenMachines = new TreeSet();
         if (forbidden1 != null) {
             for (Iterator it = forbidden1.iterator(); it.hasNext(); ) {
-                DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
+                DatanodeInfo cur = (DatanodeInfo) it.next();
                 forbiddenMachines.add(cur.getHost());
             }
         }
         if (forbidden2 != null) {
             for (Iterator it = forbidden2.iterator(); it.hasNext(); ) {
-                DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
+                DatanodeInfo cur = (DatanodeInfo) it.next();
                 forbiddenMachines.add(cur.getHost());
             }
         }
@@ -1784,7 +1776,7 @@
         //
         Vector targetList = new Vector();
         for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-            DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+            DatanodeInfo node = (DatanodeInfo) it.next();
             if (! forbiddenMachines.contains(node.getHost())) {
                 targetList.add(node);
             }
@@ -1801,7 +1793,7 @@
             //
             if (clientMachine != null && clientMachine.getLength() > 0) {
                 for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                    DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+                    DatanodeInfo node = (DatanodeInfo) it.next();
                     if (clientMachine.equals(node.getHost())) {
                         if (node.getRemaining() > blockSize * MIN_BLOCKS_FOR_WRITE) {
                             return node;
@@ -1814,7 +1806,7 @@
             // Otherwise, choose node according to target capacity
             //
             for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+                DatanodeInfo node = (DatanodeInfo) it.next();
                 if (node.getRemaining() > blockSize * MIN_BLOCKS_FOR_WRITE) {
                     return node;
                 }
@@ -1826,7 +1818,7 @@
             // a last resort, pick the first valid one we can find.
             //
             for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+                DatanodeInfo node = (DatanodeInfo) it.next();
                 if (node.getRemaining() > blockSize) {
                     return node;
                 }
@@ -1894,12 +1886,12 @@
      * Get data node by storage ID.
      * 
      * @param nodeID
-     * @return DatanodeDescriptor or null if the node is not found.
+     * @return DatanodeInfo or null if the node is not found.
      * @throws IOException
      */
-    public DatanodeDescriptor getDatanode( DatanodeID nodeID ) throws IOException {
+    public DatanodeInfo getDatanode( DatanodeID nodeID ) throws IOException {
       UnregisteredDatanodeException e = null;
-      DatanodeDescriptor node = (DatanodeDescriptor) datanodeMap.get(nodeID.getStorageID());
+      DatanodeInfo node = (DatanodeInfo) datanodeMap.get(nodeID.getStorageID());
       if (node == null) 
         return null;
       if (!node.getName().equals(nodeID.getName())) {
@@ -1919,12 +1911,12 @@
      * Otherwise an additional tree-like structure will be required.
      * 
      * @param name
-     * @return DatanodeDescriptor if found or null otherwise 
+     * @return DatanodeInfo if found or null otherwise 
      * @throws IOException
      */
-    public DatanodeDescriptor getDatanodeByName( String name ) throws IOException {
+    public DatanodeInfo getDatanodeByName( String name ) throws IOException {
       for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-        DatanodeDescriptor node = (DatanodeDescriptor) it.next();
+        DatanodeInfo node = (DatanodeInfo) it.next();
         if( node.getName().equals(name) )
            return node;
       }

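The comparator restored above keeps heartbeats ordered by lastUpdate(), stalest node first, which is what lets heartbeatCheck() expire nodes from the front of the set. A restatement of that loop for clarity (EXPIRE_INTERVAL and removeDatanode are the class's own; nothing new is introduced):

    // Because the TreeSet is ordered by lastUpdate(), first() is always
    // the stalest node, so the scan can stop at the first fresh one.
    while (heartbeats.size() > 0) {
        DatanodeInfo oldest = (DatanodeInfo) heartbeats.first();
        if (oldest.lastUpdate() >=
                System.currentTimeMillis() - EXPIRE_INTERVAL)
            break;                 // every later node is fresher
        removeDatanode(oldest);    // also drops it from heartbeats
    }
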
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Wed Jun 28 07:06:45 2006
@@ -391,7 +391,7 @@
         // Ask to perform pending transfers, if any
         //
         Object xferResults[] = namesystem.pendingTransfers(
-                       new DatanodeDescriptor( nodeReg ), xmitsInProgress );
+                       new DatanodeInfo( nodeReg ), xmitsInProgress );
         if (xferResults != null) {
             return new BlockCommand((Block[]) xferResults[0], (DatanodeInfo[][]) xferResults[1]);
         }

Modified: lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp?rev=417759&r1=417758&r2=417759&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp Wed Jun 28 07:06:45 2006
@@ -20,9 +20,9 @@
     String uGb = DFSShell.limitDecimal((1.0 * u)/(1024*1024*1024), 2);
     String percentUsed = DFSShell.limitDecimal(((1.0 * u)/c)*100, 2);
     out.print("<td style=\"vertical-align: top;\"> <b>" + 
-              d.getName() +
+              d.getName().toString() +
               "</b>&nbsp;<br><i><b>LastContact:</b>" + 
-              new Date(d.getLastUpdate())+ ";&nbsp;");
+              new Date(d.lastUpdate())+ ";&nbsp;");
     out.print("<b>Total raw bytes:</b>&nbsp;" + c + "(" + cGb + 
               "&nbsp;GB);&nbsp;");
     out.print("<b>Percent used:</b>&nbsp;" + percentUsed);
@@ -53,7 +53,7 @@
         out.print("<tr>");
         generateLiveNodeData(out, l);
         out.print("<td style=\"vertical-align: top;\">" + 
-                  d.getName() +
+                  d.getName().toString() +
                   "<br></td>");
         out.print("</tr>");
       }
@@ -69,7 +69,7 @@
           DatanodeInfo d = (DatanodeInfo)dead.elementAt(i);
           out.print("<td style=\"vertical-align: top;\"><br></td>");
           out.print("<td style=\"vertical-align: top;\">" + 
-                    d.getName() +
+                    d.getName().toString() +
                     "<br></td>");
         }
         out.print("</tr>");