Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2007/10/19 07:04:01 UTC

svn commit: r586264 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/

Author: dhruba
Date: Thu Oct 18 22:03:59 2007
New Revision: 586264

URL: http://svn.apache.org/viewvc?rev=586264&view=rev
Log:
HADOOP-1604.  A system administrator can finalize namenode upgrades
without running the cluster. (Konstantin Shvachko via dhruba)
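
Note on the log entry: finalizing a NameNode upgrade is driven through the
NameNode's startup options, so the operation this change enables would be
invoked from the shell while the cluster is down, presumably along the
lines of

    bin/hadoop namenode -finalize

(the exact invocation is an assumption here, not part of the diff below).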


Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FsckServlet.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Thu Oct 18 22:03:59 2007
@@ -14,6 +14,9 @@
     HADOOP-1855.  fsck verifies block placement policies and reports
     violations.  (Konstantin Shvachko via dhruba)
 
+    HADOOP-1604.  A system administrator can finalize namenode upgrades
+    without running the cluster. (Konstantin Shvachko via dhruba)
+
 Branch 0.15 (unreleased changes)
 
   INCOMPATIBLE CHANGES

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java Thu Oct 18 22:03:59 2007
@@ -25,8 +25,6 @@
 import java.net.URLConnection;
 import java.net.URLEncoder;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.Tool;
@@ -56,7 +54,6 @@
  *  
  */
 public class DFSck extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(DFSck.class.getName());
 
   DFSck() {}
   
@@ -82,13 +79,14 @@
   public int run(String[] args) throws Exception {
     String fsName = getInfoServer();
     if (args.length == 0) {
-      System.err.println("Usage: DFSck <path> [-move | -delete] [-files] [-blocks [-locations]]");
+      System.err.println("Usage: DFSck <path> [-move | -delete] [-files [-blocks [-locations | -racks]]]");
       System.err.println("\t<path>\tstart checking from this path");
       System.err.println("\t-move\tmove corrupted files to /lost+found");
       System.err.println("\t-delete\tdelete corrupted files");
       System.err.println("\t-files\tprint out files being checked");
       System.err.println("\t-blocks\tprint out block report");
       System.err.println("\t-locations\tprint out locations for every block");
+      System.err.println("\t-racks\tprint out network topology for data-node locations");
       ToolRunner.printGenericCommandUsage(System.err);
       return -1;
     }
@@ -105,6 +103,7 @@
       else if (args[idx].equals("-files")) { url.append("&files=1"); }
       else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
       else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
+      else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
     }
     URL path = new URL(url.toString());
     URLConnection connection = path.openConnection();
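
With the new option, a run such as bin/hadoop fsck / -files -blocks -racks
makes DFSck append racks=1 to the servlet query, and NamenodeFsck (further
down) then prints rack-qualified locations. A minimal standalone sketch of
the request DFSck assembles; the namenode host is hypothetical, 50070 is
the conventional HTTP info port, and /fsck is the servlet path:

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;
    import java.net.URLEncoder;

    public class FsckRequestSketch {
      public static void main(String[] args) throws Exception {
        // Build the same kind of query string DFSck.run() produces.
        StringBuffer url = new StringBuffer("http://nn.example.com:50070/fsck?path=");
        url.append(URLEncoder.encode("/user/data", "UTF-8"));
        url.append("&blocks=1&locations=1");
        url.append("&racks=1");  // new in this revision: rack paths for each replica
        URLConnection connection = new URL(url.toString()).openConnection();
        InputStream stream = connection.getInputStream();
        int b;
        while ((b = stream.read()) >= 0) {  // the servlet streams a plain-text report
          System.out.print((char) b);
        }
        stream.close();
      }
    }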

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FsckServlet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FsckServlet.java?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FsckServlet.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FsckServlet.java Thu Oct 18 22:03:59 2007
@@ -19,7 +19,6 @@
 
 import java.util.*;
 import java.io.*;
-import org.apache.hadoop.mapred.StatusHttpServer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.conf.*;
 import org.apache.commons.logging.*;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Thu Oct 18 22:03:59 2007
@@ -21,10 +21,10 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
-import org.apache.hadoop.io.*;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.net.NetworkTopology;
 
 import java.io.*;
 import java.net.*;
@@ -707,6 +707,10 @@
    */
   public InetSocketAddress getNameNodeAddress() {
     return nameNodeAddress;
+  }
+
+  NetworkTopology getNetworkTopology() {
+    return this.namesystem.clusterMap;
   }
 
   /**
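
The accessor is kept package-private: its only caller is NamenodeFsck,
which lives in the same package and uses it to size up the cluster, as in
the hunk further down:

    // inside NamenodeFsck.check(); nn is the NameNode reference fsck holds
    res.totalRacks = nn.getNetworkTopology().getNumOfRacks();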

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Thu Oct 18 22:03:59 2007
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.dfs;
 
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintWriter;
@@ -35,8 +31,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.UTF8;
-
+import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 
 /**
  * This class provides rudimentary checking of DFS volumes for errors and
@@ -77,6 +73,7 @@
   private boolean showFiles = false;
   private boolean showBlocks = false;
   private boolean showLocations = false;
+  private boolean showRacks = false;
   private int fixing = FIXING_NONE;
   private String path = "/";
   
@@ -106,6 +103,7 @@
       else if (key.equals("files")) { this.showFiles = true; }
       else if (key.equals("blocks")) { this.showBlocks = true; }
       else if (key.equals("locations")) { this.showLocations = true; }
+      else if (key.equals("racks")) { this.showRacks = true; }
     }
   }
   
@@ -137,6 +135,9 @@
   }
   
   private void check(DFSFileInfo file, FsckResult res) throws IOException {
+    res.totalRacks = nn.getNetworkTopology().getNumOfRacks();
+    res.totalDatanodes = nn.getDatanodeReport(DatanodeReportType.LIVE).length;
+    int minReplication = FSNamesystem.getFSNamesystem().getMinReplication();
     if (file.isDir()) {
       if (showFiles) {
         out.println(file.getPath().toString() + " <dir>");
@@ -155,8 +156,8 @@
         0, fileLen);
     res.totalBlocks += blocks.locatedBlockCount();
     if (showFiles) {
-      out.print(file.getPath().toString() + " " + fileLen + ", " +
-          res.totalBlocks + " block(s): ");
+      out.print(file.getPath().toString() + " " + fileLen + " bytes, " +
+          blocks.locatedBlockCount() + " block(s): ");
     }  else {
       out.print('.');
       out.flush();
@@ -164,22 +165,26 @@
     }
     int missing = 0;
     long missize = 0;
-    int under = 0;
+    int underReplicatedPerFile = 0;
+    int misReplicatedPerFile = 0;
     StringBuffer report = new StringBuffer();
     int i = 0;
     for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
       Block block = lBlk.getBlock();
-      long id = block.getBlockId();
+      String blkName = block.getBlockName();
       DatanodeInfo[] locs = lBlk.getLocations();
+      res.totalReplicas += locs.length;
       short targetFileReplication = file.getReplication();
       if (locs.length > targetFileReplication) {
-        res.overReplicatedBlocks += (locs.length - targetFileReplication);
+        res.excessiveReplicas += (locs.length - targetFileReplication);
         res.numOverReplicatedBlocks += 1;
       }
+      if (locs.length >= minReplication)
+        res.numMinReplicatedBlocks++;
       if (locs.length < targetFileReplication && locs.length > 0) {
-        res.underReplicatedBlocks += (targetFileReplication - locs.length);
+        res.missingReplicas += (targetFileReplication - locs.length);
         res.numUnderReplicatedBlocks += 1;
-        under++;
+        underReplicatedPerFile++;
         if (!showFiles) {
           out.print("\n" + file.getPath().toString() + ": ");
         }
@@ -188,19 +193,38 @@
                     targetFileReplication + " but found " +
                     locs.length + " replica(s).");
       }
-      report.append(i + ". " + id + " len=" + block.getNumBytes());
-      if ( locs.length == 0) {
+      // verify block placement policy
+      int missingRacks = ReplicationTargetChooser.verifyBlockPlacement(
+                    lBlk, targetFileReplication, nn.getNetworkTopology());
+      if (missingRacks > 0) {
+        res.numMisReplicatedBlocks++;
+        misReplicatedPerFile++;
+        if (!showFiles) {
+          if(underReplicatedPerFile == 0)
+            out.println();
+          out.print(file.getPath().toString() + ": ");
+        }
+        out.println(" Replica placement policy is violated for " + 
+                    block.getBlockName() +
+                    ". Block should be additionally replicated on " + 
+                    missingRacks + " more rack(s).");
+      }
+      report.append(i + ". " + blkName + " len=" + block.getNumBytes());
+      if (locs.length == 0) {
         report.append(" MISSING!");
         res.addMissing(block.getBlockName(), block.getNumBytes());
         missing++;
         missize += block.getNumBytes();
       } else {
         report.append(" repl=" + locs.length);
-        if (showLocations) {
+        if (showLocations || showRacks) {
           StringBuffer sb = new StringBuffer("[");
           for (int j = 0; j < locs.length; j++) {
             if (j > 0) { sb.append(", "); }
-            sb.append(locs[j]);
+            if (showRacks)
+              sb.append(NodeBase.getPath(locs[j]));
+            else
+              sb.append(locs[j]);
           }
           sb.append(']');
           report.append(" " + sb.toString());
@@ -229,7 +253,7 @@
     if (showFiles) {
       if (missing > 0) {
         out.println(" MISSING " + missing + " blocks of total size " + missize + " B");
-      }  else if (under == 0) {
+      }  else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
         out.println(" OK");
       }
       if (showBlocks) {
@@ -457,15 +481,20 @@
     private ArrayList<String> missingIds = new ArrayList<String>();
     private long missingSize = 0L;
     private long corruptFiles = 0L;
-    private long overReplicatedBlocks = 0L;
-    private long underReplicatedBlocks = 0L;
+    private long excessiveReplicas = 0L;
+    private long missingReplicas = 0L;
     private long numOverReplicatedBlocks = 0L;
     private long numUnderReplicatedBlocks = 0L;
+    private long numMisReplicatedBlocks = 0L;  // blocks that do not satisfy block placement policy
+    private long numMinReplicatedBlocks = 0L;  // minimally replicated blocks
     private int replication = 0;
     private long totalBlocks = 0L;
     private long totalFiles = 0L;
     private long totalDirs = 0L;
     private long totalSize = 0L;
+    private long totalReplicas = 0L;
+    private int totalDatanodes = 0;
+    private int totalRacks = 0;
     
     /**
      * DFS is considered healthy if there are no missing blocks.
@@ -495,29 +524,28 @@
     }
     
     /** Return the number of over-replicated blocks. */
-    public long getOverReplicatedBlocks() {
-      return overReplicatedBlocks;
+    public long getExcessiveReplicas() {
+      return excessiveReplicas;
     }
     
-    public void setOverReplicatedBlocks(long overReplicatedBlocks) {
-      this.overReplicatedBlocks = overReplicatedBlocks;
+    public void setExcessiveReplicas(long overReplicatedBlocks) {
+      this.excessiveReplicas = overReplicatedBlocks;
     }
     
     /** Return the actual replication factor. */
     public float getReplicationFactor() {
-      if (totalBlocks != 0)
-        return (float) (totalBlocks * replication + overReplicatedBlocks - underReplicatedBlocks) / (float) totalBlocks;
-      else
+      if (totalBlocks == 0)
         return 0.0f;
+      return (float) (totalReplicas) / (float) totalBlocks;
     }
     
     /** Return the number of under-replicated blocks. Note: missing blocks are not counted here.*/
-    public long getUnderReplicatedBlocks() {
-      return underReplicatedBlocks;
+    public long getMissingReplicas() {
+      return missingReplicas;
     }
     
-    public void setUnderReplicatedBlocks(long underReplicatedBlocks) {
-      this.underReplicatedBlocks = underReplicatedBlocks;
+    public void setMissingReplicas(long underReplicatedBlocks) {
+      this.missingReplicas = underReplicatedBlocks;
     }
     
     /** Return total number of directories encountered during this scan. */
@@ -573,11 +601,11 @@
       StringBuffer res = new StringBuffer();
       res.append("Status: " + (isHealthy() ? "HEALTHY" : "CORRUPT"));
       res.append("\n Total size:\t" + totalSize + " B");
+      res.append("\n Total dirs:\t" + totalDirs);
+      res.append("\n Total files:\t" + totalFiles);
       res.append("\n Total blocks:\t" + totalBlocks);
       if (totalBlocks > 0) res.append(" (avg. block size "
                                       + (totalSize / totalBlocks) + " B)");
-      res.append("\n Total dirs:\t" + totalDirs);
-      res.append("\n Total files:\t" + totalFiles);
       if (missingSize > 0) {
         res.append("\n  ********************************");
         res.append("\n  CORRUPT FILES:\t" + corruptFiles);
@@ -585,12 +613,20 @@
         res.append("\n  MISSING SIZE:\t\t" + missingSize + " B");
         res.append("\n  ********************************");
       }
+      res.append("\n Minimally replicated blocks:\t" + numMinReplicatedBlocks);
+      if (totalBlocks > 0)        res.append(" (" + ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
       res.append("\n Over-replicated blocks:\t" + numOverReplicatedBlocks);
-      if (totalBlocks > 0)        res.append(" (" + ((float) (overReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
+      if (totalBlocks > 0)        res.append(" (" + ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
       res.append("\n Under-replicated blocks:\t" + numUnderReplicatedBlocks);
       if (totalBlocks > 0)        res.append(" (" + ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
-      res.append("\n Target replication factor:\t" + replication);
-      res.append("\n Real replication factor:\t" + getReplicationFactor());
+      res.append("\n Mis-replicated blocks:\t\t" + numMisReplicatedBlocks);
+      if (totalBlocks > 0)        res.append(" (" + ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
+      res.append("\n Default replication factor:\t" + replication);
+      res.append("\n Average block replication:\t" + getReplicationFactor());
+      res.append("\n Missing replicas:\t\t" + missingReplicas);
+      if (totalReplicas > 0)        res.append(" (" + ((float) (missingReplicas * 100) / (float) totalReplicas) + " %)");
+      res.append("\n Number of data-nodes:\t\t" + totalDatanodes);
+      res.append("\n Number of racks:\t\t" + totalRacks);
       return res.toString();
     }
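
The replication-factor computation in the summary also changes: instead of
reconstructing the average from the default replication plus the over- and
under-replication counters, it now divides the replicas actually seen
during the scan by the block count. A self-contained sketch with assumed
numbers:

    public class ReplicationFactorSketch {
      public static void main(String[] args) {
        // Hypothetical scan: four blocks observed with 3, 3, 2 and 4 replicas.
        long totalBlocks = 4;
        long totalReplicas = 3 + 3 + 2 + 4;   // accumulated per block in check()
        float avg = (totalBlocks == 0) ? 0.0f
                  : (float) totalReplicas / (float) totalBlocks;
        System.out.println("Average block replication: " + avg);  // 3.0
      }
    }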
     

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java?rev=586264&r1=586263&r2=586264&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java Thu Oct 18 22:03:59 2007
@@ -471,5 +471,49 @@
     }
     return nodes;
   }
+
+  /**
+   * Verify that the block is replicated on at least 2 different racks
+   * if there is more than one rack in the system.
+   * 
+   * @param lBlk block with locations
+   * @param cluster the cluster's network topology
+   * @return 1 if the block must be replicated on an additional rack,
+   * or 0 if the number of racks is sufficient.
+   */
+  public static int verifyBlockPlacement(LocatedBlock lBlk,
+                                         short replication,
+                                         NetworkTopology cluster) {
+    int numRacks = verifyBlockPlacement(lBlk, Math.min(2,replication), cluster);
+    return numRacks < 0 ? 0 : numRacks;
+  }
+
+  /**
+   * Verify that the block is replicated on at least minRacks different racks
+   * if there are more than minRacks racks in the system.
+   * 
+   * @param lBlk block with locations
+   * @param minRacks number of racks the block should be replicated to
+   * @param cluster the cluster's network topology
+   * @return the difference between the required and the actual number of racks
+   * the block is replicated to.
+   */
+  public static int verifyBlockPlacement(LocatedBlock lBlk,
+                                         int minRacks,
+                                         NetworkTopology cluster) {
+    DatanodeInfo[] locs = lBlk.getLocations();
+    if (locs == null)
+      locs = new DatanodeInfo[0];
+    int numRacks = cluster.getNumOfRacks();
+    if(numRacks <= 1) // only one rack
+      return 0;
+    minRacks = Math.min(minRacks, numRacks);
+    // 1. Check that all locations are different.
+    // 2. Count locations on different racks.
+    Set<String> racks = new TreeSet<String>();
+    for (DatanodeInfo dn : locs)
+      racks.add(dn.getNetworkLocation());
+    return minRacks - racks.size();
+  }
 } //end of Replicator
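
To illustrate the return value of the new check, take an assumed cluster
with three racks and a block with replication factor 3 whose replicas all
landed on one rack: min(2, 3) = 2 distinct racks are required, one is
found, so one more rack is reported missing. A self-contained sketch of
that arithmetic:

    import java.util.Set;
    import java.util.TreeSet;

    public class PlacementCheckSketch {
      public static void main(String[] args) {
        int numRacksInCluster = 3;               // assumed cluster topology
        short replication = 3;                   // the file's target replication
        int minRacks = Math.min(2, replication); // at most 2 distinct racks required
        minRacks = Math.min(minRacks, numRacksInCluster);
        // All replicas report the same (assumed) network location,
        // so the set collapses to a single rack.
        Set<String> racks = new TreeSet<String>();
        racks.add("/rack1");
        racks.add("/rack1");
        racks.add("/rack1");
        System.out.println("Missing racks: " + (minRacks - racks.size()));  // 1
      }
    }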