Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/06 07:07:38 UTC

svn commit: r1310174 [2/3] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: dev-support/ hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoo...

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Apr  6 05:07:33 2012
@@ -164,6 +164,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
@@ -346,10 +347,27 @@ public class FSNamesystem implements Nam
    * @throws IOException if loading fails
    */
   public static FSNamesystem loadFromDisk(Configuration conf)
-    throws IOException {
+      throws IOException {
     Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
     List<URI> namespaceEditsDirs = 
       FSNamesystem.getNamespaceEditsDirs(conf);
+    return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
+  }
+
+  /**
+   * Instantiates an FSNamesystem loaded from the given image and
+   * edits directories.
+   * 
+   * @param conf the Configuration which specifies the storage directories
+   *             from which to load
+   * @param namespaceDirs directories from which to load the fsimage
+   * @param namespaceEditsDirs directories from which to load the edits
+   * @return an FSNamesystem which contains the loaded namespace
+   * @throws IOException if loading fails
+   */
+  public static FSNamesystem loadFromDisk(Configuration conf,
+      Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
+      throws IOException {
 
     if (namespaceDirs.size() == 1) {
       LOG.warn("Only one " + DFS_NAMENODE_NAME_DIR_KEY
@@ -370,8 +388,10 @@ public class FSNamesystem implements Nam
       HAUtil.isHAEnabled(conf, nameserviceId));
     long timeTakenToLoadFSImage = now() - loadStart;
     LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
-    NameNode.getNameNodeMetrics().setFsImageLoadTime(
-                              (int) timeTakenToLoadFSImage);
+    NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
+    if (nnMetrics != null) {
+      nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
+    }
     return namesystem;
   }
 

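The FSNamesystem change above splits loadFromDisk() in two: the original entry point still resolves its directories from the Configuration, while the new overload accepts explicit image and edits directories. The metrics call is also null-guarded, since the new overload can now run before NameNode metrics exist (e.g. from a standalone tool). A minimal caller sketch, using only methods visible in this commit:

    Configuration conf = new HdfsConfiguration();
    // Load the namespace but exclude the shared edits dirs, as
    // initializeSharedEdits() below does:
    FSNamesystem fsns = FSNamesystem.loadFromDisk(conf,
        FSNamesystem.getNamespaceDirs(conf),
        FSNamesystem.getNamespaceEditsDirs(conf, false));
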
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Fri Apr  6 05:07:33 2012
@@ -62,7 +62,7 @@ public class FileChecksumServlets {
           ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
       final String scheme = request.getScheme();
       final int port = "https".equals(scheme)
-          ? (Integer)getServletContext().getAttribute("datanode.https.port")
+          ? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY)
           : host.getInfoPort();
       final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Fri Apr  6 05:07:33 2012
@@ -27,6 +27,7 @@ import javax.servlet.http.HttpServletRes
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -62,7 +63,7 @@ public class FileDataServlet extends Dfs
       hostname = host.getIpAddr();
     }
     final int port = "https".equals(scheme)
-      ? (Integer)getServletContext().getAttribute("datanode.https.port")
+      ? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY)
       : host.getInfoPort();
 
     String dtParam = "";

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Apr  6 05:07:33 2012
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -58,6 +60,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -70,6 +73,9 @@ import org.apache.hadoop.tools.GetUserMa
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+
 /**********************************************************
  * NameNode serves as both directory namespace manager and
  * "inode table" for the Hadoop DFS.  There is a single NameNode
@@ -156,7 +162,8 @@ public class NameNode {
     DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
+    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
+    DFS_HA_FENCE_METHODS_KEY
   };
   
   public long getProtocolVersion(String protocol, 
@@ -729,6 +736,67 @@ public class NameNode {
                 + "to true in order to format this filesystem");
     }
   }
+  
+  @VisibleForTesting
+  public static boolean initializeSharedEdits(Configuration conf) {
+    return initializeSharedEdits(conf, true);
+  }
+  
+  @VisibleForTesting
+  public static boolean initializeSharedEdits(Configuration conf,
+      boolean force) {
+    return initializeSharedEdits(conf, force, false);
+  }
+  
+  /**
+   * Format a new shared edits dir.
+   * 
+   * @param conf configuration
+   * @param force format regardless of whether or not the shared edits dir exists
+   * @param interactive prompt the user when a dir exists
+   * @return true if the command aborts, false otherwise
+   */
+  private static boolean initializeSharedEdits(Configuration conf,
+      boolean force, boolean interactive) {
+    NNStorage existingStorage = null;
+    try {
+      FSNamesystem fsns = FSNamesystem.loadFromDisk(conf,
+          FSNamesystem.getNamespaceDirs(conf),
+          FSNamesystem.getNamespaceEditsDirs(conf, false));
+      
+      existingStorage = fsns.getFSImage().getStorage();
+      
+      Collection<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
+      if (!confirmFormat(sharedEditsDirs, force, interactive)) {
+        return true; // aborted
+      }
+      NNStorage newSharedStorage = new NNStorage(conf,
+          Lists.<URI>newArrayList(),
+          sharedEditsDirs);
+      
+      newSharedStorage.format(new NamespaceInfo(
+          existingStorage.getNamespaceID(),
+          existingStorage.getClusterID(),
+          existingStorage.getBlockPoolID(),
+          existingStorage.getCTime(),
+          existingStorage.getDistributedUpgradeVersion()));
+    } catch (Exception e) {
+      LOG.error("Could not format shared edits dir", e);
+      return true; // aborted
+    } finally {
+      // Have to unlock storage explicitly for the case when we're running in a
+      // unit test, which runs in the same JVM as NNs.
+      if (existingStorage != null) {
+        try {
+          existingStorage.unlockAll();
+        } catch (IOException ioe) {
+          LOG.warn("Could not unlock storage directories", ioe);
+          return true; // aborted
+        }
+      }
+    }
+    return false; // did not abort
+  }
 
   private static boolean finalize(Configuration conf,
                                boolean isConfirmationNeeded
@@ -763,7 +831,8 @@ public class NameNode {
       StartupOption.ROLLBACK.getName() + "] | [" +
       StartupOption.FINALIZE.getName() + "] | [" +
       StartupOption.IMPORT.getName() + "] | [" +
-      StartupOption.BOOTSTRAPSTANDBY.getName() + "]");
+      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
+      StartupOption.INITIALIZESHAREDEDITS.getName() + "]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -804,6 +873,9 @@ public class NameNode {
       } else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.BOOTSTRAPSTANDBY;
         return startOpt;
+      } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.INITIALIZESHAREDEDITS;
+        return startOpt;
       } else {
         return null;
       }
@@ -868,29 +940,39 @@ public class NameNode {
     }
 
     switch (startOpt) {
-      case FORMAT:
+      case FORMAT: {
         boolean aborted = format(conf, false);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
-      case GENCLUSTERID:
+      }
+      case GENCLUSTERID: {
         System.err.println("Generating new cluster id:");
         System.out.println(NNStorage.newClusterID());
         System.exit(0);
         return null;
-      case FINALIZE:
-        aborted = finalize(conf, true);
+      }
+      case FINALIZE: {
+        boolean aborted = finalize(conf, true);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
-      case BOOTSTRAPSTANDBY:
+      }
+      case BOOTSTRAPSTANDBY: {
         String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
         int rc = BootstrapStandby.run(toolArgs, conf);
         System.exit(rc);
         return null; // avoid warning
+      }
+      case INITIALIZESHAREDEDITS: {
+        boolean aborted = initializeSharedEdits(conf, false, true);
+        System.exit(aborted ? 1 : 0);
+        return null; // avoid warning
+      }
       case BACKUP:
-      case CHECKPOINT:
+      case CHECKPOINT: {
         NamenodeRole role = startOpt.toNodeRole();
         DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
         return new BackupNode(conf, role);
+      }
       default:
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);

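Taken together, the NameNode changes wire a new INITIALIZESHAREDEDITS startup option through the usage text, parseArguments() and the createNameNode() switch (whose cases are now individually braced so each can declare its own locals), backed by the initializeSharedEdits() methods that format shared edits dirs from an existing namespace. From the command line this is driven by the option string returned by StartupOption.INITIALIZESHAREDEDITS.getName() (not shown in this diff); from a test it can be invoked directly, sketched as:

    // Force-format the shared edits dirs, non-interactively, via the
    // @VisibleForTesting overload added above; a sketch, not from the commit.
    Configuration conf = new HdfsConfiguration();
    boolean aborted = NameNode.initializeSharedEdits(conf);
    assertFalse("initializing shared edits should not abort", aborted);
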
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Fri Apr  6 05:07:33 2012
@@ -165,10 +165,11 @@ public class NameNodeHttpServer {
             httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
                 useKrb);
             // assume same ssl port for all datanodes
-            InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
-                .get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
-            httpServer.setAttribute("datanode.https.port", datanodeSslPort
-                .getPort());
+            InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
+                conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
+                    infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
+            httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
+                datanodeSslPort.getPort());
           }
           httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
           httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,

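This hunk is the producer side of the "datanode.https.port" cleanup: the NameNode HTTP server publishes the datanode SSL port as a servlet-context attribute, and FileChecksumServlets/FileDataServlet above consume it. Keying both ends off DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY removes the chance of a silent mismatch, where getAttribute() on a mistyped key returns null and the (Integer) unboxing throws a NullPointerException. The contract, restated as a sketch:

    // producer (NameNodeHttpServer):
    httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
        datanodeSslPort.getPort());
    // consumer (the servlets): same constant, so the lookup cannot drift
    int port = (Integer) getServletContext()
        .getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
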
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Fri Apr  6 05:07:33 2012
@@ -24,6 +24,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -52,18 +53,9 @@ implements Writable, NodeRegistration {
   private StorageInfo storageInfo;
   private ExportedBlockKeys exportedKeys;
 
-  /**
-   * Default constructor.
-   */
   public DatanodeRegistration() {
-    this("");
-  }
-  
-  /**
-   * Create DatanodeRegistration
-   */
-  public DatanodeRegistration(String ipAddr) {
-    this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
+    this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        new StorageInfo(), new ExportedBlockKeys());
   }
   
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@@ -72,10 +64,14 @@ implements Writable, NodeRegistration {
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
-  
-  public DatanodeRegistration(String ipAddr, StorageInfo info,
+
+  public DatanodeRegistration(String ipAddr, int xferPort) {
+    this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
+  }
+
+  public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
       ExportedBlockKeys keys) {
-    super(ipAddr);
+    super(ipAddr, xferPort);
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
@@ -114,7 +110,7 @@ implements Writable, NodeRegistration {
   @Override
   public String toString() {
     return getClass().getSimpleName()
-      + "(" + ipAddr
+      + "(" + getIpAddr()
       + ", storageID=" + storageID
       + ", infoPort=" + infoPort
       + ", ipcPort=" + ipcPort

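DatanodeRegistration now requires the transfer port at construction, mirroring the DatanodeID(ipAddr, xferPort) superclass constructor it delegates to; the ip-only constructor is gone and the no-arg form falls back to a default port constant. Construction sketches (values illustrative; 50010 is the conventional transfer port):

    DatanodeRegistration reg = new DatanodeRegistration("127.0.0.1", 50010);
    // or with explicit storage info and block keys:
    DatanodeRegistration reg2 = new DatanodeRegistration("127.0.0.1", 50010,
        new StorageInfo(), new ExportedBlockKeys());
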
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Fri Apr  6 05:07:33 2012
@@ -50,14 +50,19 @@ public class NamespaceInfo extends Stora
     super();
     buildVersion = null;
   }
-  
-  public NamespaceInfo(int nsID, String clusterID, String bpID, 
-      long cT, int duVersion) {
+
+  public NamespaceInfo(int nsID, String clusterID, String bpID,
+      long cT, int duVersion, String buildVersion) {
     super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
     blockPoolID = bpID;
-    buildVersion = Storage.getBuildVersion();
+    this.buildVersion = buildVersion;
     this.distributedUpgradeVersion = duVersion;
   }
+
+  public NamespaceInfo(int nsID, String clusterID, String bpID, 
+      long cT, int duVersion) {
+    this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion());
+  }
   
   public String getBuildVersion() {
     return buildVersion;

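The new six-argument NamespaceInfo constructor lets a caller supply the build version explicitly rather than always stamping the local Storage.getBuildVersion(); the old five-argument form now delegates to it. A sketch, with all values illustrative placeholders:

    // e.g. preserving a remote namespace's identity, including its
    // build version, when reconstructing a NamespaceInfo:
    NamespaceInfo info = new NamespaceInfo(nsId, clusterId, bpId,
        cTime, duVersion, remoteBuildVersion);
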
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Fri Apr  6 05:07:33 2012
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -76,7 +77,8 @@ public class NNHAServiceTarget extends H
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
     try {
-      this.fencer = NodeFencer.create(targetConf);
+      this.fencer = NodeFencer.create(targetConf,
+          DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }

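NodeFencer.create() now takes the configuration key that names the fence methods, so HDFS can scope fencing configuration under its own DFS_HA_FENCE_METHODS_KEY (also newly added to NameNode's nameservice-specific key list above). A sketch, assuming the constant resolves to the conventional "dfs.ha.fencing.methods" and that "sshfence" is among the registered methods:

    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "sshfence");
    NodeFencer fencer = NodeFencer.create(conf,
        DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
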
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java Fri Apr  6 05:07:33 2012
@@ -17,104 +17,51 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import java.io.FileOutputStream;
-import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 
 /**
  * BinaryEditsVisitor implements a binary EditsVisitor
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class BinaryEditsVisitor extends EditsVisitor {
-  final private DataOutputStream out;
+public class BinaryEditsVisitor implements OfflineEditsVisitor {
+  final private EditLogFileOutputStream elfos;
 
   /**
-   * Create a processor that writes to a given file and
-   * reads using a given Tokenizer
+   * Create a processor that writes to a given file
    *
    * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
    */
-  public BinaryEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    this(filename, tokenizer, false);
-  }
-
-  /**
-   * Create a processor that writes to a given file and reads using
-   * a given Tokenizer, may also print to screen
-   *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen? (ignored for binary)
-   */
-  public BinaryEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(tokenizer);
-    out = new DataOutputStream(new FileOutputStream(filename));
+  public BinaryEditsVisitor(String outputName) throws IOException {
+    this.elfos = new EditLogFileOutputStream(new File(outputName), 0);
+    elfos.create();
   }
 
   /**
    * Start the visitor (initialization)
    */
   @Override
-  void start() throws IOException {
-    // nothing to do for binary format
+  public void start(int version) throws IOException {
   }
 
   /**
    * Finish the visitor
    */
   @Override
-  void finish() throws IOException {
-    close();
-  }
-
-  /**
-   * Finish the visitor and indicate an error
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    System.err.println("Error processing EditLog file.  Exiting.");
-    close();
-  }
-
-  /**
-   * Close output stream and prevent further writing
-   */
-  private void close() throws IOException {
-    out.close();
-  }
-
-  /**
-   * Visit a enclosing element (element that has other elements in it)
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    // nothing to do for binary format
+  public void close(Throwable error) throws IOException {
+    elfos.setReadyToFlush();
+    elfos.flushAndSync();
+    elfos.close();
   }
 
-  /**
-   * End of eclosing element
-   */
-  @Override
-  void leaveEnclosingElement() throws IOException {
-    // nothing to do for binary format
-  }  
-
-  /**
-   * Visit a Token
-   */
   @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    value.toBinary(out);
-    return value;
+  public void visitOp(FSEditLogOp op) throws IOException {
+    elfos.write(op);
   }
-}
+}
\ No newline at end of file

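The edits-viewer rewrite replaces the tokenizer-driven EditsVisitor hierarchy with the small OfflineEditsVisitor interface: start(version) once, visitOp() per deserialized FSEditLogOp, close(error) at the end. BinaryEditsVisitor becomes a thin adapter that replays ops into an EditLogFileOutputStream. A minimal custom visitor under the same contract (hypothetical class; the interface methods and the op.opCode field are as used elsewhere in this commit):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

    /** Prints one line per edit op; illustrative only. */
    class OpcodePrintingVisitor implements OfflineEditsVisitor {
      @Override
      public void start(int version) throws IOException {
        System.out.println("edits version: " + version);
      }
      @Override
      public void visitOp(FSEditLogOp op) throws IOException {
        System.out.println(op.opCode);
      }
      @Override
      public void close(Throwable error) throws IOException {
        if (error != null) {
          System.err.println("terminated on error: " + error);
        }
      }
    }
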
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java Fri Apr  6 05:07:33 2012
@@ -18,12 +18,16 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsLoader.OfflineEditsLoaderFactory;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -33,6 +37,7 @@ import org.apache.commons.cli.OptionBuil
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
+import org.xml.sax.SAXParseException;
 
 /**
  * This class implements an offline edits viewer, tool that
@@ -42,29 +47,9 @@ import org.apache.commons.cli.PosixParse
 @InterfaceStability.Unstable
 public class OfflineEditsViewer extends Configured implements Tool {
 
-  private EditsLoader  editsLoader;
   private final static String defaultProcessor = "xml";
 
   /**
-   * Set editsLoader
-   *
-   * @param editsLoader EditsLoader
-   */
-  private void setEditsLoader(EditsLoader editsLoader) {
-    this.editsLoader = editsLoader;
-  }
-
-  /**
-   * Process EditLog file.
-   *
-   * @param visitor use this visitor to process the file
-   */
-  public void go(EditsVisitor visitor) throws IOException  {
-    setEditsLoader(EditsLoader.LoaderFactory.getLoader(visitor));
-    editsLoader.loadEdits();
-  }
-
-  /**
    * Print help.
    */  
   private void printHelp() {
@@ -90,6 +75,9 @@ public class OfflineEditsViewer extends 
       "                       format), stats (prints statistics about\n" +
       "                       edits file)\n" +
       "-h,--help              Display usage information and exit\n" +
+      "-f,--fix-txids         Renumber the transaction IDs in the input,\n" +
+      "                       so that there are no gaps or invalid " +
+      "                       transaction IDs.\n" +
       "-v,--verbose           More verbose output, prints the input and\n" +
       "                       output filenames, for processors that write\n" +
       "                       to a file, also output to screen. On large\n" +
@@ -124,11 +112,48 @@ public class OfflineEditsViewer extends 
     
     options.addOption("p", "processor", true, "");
     options.addOption("v", "verbose", false, "");
+    options.addOption("f", "fix-txids", false, "");
     options.addOption("h", "help", false, "");
 
     return options;
   }
 
+  /** Process an edit log using the chosen processor or visitor.
+   * 
+   * @param inputFileName   The file to process
+   * @param outputFileName  The output file name
+   * @param processor       If visitor is null, the processor to use
+   * @param visitor         If non-null, the visitor to use.
+   * 
+   * @return                0 on success; error code otherwise
+   */
+  public int go(String inputFileName, String outputFileName, String processor,
+      boolean printToScreen, boolean fixTxIds, OfflineEditsVisitor visitor)
+  {
+    if (printToScreen) {
+      System.out.println("input  [" + inputFileName  + "]");
+      System.out.println("output [" + outputFileName + "]");
+    }
+    try {
+      if (visitor == null) {
+        visitor = OfflineEditsVisitorFactory.getEditsVisitor(
+            outputFileName, processor, printToScreen);
+      }
+      boolean xmlInput = inputFileName.endsWith(".xml");
+      OfflineEditsLoader loader = OfflineEditsLoaderFactory.
+          createLoader(visitor, inputFileName, xmlInput);
+      if (fixTxIds) {
+        loader.setFixTxIds();
+      }
+      loader.loadEdits();
+    } catch(Exception e) {
+      System.err.println("Encountered exception. Exiting: " + e.getMessage());
+      e.printStackTrace(System.err);
+      return -1;
+    }
+    return 0;
+  }
+
   /**
    * Main entry point for ToolRunner (see ToolRunner docs)
    *
@@ -137,17 +162,13 @@ public class OfflineEditsViewer extends 
    */
   @Override
   public int run(String[] argv) throws Exception {
-    int exitCode = 0;
-
     Options options = buildOptions();
     if(argv.length == 0) {
       printHelp();
       return -1;
     }
-
     CommandLineParser parser = new PosixParser();
     CommandLine cmd;
-
     try {
       cmd = parser.parse(options, argv);
     } catch (ParseException e) {
@@ -156,37 +177,20 @@ public class OfflineEditsViewer extends 
       printHelp();
       return -1;
     }
-
     if(cmd.hasOption("h")) { // print help and exit
       printHelp();
       return -1;
     }
-
-    boolean printToScreen    = false;
-    String inputFilenameArg  = cmd.getOptionValue("i");
-    String outputFilenameArg = cmd.getOptionValue("o");
-    String processor         = cmd.getOptionValue("p");
-    if(processor == null) { processor = defaultProcessor; }
-
-    if(cmd.hasOption("v")) { // print output to screen too
-      printToScreen = true;
-      System.out.println("input  [" + inputFilenameArg  + "]");
-      System.out.println("output [" + outputFilenameArg + "]");
+    String inputFileName = cmd.getOptionValue("i");
+    String outputFileName = cmd.getOptionValue("o");
+    String processor = cmd.getOptionValue("p");
+    if(processor == null) {
+      processor = defaultProcessor;
     }
-
-    try {
-      go(EditsVisitorFactory.getEditsVisitor(
-        outputFilenameArg,
-        processor,
-        TokenizerFactory.getTokenizer(inputFilenameArg),
-        printToScreen));
-    } catch (EOFException e) {
-      System.err.println("Input file ended unexpectedly. Exiting");
-    } catch(IOException e) {
-      System.err.println("Encountered exception. Exiting: " + e.getMessage());
-    }
-
-    return exitCode;
+    boolean printToScreen = cmd.hasOption("v");
+    boolean fixTxIds = cmd.hasOption("f");
+    return go(inputFileName, outputFileName, processor,
+        printToScreen, fixTxIds, null);
   }
 
   /**

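OfflineEditsViewer.run() now funnels into the new go() overload: when no visitor is passed, OfflineEditsVisitorFactory picks one from the processor name, XML input is inferred from a .xml filename suffix (so XML dumps can be converted back), and -f/--fix-txids renumbers transactions via loader.setFixTxIds(). Programmatic use, sketched from the signature above:

    OfflineEditsViewer viewer = new OfflineEditsViewer();
    // null visitor => the factory selects one for "xml", "binary" or "stats"
    int rc = viewer.go("edits", "edits.xml", "xml",
        false /* printToScreen */, true /* fixTxIds */, null);
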
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java Fri Apr  6 05:07:33 2012
@@ -19,12 +19,15 @@ package org.apache.hadoop.hdfs.tools.off
 
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
 import java.util.Map;
 import java.util.HashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 
 /**
@@ -34,26 +37,14 @@ import org.apache.hadoop.hdfs.server.nam
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class StatisticsEditsVisitor extends EditsVisitor {
-  private boolean printToScreen = false;
-  private boolean okToWrite = false;
-  final private FileWriter fw;
+public class StatisticsEditsVisitor implements OfflineEditsVisitor {
+  final private PrintStream out;
 
-  public final Map<FSEditLogOpCodes, Long> opCodeCount =
+  private int version = -1;
+  private final Map<FSEditLogOpCodes, Long> opCodeCount =
     new HashMap<FSEditLogOpCodes, Long>();
 
   /**
-   * Create a processor that writes to the file named.
-   *
-   * @param filename Name of file to write output to
-   */
-  public StatisticsEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    this(filename, tokenizer, false);
-  }
-
-  /**
    * Create a processor that writes to the file named and may or may not
    * also output to the screen, as specified.
    *
@@ -61,103 +52,29 @@ public class StatisticsEditsVisitor exte
    * @param tokenizer Input tokenizer
    * @param printToScreen Mirror output to screen?
    */
-  public StatisticsEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(tokenizer);
-    this.printToScreen = printToScreen;
-    fw = new FileWriter(filename);
-    okToWrite = true;
+  public StatisticsEditsVisitor(OutputStream out) throws IOException {
+    this.out = new PrintStream(out);
   }
 
-  /**
-   * Start the visitor (initialization)
-   */
+  /** Start the visitor */
   @Override
-  void start() throws IOException {
-    // nothing to do
+  public void start(int version) throws IOException {
+    this.version = version;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitor#finish()
-   */
-  @Override
-  void finish() throws IOException {
-    write(getStatisticsString());
-    close();
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitor#finishAbnormally()
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    close();
-  }
-
-  /**
-   * Close output stream and prevent further writing
-   */
-  private void close() throws IOException {
-    fw.close();
-    okToWrite = false;
-  }
-
-  /**
-   * Visit a enclosing element (element that has other elements in it)
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    // nothing to do
-  }
-
-  /**
-   * End of eclosing element
-   */
+  /** Close the visitor */
   @Override
-  void leaveEnclosingElement() throws IOException {
-    // nothing to do
-  }  
-
-  /**
-   * Visit a Token, calculate statistics
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    // count the opCodes
-    if(value.getEditsElement() == EditsElement.OPCODE) {
-      if(value instanceof Tokenizer.ByteToken) {
-        incrementOpCodeCount(
-          FSEditLogOpCodes.fromByte(((Tokenizer.ByteToken)value).value));
-      } else {
-        throw new IOException("Token for EditsElement.OPCODE should be " +
-          "of type Tokenizer.ByteToken, not " + value.getClass());
-      }
+  public void close(Throwable error) throws IOException {
+    out.print(getStatisticsString());
+    if (error != null) {
+      out.print("EXITING ON ERROR: " + error.toString() + "\n");
     }
-    return value;
+    out.close();
   }
 
-  /**
-   * Write parameter to output file (and possibly screen).
-   *
-   * @param toWrite Text to write to file
-   */
-  protected void write(String toWrite) throws IOException  {
-    if(!okToWrite)
-      throw new IOException("file not open for writing.");
-
-    if(printToScreen)
-      System.out.print(toWrite);
-
-    try {
-      fw.write(toWrite);
-    } catch (IOException e) {
-      okToWrite = false;
-      throw e;
-    }
+  @Override
+  public void visitOp(FSEditLogOp op) throws IOException {
+    incrementOpCodeCount(op.opCode);
   }
 
   /**
@@ -189,13 +106,16 @@ public class StatisticsEditsVisitor exte
    */
   public String getStatisticsString() {
     StringBuffer sb = new StringBuffer();
+    sb.append(String.format(
+        "    %-30.30s      : %d%n",
+        "VERSION", version));
     for(FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
       sb.append(String.format(
         "    %-30.30s (%3d): %d%n",
-        opCode,
+        opCode.toString(),
         opCode.getOpCode(),
         opCodeCount.get(opCode)));
     }
     return sb.toString();
   }
-}
+}
\ No newline at end of file

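StatisticsEditsVisitor keeps the same opcode-count report but now writes to any OutputStream and counts FSEditLogOp.opCode directly, dropping the tokenizer and FileWriter plumbing; close() emits the table (now headed by the log VERSION) plus any terminal error. Stand-alone use, sketched against the go() overload above:

    StatisticsEditsVisitor stats = new StatisticsEditsVisitor(System.out);
    // explicit visitor => the processor/output-file arguments go unused here
    new OfflineEditsViewer().go("edits", null, "stats",
        false /* printToScreen */, false /* fixTxIds */, stats);
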
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java Fri Apr  6 05:07:33 2012
@@ -18,12 +18,19 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.io.OutputStream;
 
-import org.apache.hadoop.hdfs.tools.offlineImageViewer.DepthCounter;
+import org.apache.hadoop.hdfs.util.XMLUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.AttributesImpl;
+
+import com.sun.org.apache.xml.internal.serialize.OutputFormat;
+import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
 
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out
@@ -31,140 +38,85 @@ import org.apache.hadoop.classification.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class XmlEditsVisitor extends TextEditsVisitor {
-  final private LinkedList<EditsElement> tagQ =
-    new LinkedList<EditsElement>();
-
-  final private DepthCounter depthCounter = new DepthCounter();
+public class XmlEditsVisitor implements OfflineEditsVisitor {
+  private OutputStream out;
+  private ContentHandler contentHandler;
 
   /**
    * Create a processor that writes to the file named and may or may not
    * also output to the screen, as specified.
    *
    * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
+   * @param printToScreen Mirror output to screen?
    */
-  public XmlEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    super(filename, tokenizer, false);
-  }
-
-  /**
-   * Create a processor that writes to the file named and may or may not
-   * also output to the screen, as specified.
-   *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen? (ignored for binary)
-   */
-  public XmlEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(filename, tokenizer, printToScreen);
+  public XmlEditsVisitor(OutputStream out)
+      throws IOException {
+    this.out = out;
+    OutputFormat outFormat = new OutputFormat("XML", "UTF-8", true);
+    outFormat.setIndenting(true);
+    outFormat.setIndent(2);
+    outFormat.setDoctype(null, null);
+    XMLSerializer serializer = new XMLSerializer(out, outFormat);
+    contentHandler = serializer.asContentHandler();
+    try {
+      contentHandler.startDocument();
+      contentHandler.startElement("", "", "EDITS", new AttributesImpl());
+    } catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
   }
 
   /**
    * Start visitor (initialization)
    */
   @Override
-  void start() throws IOException {
-    write("<?xml version=\"1.0\"?>\n");
-  }
-
-  /**
-   * Finish visitor
-   */
-  @Override
-  void finish() throws IOException {
-    super.finish();
-  }
-
-  /**
-   * Finish with error
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    write("\n<!-- Error processing EditLog file.  Exiting -->\n");
-    super.finishAbnormally();
-  }
-
-  /**
-   * Visit a Token
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    writeTag(value.getEditsElement().toString(), value.toString());
-    return value;
+  public void start(int version) throws IOException {
+    try {
+      contentHandler.startElement("", "", "EDITS_VERSION", new AttributesImpl());
+      StringBuilder bld = new StringBuilder();
+      bld.append(version);
+      addString(bld.toString());
+      contentHandler.endElement("", "", "EDITS_VERSION");
+    }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
   }
 
-  /**
-   * Visit an enclosing element (element that cntains other elements)
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    printIndents();
-    write("<" + value.getEditsElement().toString() + ">\n");
-    tagQ.push(value.getEditsElement());
-    depthCounter.incLevel();
+  public void addString(String str) throws SAXException {
+    int slen = str.length();
+    char arr[] = new char[slen];
+    str.getChars(0, slen, arr, 0);
+    contentHandler.characters(arr, 0, slen);
   }
-
+  
   /**
-   * Leave enclosing element
+   * Finish visitor
    */
   @Override
-  void leaveEnclosingElement() throws IOException {
-    depthCounter.decLevel();
-    if(tagQ.size() == 0)
-      throw new IOException("Tried to exit non-existent enclosing element " +
-                "in EditLog file");
-
-    EditsElement element = tagQ.pop();
-    printIndents();
-    write("</" + element.toString() + ">\n");
-  }
-
-  /**
-   * Write an XML tag
-   *
-   * @param tag a tag name
-   * @param value a tag value
-   */
-  private void writeTag(String tag, String value) throws IOException {
-    printIndents();
-    if(value.length() > 0) {
-      write("<" + tag + ">" + value + "</" + tag + ">\n");
-    } else {
-      write("<" + tag + "/>\n");
+  public void close(Throwable error) throws IOException {
+    try {
+      contentHandler.endElement("", "", "EDITS");
+      if (error != null) {
+        String msg = error.getMessage();
+        XMLUtils.addSaxString(contentHandler, "ERROR",
+            (msg == null) ? "null" : msg);
+      }
+      contentHandler.endDocument();
     }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
+    out.close();
   }
 
-  // prepared values that printIndents is likely to use
-  final private static String [] indents = {
-     "",
-     "  ",
-     "    ",
-     "      ",
-     "        ",
-     "          ",
-     "            " };
-
-  /**
-   * Prints the leading spaces based on depth level
-   */
-  private void printIndents() throws IOException {
+  @Override
+  public void visitOp(FSEditLogOp op) throws IOException {
     try {
-      write(indents[depthCounter.getLevel()]);
-    } catch (IndexOutOfBoundsException e) {
-      // unlikely needed so can be slow
-      for(int i = 0; i < depthCounter.getLevel(); i++)
-        write("  ");
+      op.outputToXml(contentHandler);
+    }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
     }
-   
   }
 }

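XmlEditsVisitor now streams events through a SAX ContentHandler backed by the JDK-internal XMLSerializer, wrapping the log in an EDITS element with an EDITS_VERSION child and one sub-tree per op from FSEditLogOp.outputToXml(). The resulting document shape, sketched (only EDITS and EDITS_VERSION appear in this diff; the per-op RECORD/OPCODE element names are an assumption):

    <?xml version="1.0" encoding="UTF-8"?>
    <EDITS>
      <EDITS_VERSION>-40</EDITS_VERSION>
      <RECORD>
        <OPCODE>OP_ADD</OPCODE>
        ...
      </RECORD>
    </EDITS>
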
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Fri Apr  6 05:07:33 2012
@@ -305,7 +305,7 @@ public class JsonUtil {
     }
 
     return new DatanodeInfo(
-        (String)m.get("name"),
+        (String)m.get("ipAddr"),
         (String)m.get("hostName"),
         (String)m.get("storageID"),
         (int)(long)(Long)m.get("xferPort"),

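The JsonUtil change tracks the DatanodeID refactoring elsewhere in this branch: the WebHDFS JSON map for a DatanodeInfo is keyed by "ipAddr" rather than the old combined "name" field. The fields read here, sketched with illustrative values (the full object carries more):

    { "ipAddr"    : "10.0.0.1",
      "hostName"  : "dn1.example.com",
      "storageID" : "DS-1",
      "xferPort"  : 50010 }
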
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1309568-1310173

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto Fri Apr  6 05:07:33 2012
@@ -27,16 +27,25 @@ option java_generate_equals_and_hash = t
 import "hdfs.proto";
 
 /**
- * registration - the registration info of the active NameNode
- * firstTxnId - the first txid in the rolled edit log
+ * Journal information used by the journal receiver to identify a journal.
+ */
+message JournalInfoProto {
+  required string clusterID = 1;     // ID of the cluster
+  optional uint32 layoutVersion = 2; // Layout version
+  optional uint32 namespaceID = 3;    // Namespace ID
+}
+
+/**
+ * JournalInfo - the information about the journal
+ * firstTxnId - the first txid in the journal records
  * numTxns - Number of transactions in editlog
  * records - bytes containing serialized journal records
  */
 message JournalRequestProto {
-  required NamenodeRegistrationProto registration = 1; // Registration info
-  required uint64 firstTxnId = 2; // Transaction ID
-  required uint32 numTxns = 3;    // Transaction ID
-  required bytes records = 4;     // Journal record
+  required JournalInfoProto journalInfo = 1;
+  required uint64 firstTxnId = 2;
+  required uint32 numTxns = 3;
+  required bytes records = 4;
 }
 
 /**
@@ -46,12 +55,12 @@ message JournalResponseProto { 
 }
 
 /**
- * registration - the registration info of the active NameNode
+ * JournalInfo - the information about the journal
  * txid - first txid in the new log
  */
 message StartLogSegmentRequestProto {
-  required NamenodeRegistrationProto registration = 1; // Registration info
-  required uint64 txid = 2; // Transaction ID
+  required JournalInfoProto journalInfo = 1;
+  required uint64 txid = 2;
 }
 
 /**

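JournalProtocol requests now carry a lightweight JournalInfoProto (cluster ID, layout version, namespace ID) instead of the full active-NameNode registration, which is enough for the receiver to identify the journal. Building a request from Java, sketched with the setter names the usual protoc mapping would generate (the generated classes are not shown in this diff):

    import com.google.protobuf.ByteString;

    JournalInfoProto info = JournalInfoProto.newBuilder()
        .setClusterID("CID-example")
        .setNamespaceID(12345)
        .build();
    JournalRequestProto req = JournalRequestProto.newBuilder()
        .setJournalInfo(info)
        .setFirstTxnId(1L)
        .setNumTxns(2)
        .setRecords(ByteString.EMPTY)  // serialized edit records
        .build();
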
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1309568-1310173

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1309568-1310173

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1309568-1310173

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1309568-1310173

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Fri Apr  6 05:07:33 2012
@@ -395,9 +395,9 @@ public class TestDFSUtil {
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
   }
   
   @Test

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Fri Apr  6 05:07:33 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -516,7 +517,8 @@ public class TestDecommission {
     // Now empty hosts file and ensure the datanode is disallowed
     // from talking to namenode, resulting in it's shutdown.
     ArrayList<String>list = new ArrayList<String>();
-    list.add("invalidhost");
+    final String badHostname = "BOGUSHOST";
+    list.add(badHostname);
     writeConfigFile(hostsFile, list);
     
     for (int j = 0; j < numNameNodes; j++) {
@@ -530,6 +532,17 @@ public class TestDecommission {
         info = client.datanodeReport(DatanodeReportType.LIVE);
       }
       assertEquals("Number of live nodes should be 0", 0, info.length);
+      
+      // Test that non-live and bogus hostnames are considered "dead".
+      // The dead report should have an entry for (1) the DN  that is
+      // now considered dead because it is no longer allowed to connect
+      // and (2) the bogus entry in the hosts file (these entries are
+      // always added last)
+      info = client.datanodeReport(DatanodeReportType.DEAD);
+      assertEquals("There should be 2 dead nodes", 2, info.length);
+      DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
+      assertEquals(id.getHostName(), info[0].getHostName());
+      assertEquals(badHostname, info[1].getHostName());
     }
   }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Fri Apr  6 05:07:33 2012
@@ -60,7 +60,7 @@ public class TestReplaceDatanodeOnFailur
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
     datanodes[0] = new DatanodeInfo[0];
     for(int i = 0; i < infos.length; ) {
-      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i));
+      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
       i++;
       datanodes[i] = new DatanodeInfo[i];
       System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Fri Apr  6 05:07:33 2012
@@ -115,7 +115,7 @@ public class TestBPOfferService {
             0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
     
-    Mockito.doReturn(new DatanodeRegistration("fake-node"))
+    Mockito.doReturn(new DatanodeRegistration("fake-node", 100))
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Apr  6 05:07:33 2012
@@ -779,9 +779,9 @@ public class NNThroughputBenchmark {
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
+      String ipAddr = DNS.getDefaultIP("default");
       String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(hostName);
-      dnRegistration.setXferPort(getNodePort(dnIdx));
+      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
       dnRegistration.setHostName(hostName);
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
@@ -894,10 +894,10 @@ public class NNThroughputBenchmark {
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
-          receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr());
+          receivedDNReg =
+            new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
           receivedDNReg.setStorageInfo(
-                          new DataStorage(nsInfo, dnInfo.getStorageID()));
-          receivedDNReg.setXferPort(dnInfo.getXferPort());
+            new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
           receivedDNReg.setIpcPort(dnInfo.getIpcPort());
           ReceivedDeletedBlockInfo[] rdBlocks = {

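For context, the TinyDatanode change above follows the new DatanodeRegistration construction pattern: the IP address and transfer port now go to the constructor, and the hostname is still supplied through a setter. A standalone sketch of the same pattern (assuming the two-argument constructor this commit introduces; the port value is just the stock HDFS transfer port, not taken from this diff):

    // Sketch only, mirroring TinyDatanode above; runs inside a method
    // declared to throw IOException.
    String ipAddr = DNS.getDefaultIP("default");                 // local IP, as in the benchmark
    String hostName = DNS.getDefaultHost("default", "default");  // local hostname
    DatanodeRegistration reg = new DatanodeRegistration(ipAddr, 50010); // 50010: default xfer port (illustrative)
    reg.setHostName(hostName);                                   // hostname still set separately
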
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java Fri Apr  6 05:07:33 2012
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.GenericTestUtils;
 
 /**
  * This class tests the validation of the configuration object when passed 
@@ -72,6 +74,7 @@ public class TestValidateConfigurationSe
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
     DFSTestUtil.formatNameNode(conf);
     NameNode nameNode = new NameNode(conf); // should be OK!
+    nameNode.stop();
   }
 
   /**
@@ -82,16 +85,30 @@ public class TestValidateConfigurationSe
   public void testGenericKeysForNameNodeFormat()
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    FileSystem.setDefaultUri(conf, "hdfs://localhost:8070");
+
+    // Set ephemeral ports 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
+        "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        "127.0.0.1:0");
+    
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
-    String nameDir = System.getProperty("java.io.tmpdir") + "/test.dfs.name";
-    File dir = new File(nameDir);
+    
+    // Set a nameservice-specific configuration for name dir
+    File dir = new File(MiniDFSCluster.getBaseDirectory(),
+        "testGenericKeysForNameNodeFormat");
     if (dir.exists()) {
       FileUtil.fullyDelete(dir);
     }
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1", nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
+        dir.getAbsolutePath());
+    
+    // Format and verify the right dir is formatted.
     DFSTestUtil.formatNameNode(conf);
+    GenericTestUtils.assertExists(dir);
+
+    // Ensure that the same dir is picked up by the running NN
     NameNode nameNode = new NameNode(conf);
-    FileUtil.fullyDelete(dir);
+    nameNode.stop();
   }
 }

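The rewritten test exercises Hadoop's generic-key mechanism: suffixing a configuration key with a nameservice id scopes the value to that nameservice, and the suffixed key wins over the plain one. A minimal sketch of the pattern (key constants from the diff; "ns1" and the path are illustrative):

    // Sketch: scoping the name dir to nameservice "ns1".
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
    // dfs.namenode.name.dir.ns1 overrides dfs.namenode.name.dir for ns1 only.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1", "/data/ns1/name");
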
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Fri Apr  6 05:07:33 2012
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.codehaus.jackson.sym.NameN;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java Fri Apr  6 05:07:33 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.ha.NodeFencer;
 import org.apache.hadoop.ha.ZKFailoverController;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -56,7 +57,7 @@ public class TestDFSZKFailoverController
     // Specify the quorum per-nameservice, to ensure that these configs
     // can be nameservice-scoped.
     conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
-    conf.set(NodeFencer.CONF_METHODS_KEY,
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
         AlwaysSucceedFencer.class.getName());
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Fri Apr  6 05:07:33 2012
@@ -158,7 +158,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfigured() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
   }
@@ -167,7 +167,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerAndNameservice() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
   }
@@ -176,7 +176,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfiguredAndForce() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }
@@ -185,7 +185,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithForceActive() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
   }
@@ -194,7 +194,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithInvalidFenceArg() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
   }
@@ -209,7 +209,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFenceAndBadFencer() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }
@@ -218,7 +218,7 @@ public class TestDFSHAAdmin {
   public void testForceFenceOptionListedBeforeArgs() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }
@@ -240,7 +240,41 @@ public class TestDFSHAAdmin {
     assertEquals(-1, runTool("-checkHealth", "nn1"));
     assertOutputContains("Health check failed: fake health check failure");
   }
+  
+  /**
+   * Test that the fencing configuration can be overridden per-nameservice
+   * or per-namenode.
+   */
+  @Test
+  public void testFencingConfigPerNameNode() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+
+    final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
+    final String nnSpecificKey = nsSpecificKey + ".nn1";
+    
+    HdfsConfiguration conf = getHAConf();
+    // Set the default fencer to succeed
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    tool.setConf(conf);
+    assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    
+    // Set the NN-specific fencer to fail. Should fail to fence.
+    conf.set(nnSpecificKey, "shell(false)");
+    tool.setConf(conf);
+    assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    conf.unset(nnSpecificKey);
 
+    // Set an NS-specific fencer to fail. Should fail.
+    conf.set(nsSpecificKey, "shell(false)");
+    tool.setConf(conf);
+    assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    
+    // Set the NS-specific fencer to succeed. Should succeed.
+    conf.set(nsSpecificKey, "shell(true)");
+    tool.setConf(conf);
+    assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
+  }
+  
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));

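The new testFencingConfigPerNameNode pins down the precedence of fencing configuration: a namenode-specific key beats a nameservice-specific key, which beats the global default. A sketch of the key derivation the test relies on (the lookup chain below is my reading of the precedence the assertions establish, not code from this commit; "ns1" stands in for the test's NSID constant):

    // Keys, from most generic to most specific.
    String base  = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;  // "dfs.ha.fencing.methods"
    String perNs = base + ".ns1";                           // nameservice-scoped
    String perNn = perNs + ".nn1";                          // namenode-scoped
    // Most specific wins, per the test's override/unset sequence.
    String fencer = conf.get(perNn, conf.get(perNs, conf.get(base)));
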
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Fri Apr  6 05:07:33 2012
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -113,7 +114,7 @@ public class TestDFSHAAdminMiniCluster {
   
   @Test
   public void testTryFailoverToSafeMode() throws Exception {
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
 
     NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
@@ -135,7 +136,7 @@ public class TestDFSHAAdminMiniCluster {
     // tmp file, so we can verify that the args were substituted right
     File tmpFile = File.createTempFile("testFencer", ".txt");
     tmpFile.deleteOnExit();
-    conf.set(NodeFencer.CONF_METHODS_KEY,
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
         "shell(echo -n $target_nameserviceid.$target_namenodeid " +
         "$target_port $dfs_ha_namenode_id > " +
         tmpFile.getAbsolutePath() + ")");
@@ -168,19 +169,19 @@ public class TestDFSHAAdminMiniCluster {
 
           
     // Test failover with no fencer and the forcefence option
-    conf.unset(NodeFencer.CONF_METHODS_KEY);
+    conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
     assertFalse(tmpFile.exists());
 
     // Test failover with bad fencer and forcefence option
-    conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
     assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }

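The substitution test above depends on the shell fencing method exporting context to the command's environment: judging by the variable names, $target_nameserviceid, $target_namenodeid and $target_port describe the namenode being fenced, while $dfs_ha_namenode_id looks like the dfs.ha.namenode.id configuration value with dots mapped to underscores. This diff does not spell the mechanism out, so treat that as inference. The same setup outside the test harness would look roughly like:

    // Sketch; the $-variables are expanded by the shell fencer, not by Java.
    File tmpFile = File.createTempFile("fencer", ".txt");
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
        "$target_port > " + tmpFile.getAbsolutePath() + ")");
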
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Fri Apr  6 05:07:33 2012
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.File;
 import java.nio.ByteBuffer;
@@ -33,8 +34,6 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.TokenizerFactory;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitorFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
@@ -158,11 +157,8 @@ public class TestOfflineEditsViewer {
     LOG.info("Running oev [" + inFilename + "] [" + outFilename + "]");
 
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    oev.go( EditsVisitorFactory.getEditsVisitor(
-      outFilename,
-      processor,
-      TokenizerFactory.getTokenizer(inFilename),
-      false));
+    if (oev.go(inFilename, outFilename, processor, true, false, null) != 0)
+      throw new RuntimeException("oev failed");
   }
 
   /**
@@ -173,14 +169,11 @@ public class TestOfflineEditsViewer {
    */
   private boolean hasAllOpCodes(String inFilename) throws IOException {
     String outFilename = inFilename + ".stats";
-    StatisticsEditsVisitor visitor =
-      (StatisticsEditsVisitor)EditsVisitorFactory.getEditsVisitor(
-        outFilename,
-        "stats",
-        TokenizerFactory.getTokenizer(inFilename),
-        false);
+    FileOutputStream fout = new FileOutputStream(outFilename);
+    StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(fout);
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    oev.go(visitor);
+    if (oev.go(inFilename, outFilename, "stats", false, false, visitor) != 0)
+      return false;
     LOG.info("Statistics for " + inFilename + "\n" +
       visitor.getStatisticsString());
     
@@ -190,6 +183,8 @@ public class TestOfflineEditsViewer {
       if(obsoleteOpCodes.containsKey(opCode)) {
         continue;
       }
+      if (opCode == FSEditLogOpCodes.OP_INVALID)
+        continue;
       Long count = visitor.getStatistics().get(opCode);
       if((count == null) || (count == 0)) {
         hasAllOpCodes = false;

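The oev changes track an API change in OfflineEditsViewer: the visitor-factory entry point is gone, and go() now takes the input and output file names, the processor name, two flags, and an optional pre-built visitor, returning non-zero on failure. A sketch of the new invocation, with the caveat that the meanings of the two boolean arguments are not named anywhere in this diff (the values simply mirror the call sites above):

    // Inferred from the call sites in this test; "xml" is one of the
    // standard oev processors (binary, xml, stats).
    OfflineEditsViewer oev = new OfflineEditsViewer();
    int rc = oev.go("edits.in", "edits.xml", "xml", true, false, null);
    if (rc != 0) {
      throw new RuntimeException("oev failed with exit code " + rc);
    }
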
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1310174&r1=1310173&r2=1310174&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Fri Apr  6 05:07:33 2012
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
 import java.util.Map;
 
 import javax.servlet.http.HttpServletResponse;
@@ -133,8 +134,20 @@ public class TestWebHdfsFileSystemContra
     final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
         new Path(f), 0L, 1L);
     assertEquals(expected.length, computed.length);
-    for(int i = 0; i < computed.length; i++) {
+    for (int i = 0; i < computed.length; i++) {
       assertEquals(expected[i].toString(), computed[i].toString());
+      // Check names
+      String names1[] = expected[i].getNames();
+      String names2[] = computed[i].getNames();
+      Arrays.sort(names1);
+      Arrays.sort(names2);
+      Assert.assertArrayEquals("Names differ", names1, names2);
+      // Check topology
+      String topos1[] = expected[i].getTopologyPaths();
+      String topos2[] = computed[i].getTopologyPaths();
+      Arrays.sort(topos1);
+      Arrays.sort(topos2);
+      Assert.assertArrayEquals("Topology differs", topos1, topos2);
     }
   }
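
The added name and topology checks sort both arrays before comparing because WebHDFS and the native client need not report a block's datanodes in the same order; sorting normalizes the order while still requiring the same set of values. The idiom in isolation (illustrative values, assuming java.util.Arrays and org.junit.Assert):

    // Order-insensitive array comparison, as used above.
    String[] viaWebHdfs = {"dn2:50010", "dn1:50010"};   // illustrative
    String[] viaDfs     = {"dn1:50010", "dn2:50010"};   // illustrative
    Arrays.sort(viaWebHdfs);
    Arrays.sort(viaDfs);
    Assert.assertArrayEquals("Names differ", viaDfs, viaWebHdfs);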