Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/04/05 22:16:20 UTC

svn commit: r1310048 [2/3] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: dev-support/ hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoo...

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Apr  5 20:16:15 2012
@@ -25,15 +25,17 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
@@ -49,15 +51,13 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
@@ -150,9 +150,9 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
@@ -164,6 +164,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
@@ -260,30 +261,28 @@ public class FSNamesystem implements Nam
 
   static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
   static int BLOCK_DELETION_INCREMENT = 1000;
-  private boolean isPermissionEnabled;
-  private boolean persistBlocks;
-  private UserGroupInformation fsOwner;
-  private String supergroup;
-  private boolean standbyShouldCheckpoint;
+  private final boolean isPermissionEnabled;
+  private final boolean persistBlocks;
+  private final UserGroupInformation fsOwner;
+  private final String supergroup;
+  private final boolean standbyShouldCheckpoint;
   
   // Scan interval is not configurable.
   private static final long DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL =
     TimeUnit.MILLISECONDS.convert(1, TimeUnit.HOURS);
-  private DelegationTokenSecretManager dtSecretManager;
-  private boolean alwaysUseDelegationTokensForTests;
+  private final DelegationTokenSecretManager dtSecretManager;
+  private final boolean alwaysUseDelegationTokensForTests;
   
 
-  //
-  // Stores the correct file name hierarchy
-  //
+  /** The namespace tree. */
   FSDirectory dir;
-  private BlockManager blockManager;
-  private DatanodeStatistics datanodeStatistics;
+  private final BlockManager blockManager;
+  private final DatanodeStatistics datanodeStatistics;
 
   // Block pool ID used by this namenode
   private String blockPoolId;
 
-  LeaseManager leaseManager = new LeaseManager(this); 
+  final LeaseManager leaseManager = new LeaseManager(this); 
 
   Daemon smmthread = null;  // SafeModeMonitor thread
   
@@ -291,23 +290,23 @@ public class FSNamesystem implements Nam
 
   private volatile boolean hasResourcesAvailable = false;
   private volatile boolean fsRunning = true;
-  long systemStart = 0;
+  
+  /** The start time of the namesystem. */
+  private final long startTime = now();
 
-  //resourceRecheckInterval is how often namenode checks for the disk space availability
-  private long resourceRecheckInterval;
+  /** The interval at which the namenode checks for disk space availability. */
+  private final long resourceRecheckInterval;
 
   // The actual resource checker instance.
   NameNodeResourceChecker nnResourceChecker;
 
-  private FsServerDefaults serverDefaults;
-
-  private boolean supportAppends;
-  private ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure = 
-      ReplaceDatanodeOnFailure.DEFAULT;
+  private final FsServerDefaults serverDefaults;
+  private final boolean supportAppends;
+  private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
 
   private volatile SafeModeInfo safeMode;  // safe mode information
 
-  private long maxFsObjects = 0;          // maximum number of fs objects
+  private final long maxFsObjects;          // maximum number of fs objects
 
   /**
    * The global generation stamp for this file system. 
@@ -315,10 +314,10 @@ public class FSNamesystem implements Nam
   private final GenerationStamp generationStamp = new GenerationStamp();
 
   // precision of access times.
-  private long accessTimePrecision = 0;
+  private final long accessTimePrecision;
 
-  // lock to protect FSNamesystem.
-  private ReentrantReadWriteLock fsLock;
+  /** Lock to protect FSNamesystem. */
+  private ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
 
   /**
    * Used when this NN is in standby state to read from the shared edit log.
@@ -336,9 +335,7 @@ public class FSNamesystem implements Nam
    */
   private HAContext haContext;
 
-  private boolean haEnabled;
-
-  private final Configuration conf;
+  private final boolean haEnabled;
     
   /**
    * Instantiates an FSNamesystem loaded from the image and edits
@@ -350,10 +347,27 @@ public class FSNamesystem implements Nam
    * @throws IOException if loading fails
    */
   public static FSNamesystem loadFromDisk(Configuration conf)
-    throws IOException {
+      throws IOException {
     Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
     List<URI> namespaceEditsDirs = 
       FSNamesystem.getNamespaceEditsDirs(conf);
+    return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
+  }
+
+  /**
+   * Instantiates an FSNamesystem loaded from the image and edits
+   * directories passed.
+   * 
+   * @param conf the Configuration which specifies the storage directories
+   *             from which to load
+   * @param namespaceDirs directories from which to load the fsimage
+   * @param namespaceEditsDirs directories to load the edits from
+   * @return an FSNamesystem which contains the loaded namespace
+   * @throws IOException if loading fails
+   */
+  public static FSNamesystem loadFromDisk(Configuration conf,
+      Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
+      throws IOException {
 
     if (namespaceDirs.size() == 1) {
       LOG.warn("Only one " + DFS_NAMENODE_NAME_DIR_KEY
@@ -374,8 +388,10 @@ public class FSNamesystem implements Nam
       HAUtil.isHAEnabled(conf, nameserviceId));
     long timeTakenToLoadFSImage = now() - loadStart;
     LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
-    NameNode.getNameNodeMetrics().setFsImageLoadTime(
-                              (int) timeTakenToLoadFSImage);
+    NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
+    if (nnMetrics != null) {
+      nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
+    }
     return namesystem;
   }
 
@@ -390,9 +406,71 @@ public class FSNamesystem implements Nam
    * @throws IOException on bad configuration
    */
   FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
-    this.conf = conf;
     try {
-      initialize(conf, fsImage);
+      resourceRecheckInterval = conf.getLong(
+          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
+          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
+
+      this.blockManager = new BlockManager(this, this, conf);
+      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
+
+      this.fsOwner = UserGroupInformation.getCurrentUser();
+      this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
+                                 DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+      this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
+                                                 DFS_PERMISSIONS_ENABLED_DEFAULT);
+      LOG.info("fsOwner             = " + fsOwner);
+      LOG.info("supergroup          = " + supergroup);
+      LOG.info("isPermissionEnabled = " + isPermissionEnabled);
+
+      final boolean persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
+                                                    DFS_PERSIST_BLOCKS_DEFAULT);
+      // block allocation has to be persisted in HA using a shared edits directory
+      // so that the standby has up-to-date namespace information
+      String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+      this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
+      this.persistBlocks = persistBlocks || (haEnabled && HAUtil.usesSharedEditsDir(conf));
+      
+      // Sanity check the HA-related config.
+      if (nameserviceId != null) {
+        LOG.info("Determined nameservice ID: " + nameserviceId);
+      }
+      LOG.info("HA Enabled: " + haEnabled);
+      if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) {
+        LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf));
+        throw new IOException("Invalid configuration: a shared edits dir " +
+            "must not be specified if HA is not enabled.");
+      }
+
+      this.serverDefaults = new FsServerDefaults(
+          conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+          conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
+          conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
+          (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
+          conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT));
+      
+      this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
+                                       DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
+
+      this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
+      this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
+      LOG.info("Append Enabled: " + haEnabled);
+
+      this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
+      
+      this.standbyShouldCheckpoint = conf.getBoolean(
+          DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
+      
+      // For testing purposes, allow the DT secret manager to be started regardless
+      // of whether security is enabled.
+      alwaysUseDelegationTokensForTests = conf.getBoolean(
+          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
+          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
+
+      this.dtSecretManager = createDelegationTokenSecretManager(conf);
+      this.dir = new FSDirectory(fsImage, this, conf);
+      this.safeMode = new SafeModeInfo(conf);
+
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -400,24 +478,6 @@ public class FSNamesystem implements Nam
     }
   }
 
-  /**
-   * Initialize FSNamesystem.
-   */
-  private void initialize(Configuration conf, FSImage fsImage)
-      throws IOException {
-    resourceRecheckInterval = conf.getLong(
-        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
-        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
-    this.systemStart = now();
-    this.blockManager = new BlockManager(this, this, conf);
-    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
-    this.fsLock = new ReentrantReadWriteLock(true); // fair locking
-    setConfigurationParameters(conf);
-    dtSecretManager = createDelegationTokenSecretManager(conf);
-    this.dir = new FSDirectory(fsImage, this, conf);
-    this.safeMode = new SafeModeInfo(conf);
-  }
-
   void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled)
       throws IOException {
     // format before starting up if requested
@@ -601,13 +661,13 @@ public class FSNamesystem implements Nam
   }
   
   /** Start services required in standby state */
-  void startStandbyServices() {
+  void startStandbyServices(final Configuration conf) {
     LOG.info("Starting services required for standby state");
     if (!dir.fsImage.editLog.isOpenForRead()) {
       // During startup, we're already open for read.
       dir.fsImage.editLog.initSharedJournalsForRead();
     }
-    editLogTailer = new EditLogTailer(this);
+    editLogTailer = new EditLogTailer(this, conf);
     editLogTailer.start();
     if (standbyShouldCheckpoint) {
       standbyCheckpointer = new StandbyCheckpointer(conf, this);
@@ -768,10 +828,6 @@ public class FSNamesystem implements Nam
         DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
     return Util.stringCollectionAsURIs(dirNames);
   }
-  
-  public Configuration getConf() {
-    return conf;
-  }
 
   @Override
   public void readLock() {
@@ -806,69 +862,6 @@ public class FSNamesystem implements Nam
     return hasReadLock() || hasWriteLock();
   }
 
-
-  /**
-   * Initializes some of the members from configuration
-   */
-  private void setConfigurationParameters(Configuration conf) 
-                                          throws IOException {
-    fsOwner = UserGroupInformation.getCurrentUser();
-    
-    LOG.info("fsOwner=" + fsOwner);
-
-    this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
-                               DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
-    this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
-                                               DFS_PERMISSIONS_ENABLED_DEFAULT);
-    LOG.info("supergroup=" + supergroup);
-    LOG.info("isPermissionEnabled=" + isPermissionEnabled);
-
-    this.persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
-                                         DFS_PERSIST_BLOCKS_DEFAULT);
-    // block allocation has to be persisted in HA using a shared edits directory
-    // so that the standby has up-to-date namespace information
-    String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
-    this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
-    this.persistBlocks |= haEnabled && HAUtil.usesSharedEditsDir(conf);
-    
-    // Sanity check the HA-related config.
-    if (nameserviceId != null) {
-      LOG.info("Determined nameservice ID: " + nameserviceId);
-    }
-    LOG.info("HA Enabled: " + haEnabled);
-    if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) {
-      LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf));
-      throw new IOException("Invalid configuration: a shared edits dir " +
-          "must not be specified if HA is not enabled.");
-    }
-
-    this.serverDefaults = new FsServerDefaults(
-        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
-        conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
-        conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
-        (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
-        conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT));
-    
-    this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
-                                     DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
-
-    this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
-    this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY,
-        DFS_SUPPORT_APPEND_DEFAULT);
-
-    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
-    
-    this.standbyShouldCheckpoint = conf.getBoolean(
-        DFS_HA_STANDBY_CHECKPOINTS_KEY,
-        DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
-    
-    // For testing purposes, allow the DT secret manager to be started regardless
-    // of whether security is enabled.
-    alwaysUseDelegationTokensForTests = 
-      conf.getBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
-          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
-  }
-
   NamespaceInfo getNamespaceInfo() {
     readLock();
     try {
@@ -2761,7 +2754,7 @@ public class FSNamesystem implements Nam
   }
 
   private Lease reassignLease(Lease lease, String src, String newHolder,
-      INodeFileUnderConstruction pendingFile) throws IOException {
+      INodeFileUnderConstruction pendingFile) {
     assert hasWriteLock();
     if(newHolder == null)
       return lease;
@@ -3329,7 +3322,7 @@ public class FSNamesystem implements Nam
   }
 
   Date getStartTime() {
-    return new Date(systemStart); 
+    return new Date(startTime); 
   }
     
   void finalizeUpgrade() throws IOException {
@@ -3506,7 +3499,7 @@ public class FSNamesystem implements Nam
       if (!isPopulatingReplQueues() && !isInStandbyState()) {
         initializeReplQueues();
       }
-      long timeInSafemode = now() - systemStart;
+      long timeInSafemode = now() - startTime;
       NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                     + timeInSafemode/1000 + " secs.");
       NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
@@ -4876,7 +4869,7 @@ public class FSNamesystem implements Nam
    * 
    * @param key new delegation key.
    */
-  public void logUpdateMasterKey(DelegationKey key) throws IOException {
+  public void logUpdateMasterKey(DelegationKey key) {
     
     assert !isInSafeMode() :
       "this should never be called while in safemode, since we stop " +
@@ -4889,7 +4882,7 @@ public class FSNamesystem implements Nam
   }
   
   private void logReassignLease(String leaseHolder, String src,
-      String newHolder) throws IOException {
+      String newHolder) {
     writeLock();
     try {
       getEditLog().logReassignLease(leaseHolder, src, newHolder);
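
The FSNamesystem changes above fold initialize() and setConfigurationParameters() into the constructor, make most fields final, and add a loadFromDisk overload that takes explicit image and edits directories (used by NameNode.initializeSharedEdits further down). A minimal caller sketch of the new overload, assuming a formatted name directory; the wrapper class and the getBlockPoolId() call for output are illustrative, not part of this commit:

    import java.net.URI;
    import java.util.Collection;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    // Hypothetical caller: supply the image/edits directories explicitly
    // instead of letting loadFromDisk(conf) derive them from the conf.
    public class LoadFromDiskSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        Collection<URI> nameDirs = FSNamesystem.getNamespaceDirs(conf);
        List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf);
        FSNamesystem fsns = FSNamesystem.loadFromDisk(conf, nameDirs, editsDirs);
        System.out.println("Loaded block pool " + fsns.getBlockPoolId());
      }
    }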

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Apr  5 20:16:15 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -70,6 +71,9 @@ import org.apache.hadoop.tools.GetUserMa
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+
 /**********************************************************
  * NameNode serves as both directory namespace manager and
  * "inode table" for the Hadoop DFS.  There is a single NameNode
@@ -729,6 +733,67 @@ public class NameNode {
                 + "to true in order to format this filesystem");
     }
   }
+  
+  @VisibleForTesting
+  public static boolean initializeSharedEdits(Configuration conf) {
+    return initializeSharedEdits(conf, true);
+  }
+  
+  @VisibleForTesting
+  public static boolean initializeSharedEdits(Configuration conf,
+      boolean force) {
+    return initializeSharedEdits(conf, force, false);
+  }
+  
+  /**
+   * Format a new shared edits dir.
+   * 
+   * @param conf configuration
+   * @param force format regardless of whether or not the shared edits dir exists
+   * @param interactive prompt the user when a dir exists
+   * @return true if the command aborts, false otherwise
+   */
+  private static boolean initializeSharedEdits(Configuration conf,
+      boolean force, boolean interactive) {
+    NNStorage existingStorage = null;
+    try {
+      FSNamesystem fsns = FSNamesystem.loadFromDisk(conf,
+          FSNamesystem.getNamespaceDirs(conf),
+          FSNamesystem.getNamespaceEditsDirs(conf, false));
+      
+      existingStorage = fsns.getFSImage().getStorage();
+      
+      Collection<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
+      if (!confirmFormat(sharedEditsDirs, force, interactive)) {
+        return true; // aborted
+      }
+      NNStorage newSharedStorage = new NNStorage(conf,
+          Lists.<URI>newArrayList(),
+          sharedEditsDirs);
+      
+      newSharedStorage.format(new NamespaceInfo(
+          existingStorage.getNamespaceID(),
+          existingStorage.getClusterID(),
+          existingStorage.getBlockPoolID(),
+          existingStorage.getCTime(),
+          existingStorage.getDistributedUpgradeVersion()));
+    } catch (Exception e) {
+      LOG.error("Could not format shared edits dir", e);
+      return true; // aborted
+    } finally {
+      // Have to unlock storage explicitly for the case when we're running in a
+      // unit test, which runs in the same JVM as NNs.
+      if (existingStorage != null) {
+        try {
+          existingStorage.unlockAll();
+        } catch (IOException ioe) {
+          LOG.warn("Could not unlock storage directories", ioe);
+          return true; // aborted
+        }
+      }
+    }
+    return false; // did not abort
+  }
 
   private static boolean finalize(Configuration conf,
                                boolean isConfirmationNeeded
@@ -763,7 +828,8 @@ public class NameNode {
       StartupOption.ROLLBACK.getName() + "] | [" +
       StartupOption.FINALIZE.getName() + "] | [" +
       StartupOption.IMPORT.getName() + "] | [" +
-      StartupOption.BOOTSTRAPSTANDBY.getName() + "]");
+      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
+      StartupOption.INITIALIZESHAREDEDITS.getName() + "]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -804,6 +870,9 @@ public class NameNode {
       } else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.BOOTSTRAPSTANDBY;
         return startOpt;
+      } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.INITIALIZESHAREDEDITS;
+        return startOpt;
       } else {
         return null;
       }
@@ -868,29 +937,39 @@ public class NameNode {
     }
 
     switch (startOpt) {
-      case FORMAT:
+      case FORMAT: {
         boolean aborted = format(conf, false);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
-      case GENCLUSTERID:
+      }
+      case GENCLUSTERID: {
         System.err.println("Generating new cluster id:");
         System.out.println(NNStorage.newClusterID());
         System.exit(0);
         return null;
-      case FINALIZE:
-        aborted = finalize(conf, true);
+      }
+      case FINALIZE: {
+        boolean aborted = finalize(conf, true);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
-      case BOOTSTRAPSTANDBY:
+      }
+      case BOOTSTRAPSTANDBY: {
         String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
         int rc = BootstrapStandby.run(toolArgs, conf);
         System.exit(rc);
         return null; // avoid warning
+      }
+      case INITIALIZESHAREDEDITS: {
+        boolean aborted = initializeSharedEdits(conf, false, true);
+        System.exit(aborted ? 1 : 0);
+        return null; // avoid warning
+      }
       case BACKUP:
-      case CHECKPOINT:
+      case CHECKPOINT: {
         NamenodeRole role = startOpt.toNodeRole();
         DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
         return new BackupNode(conf, role);
+      }
       default:
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
@@ -1061,7 +1140,7 @@ public class NameNode {
 
     @Override
     public void startStandbyServices() throws IOException {
-      namesystem.startStandbyServices();
+      namesystem.startStandbyServices(conf);
     }
 
     @Override
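
The new -initializeSharedEdits startup option formats the configured shared edits directory with the local namespace's identity (namespace ID, cluster ID, block pool ID, cTime) so a standby can tail it. A sketch of the test-visible entry point added above; the driver class is hypothetical, and note the CLI path itself runs with force=false, interactive=true:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    // Hypothetical driver, equivalent in spirit to running
    //   hdfs namenode -initializeSharedEdits
    public class InitSharedEditsSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        boolean aborted = NameNode.initializeSharedEdits(conf);  // true == aborted
        System.exit(aborted ? 1 : 0);
      }
    }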

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java Thu Apr  5 20:16:15 2012
@@ -61,6 +61,7 @@ public class EditLogTailer {
   
   private final EditLogTailerThread tailerThread;
   
+  private final Configuration conf;
   private final FSNamesystem namesystem;
   private FSEditLog editLog;
   
@@ -98,13 +99,12 @@ public class EditLogTailer {
    */
   private long sleepTimeMs;
   
-  public EditLogTailer(FSNamesystem namesystem) {
+  public EditLogTailer(FSNamesystem namesystem, Configuration conf) {
     this.tailerThread = new EditLogTailerThread();
+    this.conf = conf;
     this.namesystem = namesystem;
     this.editLog = namesystem.getEditLog();
     
-
-    Configuration conf = namesystem.getConf();
     lastLoadTimestamp = now();
 
     logRollPeriodMs = conf.getInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,
@@ -129,14 +129,12 @@ public class EditLogTailer {
   }
   
   private InetSocketAddress getActiveNodeAddress() {
-    Configuration conf = namesystem.getConf();
     Configuration activeConf = HAUtil.getConfForOtherNode(conf);
     return NameNode.getServiceAddress(activeConf, true);
   }
   
   private NamenodeProtocol getActiveNodeProxy() throws IOException {
     if (cachedActiveProxy == null) {
-      Configuration conf = namesystem.getConf();
       NamenodeProtocolPB proxy = 
         RPC.waitForProxy(NamenodeProtocolPB.class,
             RPC.getProtocolVersion(NamenodeProtocolPB.class), activeAddr, conf);
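
With FSNamesystem.getConf() gone, EditLogTailer now receives its Configuration at construction time. A sketch of the new call-site shape, mirroring FSNamesystem.startStandbyServices(conf) above; the helper class is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;

    // Hypothetical helper: the conf is passed in explicitly rather than
    // pulled back out of the namesystem.
    class StandbyTailerStarter {
      static EditLogTailer startTailer(FSNamesystem ns, Configuration conf) {
        EditLogTailer tailer = new EditLogTailer(ns, conf);
        tailer.start();
        return tailer;
      }
    }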

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Thu Apr  5 20:16:15 2012
@@ -50,14 +50,19 @@ public class NamespaceInfo extends Stora
     super();
     buildVersion = null;
   }
-  
-  public NamespaceInfo(int nsID, String clusterID, String bpID, 
-      long cT, int duVersion) {
+
+  public NamespaceInfo(int nsID, String clusterID, String bpID,
+      long cT, int duVersion, String buildVersion) {
     super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
     blockPoolID = bpID;
-    buildVersion = Storage.getBuildVersion();
+    this.buildVersion = buildVersion;
     this.distributedUpgradeVersion = duVersion;
   }
+
+  public NamespaceInfo(int nsID, String clusterID, String bpID, 
+      long cT, int duVersion) {
+    this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion());
+  }
   
   public String getBuildVersion() {
     return buildVersion;
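
The six-argument NamespaceInfo constructor lets a caller supply the build version, while the five-argument form delegates with Storage.getBuildVersion(); this is what lets initializeSharedEdits (earlier in this commit) clone an existing storage's identity. A sketch of that usage; the helper class is hypothetical:

    import org.apache.hadoop.hdfs.server.namenode.NNStorage;
    import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

    // Hypothetical helper: copy the identity fields of existing storage
    // (the build version defaults to the local Storage.getBuildVersion()).
    class NamespaceInfoCopier {
      static NamespaceInfo copyIdentity(NNStorage storage) {
        return new NamespaceInfo(
            storage.getNamespaceID(), storage.getClusterID(),
            storage.getBlockPoolID(), storage.getCTime(),
            storage.getDistributedUpgradeVersion());
      }
    }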

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Thu Apr  5 20:16:15 2012
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -224,8 +225,7 @@ public class DelegationTokenFetcher {
       
       URL remoteURL = new URL(url.toString());
       SecurityUtil.fetchServiceTicket(remoteURL);
-      URLConnection connection = remoteURL.openConnection();
-
+      URLConnection connection = URLUtils.openConnection(remoteURL);
       InputStream in = connection.getInputStream();
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
@@ -265,7 +265,7 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " + 
             connection.getResponseMessage());
@@ -359,7 +359,7 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " + 
             connection.getResponseMessage());
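
All three HTTP paths in DelegationTokenFetcher now open connections through org.apache.hadoop.hdfs.web.URLUtils.openConnection rather than URL.openConnection directly, so connection setup (presumably timeouts and similar defaults) lives in one place. A sketch of the substituted pattern; the host and servlet path are placeholders:

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.hdfs.web.URLUtils;

    // Hypothetical snippet mirroring the renew/cancel code paths above.
    public class FetcherConnectionSketch {
      public static void main(String[] args) throws IOException {
        URL url = new URL("http://namenode.example.com:50070/renewDelegationToken");
        HttpURLConnection conn = (HttpURLConnection) URLUtils.openConnection(url);
        if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
          throw new IOException("Error renewing token: "
              + conn.getResponseMessage());
        }
        InputStream in = conn.getInputStream();
        in.close();
      }
    }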

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Thu Apr  5 20:16:15 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.net.InetSocketAddress;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
@@ -28,6 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.base.Preconditions;
+
 /**
  * One of the NN NameNodes acting as the target of an administrative command
  * (e.g. failover).
@@ -35,14 +38,36 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceAudience.Private
 public class NNHAServiceTarget extends HAServiceTarget {
 
+  // Keys added to the fencing script environment
+  private static final String NAMESERVICE_ID_KEY = "nameserviceid";
+  private static final String NAMENODE_ID_KEY = "namenodeid";
+  
   private final InetSocketAddress addr;
   private NodeFencer fencer;
   private BadFencingConfigurationException fenceConfigError;
+  private final String nnId;
+  private final String nsId;
 
   public NNHAServiceTarget(HdfsConfiguration conf,
       String nsId, String nnId) {
+    Preconditions.checkNotNull(nnId);
+    
+    if (nsId == null) {
+      nsId = DFSUtil.getOnlyNameServiceIdOrNull(conf);
+      if (nsId == null) {
+        throw new IllegalArgumentException(
+            "Unable to determine the nameservice id.");
+      }
+    }
+    assert nsId != null;
+    
+    // Make a copy of the conf, and override configs based on the
+    // target node -- not the node we happen to be running on.
+    HdfsConfiguration targetConf = new HdfsConfiguration(conf);
+    NameNode.initializeGenericKeys(targetConf, nsId, nnId);
+    
     String serviceAddr = 
-      DFSUtil.getNamenodeServiceAddr(conf, nsId, nnId);
+      DFSUtil.getNamenodeServiceAddr(targetConf, nsId, nnId);
     if (serviceAddr == null) {
       throw new IllegalArgumentException(
           "Unable to determine service address for namenode '" + nnId + "'");
@@ -50,10 +75,12 @@ public class NNHAServiceTarget extends H
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
     try {
-      this.fencer = NodeFencer.create(conf);
+      this.fencer = NodeFencer.create(targetConf);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }
+    this.nnId = nnId;
+    this.nsId = nsId;
   }
 
   /**
@@ -81,4 +108,19 @@ public class NNHAServiceTarget extends H
     return "NameNode at " + addr;
   }
 
+  public String getNameServiceId() {
+    return this.nsId;
+  }
+  
+  public String getNameNodeId() {
+    return this.nnId;
+  }
+
+  @Override
+  protected void addFencingParameters(Map<String, String> ret) {
+    super.addFencingParameters(ret);
+    
+    ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
+    ret.put(NAMENODE_ID_KEY, getNameNodeId());
+  }
 }
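
Two behavioral changes in NNHAServiceTarget: the service address and fencer are resolved against a copy of the conf with the target node's generic keys applied (so admin commands act on the target's settings, not the local node's), and fencing scripts now see nameserviceid/namenodeid parameters. A construction sketch; "mycluster" and "nn2" are placeholder IDs:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    // Hypothetical snippet: resolve the target of an admin command.
    public class TargetSketch {
      public static void main(String[] args) {
        HdfsConfiguration conf = new HdfsConfiguration();
        NNHAServiceTarget target = new NNHAServiceTarget(conf, "mycluster", "nn2");
        // toString() prints "NameNode at <host:port>", resolved from nn2's keys.
        System.out.println(target + " ns=" + target.getNameServiceId()
            + " nn=" + target.getNameNodeId());
      }
    }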

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java Thu Apr  5 20:16:15 2012
@@ -17,104 +17,51 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import java.io.FileOutputStream;
-import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 
 /**
  * BinaryEditsVisitor implements a binary EditsVisitor
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class BinaryEditsVisitor extends EditsVisitor {
-  final private DataOutputStream out;
+public class BinaryEditsVisitor implements OfflineEditsVisitor {
+  final private EditLogFileOutputStream elfos;
 
   /**
-   * Create a processor that writes to a given file and
-   * reads using a given Tokenizer
+   * Create a processor that writes to a given file
    *
    * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
    */
-  public BinaryEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    this(filename, tokenizer, false);
-  }
-
-  /**
-   * Create a processor that writes to a given file and reads using
-   * a given Tokenizer, may also print to screen
-   *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen? (ignored for binary)
-   */
-  public BinaryEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(tokenizer);
-    out = new DataOutputStream(new FileOutputStream(filename));
+  public BinaryEditsVisitor(String outputName) throws IOException {
+    this.elfos = new EditLogFileOutputStream(new File(outputName), 0);
+    elfos.create();
   }
 
   /**
    * Start the visitor (initialization)
    */
   @Override
-  void start() throws IOException {
-    // nothing to do for binary format
+  public void start(int version) throws IOException {
   }
 
   /**
    * Finish the visitor
    */
   @Override
-  void finish() throws IOException {
-    close();
-  }
-
-  /**
-   * Finish the visitor and indicate an error
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    System.err.println("Error processing EditLog file.  Exiting.");
-    close();
-  }
-
-  /**
-   * Close output stream and prevent further writing
-   */
-  private void close() throws IOException {
-    out.close();
-  }
-
-  /**
-   * Visit a enclosing element (element that has other elements in it)
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    // nothing to do for binary format
+  public void close(Throwable error) throws IOException {
+    elfos.setReadyToFlush();
+    elfos.flushAndSync();
+    elfos.close();
   }
 
-  /**
-   * End of eclosing element
-   */
-  @Override
-  void leaveEnclosingElement() throws IOException {
-    // nothing to do for binary format
-  }  
-
-  /**
-   * Visit a Token
-   */
   @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    value.toBinary(out);
-    return value;
+  public void visitOp(FSEditLogOp op) throws IOException {
+    elfos.write(op);
   }
-}
+}
\ No newline at end of file
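
BinaryEditsVisitor now implements the new OfflineEditsVisitor interface (start/visitOp/close) and writes through EditLogFileOutputStream, so its output is a real edit log rather than raw token bytes. For reference, a hypothetical minimal visitor against the same interface, assuming it is visible from the caller's package, showing the lifecycle a loader drives:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
    import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsVisitor;

    // Hypothetical visitor: start(version) once, visitOp per op, then
    // close(error), where error is non-null if loading failed.
    class OpCountingVisitor implements OfflineEditsVisitor {
      private long numOps = 0;

      @Override
      public void start(int version) throws IOException {
        // nothing to initialize
      }

      @Override
      public void visitOp(FSEditLogOp op) throws IOException {
        numOps++;
      }

      @Override
      public void close(Throwable error) throws IOException {
        System.out.println("visited " + numOps + " ops"
            + (error == null ? "" : ", aborted by: " + error));
      }
    }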

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java Thu Apr  5 20:16:15 2012
@@ -18,12 +18,16 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsLoader.OfflineEditsLoaderFactory;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -33,6 +37,7 @@ import org.apache.commons.cli.OptionBuil
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
+import org.xml.sax.SAXParseException;
 
 /**
  * This class implements an offline edits viewer, tool that
@@ -42,29 +47,9 @@ import org.apache.commons.cli.PosixParse
 @InterfaceStability.Unstable
 public class OfflineEditsViewer extends Configured implements Tool {
 
-  private EditsLoader  editsLoader;
   private final static String defaultProcessor = "xml";
 
   /**
-   * Set editsLoader
-   *
-   * @param editsLoader EditsLoader
-   */
-  private void setEditsLoader(EditsLoader editsLoader) {
-    this.editsLoader = editsLoader;
-  }
-
-  /**
-   * Process EditLog file.
-   *
-   * @param visitor use this visitor to process the file
-   */
-  public void go(EditsVisitor visitor) throws IOException  {
-    setEditsLoader(EditsLoader.LoaderFactory.getLoader(visitor));
-    editsLoader.loadEdits();
-  }
-
-  /**
    * Print help.
    */  
   private void printHelp() {
@@ -90,6 +75,9 @@ public class OfflineEditsViewer extends 
       "                       format), stats (prints statistics about\n" +
       "                       edits file)\n" +
       "-h,--help              Display usage information and exit\n" +
+      "-f,--fix-txids         Renumber the transaction IDs in the input,\n" +
+      "                       so that there are no gaps or invalid " +
+      "                       transaction IDs.\n" +
       "-v,--verbose           More verbose output, prints the input and\n" +
       "                       output filenames, for processors that write\n" +
       "                       to a file, also output to screen. On large\n" +
@@ -124,11 +112,48 @@ public class OfflineEditsViewer extends 
     
     options.addOption("p", "processor", true, "");
     options.addOption("v", "verbose", false, "");
+    options.addOption("f", "fix-txids", false, "");
     options.addOption("h", "help", false, "");
 
     return options;
   }
 
+  /** Process an edit log using the chosen processor or visitor.
+   * 
+   * @param inputFileName   The file to process
+   * @param outputFileName  The output file name
+   * @param processor       If visitor is null, the processor to use
+   * @param visitor         If non-null, the visitor to use.
+   * 
+   * @return                0 on success; error code otherwise
+   */
+  public int go(String inputFileName, String outputFileName, String processor,
+      boolean printToScreen, boolean fixTxIds, OfflineEditsVisitor visitor)
+  {
+    if (printToScreen) {
+      System.out.println("input  [" + inputFileName  + "]");
+      System.out.println("output [" + outputFileName + "]");
+    }
+    try {
+      if (visitor == null) {
+        visitor = OfflineEditsVisitorFactory.getEditsVisitor(
+            outputFileName, processor, printToScreen);
+      }
+      boolean xmlInput = inputFileName.endsWith(".xml");
+      OfflineEditsLoader loader = OfflineEditsLoaderFactory.
+          createLoader(visitor, inputFileName, xmlInput);
+      if (fixTxIds) {
+        loader.setFixTxIds();
+      }
+      loader.loadEdits();
+    } catch(Exception e) {
+      System.err.println("Encountered exception. Exiting: " + e.getMessage());
+      e.printStackTrace(System.err);
+      return -1;
+    }
+    return 0;
+  }
+
   /**
    * Main entry point for ToolRunner (see ToolRunner docs)
    *
@@ -137,17 +162,13 @@ public class OfflineEditsViewer extends 
    */
   @Override
   public int run(String[] argv) throws Exception {
-    int exitCode = 0;
-
     Options options = buildOptions();
     if(argv.length == 0) {
       printHelp();
       return -1;
     }
-
     CommandLineParser parser = new PosixParser();
     CommandLine cmd;
-
     try {
       cmd = parser.parse(options, argv);
     } catch (ParseException e) {
@@ -156,37 +177,20 @@ public class OfflineEditsViewer extends 
       printHelp();
       return -1;
     }
-
     if(cmd.hasOption("h")) { // print help and exit
       printHelp();
       return -1;
     }
-
-    boolean printToScreen    = false;
-    String inputFilenameArg  = cmd.getOptionValue("i");
-    String outputFilenameArg = cmd.getOptionValue("o");
-    String processor         = cmd.getOptionValue("p");
-    if(processor == null) { processor = defaultProcessor; }
-
-    if(cmd.hasOption("v")) { // print output to screen too
-      printToScreen = true;
-      System.out.println("input  [" + inputFilenameArg  + "]");
-      System.out.println("output [" + outputFilenameArg + "]");
+    String inputFileName = cmd.getOptionValue("i");
+    String outputFileName = cmd.getOptionValue("o");
+    String processor = cmd.getOptionValue("p");
+    if(processor == null) {
+      processor = defaultProcessor;
     }
-
-    try {
-      go(EditsVisitorFactory.getEditsVisitor(
-        outputFilenameArg,
-        processor,
-        TokenizerFactory.getTokenizer(inputFilenameArg),
-        printToScreen));
-    } catch (EOFException e) {
-      System.err.println("Input file ended unexpectedly. Exiting");
-    } catch(IOException e) {
-      System.err.println("Encountered exception. Exiting: " + e.getMessage());
-    }
-
-    return exitCode;
+    boolean printToScreen = cmd.hasOption("v");
+    boolean fixTxIds = cmd.hasOption("f");
+    return go(inputFileName, outputFileName, processor,
+        printToScreen, fixTxIds, null);
   }
 
   /**
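
run() now funnels everything into the new go(...) overload, and -f/--fix-txids renumbers transaction IDs while loading. A hypothetical driver doing roughly what the command line does; the input and output file names are placeholders:

    import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
    import org.apache.hadoop.util.ToolRunner;

    // Hypothetical driver, roughly equivalent to:
    //   hdfs oev -i edits -o edits.xml -p xml -f
    public class OevDriverSketch {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(new OfflineEditsViewer(), new String[] {
            "-i", "edits", "-o", "edits.xml", "-p", "xml", "-f"});
        System.exit(rc);
      }
    }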

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java Thu Apr  5 20:16:15 2012
@@ -19,12 +19,15 @@ package org.apache.hadoop.hdfs.tools.off
 
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
 import java.util.Map;
 import java.util.HashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 
 /**
@@ -34,26 +37,14 @@ import org.apache.hadoop.hdfs.server.nam
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class StatisticsEditsVisitor extends EditsVisitor {
-  private boolean printToScreen = false;
-  private boolean okToWrite = false;
-  final private FileWriter fw;
+public class StatisticsEditsVisitor implements OfflineEditsVisitor {
+  final private PrintStream out;
 
-  public final Map<FSEditLogOpCodes, Long> opCodeCount =
+  private int version = -1;
+  private final Map<FSEditLogOpCodes, Long> opCodeCount =
     new HashMap<FSEditLogOpCodes, Long>();
 
   /**
-   * Create a processor that writes to the file named.
-   *
-   * @param filename Name of file to write output to
-   */
-  public StatisticsEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    this(filename, tokenizer, false);
-  }
-
-  /**
    * Create a processor that writes to the file named and may or may not
    * also output to the screen, as specified.
    *
@@ -61,103 +52,29 @@ public class StatisticsEditsVisitor exte
    * @param tokenizer Input tokenizer
    * @param printToScreen Mirror output to screen?
    */
-  public StatisticsEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(tokenizer);
-    this.printToScreen = printToScreen;
-    fw = new FileWriter(filename);
-    okToWrite = true;
+  public StatisticsEditsVisitor(OutputStream out) throws IOException {
+    this.out = new PrintStream(out);
   }
 
-  /**
-   * Start the visitor (initialization)
-   */
+  /** Start the visitor */
   @Override
-  void start() throws IOException {
-    // nothing to do
+  public void start(int version) throws IOException {
+    this.version = version;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitor#finish()
-   */
-  @Override
-  void finish() throws IOException {
-    write(getStatisticsString());
-    close();
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitor#finishAbnormally()
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    close();
-  }
-
-  /**
-   * Close output stream and prevent further writing
-   */
-  private void close() throws IOException {
-    fw.close();
-    okToWrite = false;
-  }
-
-  /**
-   * Visit a enclosing element (element that has other elements in it)
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    // nothing to do
-  }
-
-  /**
-   * End of eclosing element
-   */
+  /** Close the visitor */
   @Override
-  void leaveEnclosingElement() throws IOException {
-    // nothing to do
-  }  
-
-  /**
-   * Visit a Token, calculate statistics
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    // count the opCodes
-    if(value.getEditsElement() == EditsElement.OPCODE) {
-      if(value instanceof Tokenizer.ByteToken) {
-        incrementOpCodeCount(
-          FSEditLogOpCodes.fromByte(((Tokenizer.ByteToken)value).value));
-      } else {
-        throw new IOException("Token for EditsElement.OPCODE should be " +
-          "of type Tokenizer.ByteToken, not " + value.getClass());
-      }
+  public void close(Throwable error) throws IOException {
+    out.print(getStatisticsString());
+    if (error != null) {
+      out.print("EXITING ON ERROR: " + error.toString() + "\n");
     }
-    return value;
+    out.close();
   }
 
-  /**
-   * Write parameter to output file (and possibly screen).
-   *
-   * @param toWrite Text to write to file
-   */
-  protected void write(String toWrite) throws IOException  {
-    if(!okToWrite)
-      throw new IOException("file not open for writing.");
-
-    if(printToScreen)
-      System.out.print(toWrite);
-
-    try {
-      fw.write(toWrite);
-    } catch (IOException e) {
-      okToWrite = false;
-      throw e;
-    }
+  @Override
+  public void visitOp(FSEditLogOp op) throws IOException {
+    incrementOpCodeCount(op.opCode);
   }
 
   /**
@@ -189,13 +106,16 @@ public class StatisticsEditsVisitor exte
    */
   public String getStatisticsString() {
     StringBuffer sb = new StringBuffer();
+    sb.append(String.format(
+        "    %-30.30s      : %d%n",
+        "VERSION", version));
     for(FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
       sb.append(String.format(
         "    %-30.30s (%3d): %d%n",
-        opCode,
+        opCode.toString(),
         opCode.getOpCode(),
         opCodeCount.get(opCode)));
     }
     return sb.toString();
   }
-}
+}
\ No newline at end of file
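
Since StatisticsEditsVisitor now takes an OutputStream, it can be pointed at System.out directly and handed to the viewer's go(...) entry point. A sketch; "edits" is a placeholder path, and passing null for the output file name relies on go() only using that name for the -v banner, which is an assumption worth flagging:

    import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
    import org.apache.hadoop.hdfs.tools.offlineEditsViewer.StatisticsEditsVisitor;

    // Hypothetical snippet: stream per-opcode counts for a binary edits
    // file to stdout; close(null) prints the table when loading finishes.
    public class EditsStatsSketch {
      public static void main(String[] args) throws Exception {
        StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(System.out);
        int rc = new OfflineEditsViewer().go(
            "edits", null, "stats", false, false, visitor);
        System.exit(rc);
      }
    }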

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java Thu Apr  5 20:16:15 2012
@@ -18,12 +18,19 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.io.OutputStream;
 
-import org.apache.hadoop.hdfs.tools.offlineImageViewer.DepthCounter;
+import org.apache.hadoop.hdfs.util.XMLUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.AttributesImpl;
+
+import com.sun.org.apache.xml.internal.serialize.OutputFormat;
+import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
 
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out
@@ -31,140 +38,85 @@ import org.apache.hadoop.classification.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class XmlEditsVisitor extends TextEditsVisitor {
-  final private LinkedList<EditsElement> tagQ =
-    new LinkedList<EditsElement>();
-
-  final private DepthCounter depthCounter = new DepthCounter();
+public class XmlEditsVisitor implements OfflineEditsVisitor {
+  private OutputStream out;
+  private ContentHandler contentHandler;
 
   /**
-   * Create a processor that writes to the file named and may or may not
-   * also output to the screen, as specified.
-   *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
+   * Create a visitor that writes its XML-formatted output to the given
+   * output stream.
+   *
+   * @param out Stream to write the XML output to
    */
-  public XmlEditsVisitor(String filename, Tokenizer tokenizer)
-    throws IOException {
-
-    super(filename, tokenizer, false);
-  }
-
-  /**
-   * Create a processor that writes to the file named and may or may not
-   * also output to the screen, as specified.
-   *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen? (ignored for binary)
-   */
-  public XmlEditsVisitor(String filename,
-    Tokenizer tokenizer,
-    boolean printToScreen) throws IOException {
-
-    super(filename, tokenizer, printToScreen);
+  public XmlEditsVisitor(OutputStream out)
+      throws IOException {
+    this.out = out;
+    OutputFormat outFormat = new OutputFormat("XML", "UTF-8", true);
+    outFormat.setIndenting(true);
+    outFormat.setIndent(2);
+    outFormat.setDoctype(null, null);
+    XMLSerializer serializer = new XMLSerializer(out, outFormat);
+    contentHandler = serializer.asContentHandler();
+    try {
+      contentHandler.startDocument();
+      contentHandler.startElement("", "", "EDITS", new AttributesImpl());
+    } catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
   }
 
   /**
    * Start visitor (initialization)
    */
   @Override
-  void start() throws IOException {
-    write("<?xml version=\"1.0\"?>\n");
-  }
-
-  /**
-   * Finish visitor
-   */
-  @Override
-  void finish() throws IOException {
-    super.finish();
-  }
-
-  /**
-   * Finish with error
-   */
-  @Override
-  void finishAbnormally() throws IOException {
-    write("\n<!-- Error processing EditLog file.  Exiting -->\n");
-    super.finishAbnormally();
-  }
-
-  /**
-   * Visit a Token
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  Tokenizer.Token visit(Tokenizer.Token value) throws IOException {
-    writeTag(value.getEditsElement().toString(), value.toString());
-    return value;
+  public void start(int version) throws IOException {
+    try {
+      contentHandler.startElement("", "", "EDITS_VERSION", new AttributesImpl());
+      StringBuilder bld = new StringBuilder();
+      bld.append(version);
+      addString(bld.toString());
+      contentHandler.endElement("", "", "EDITS_VERSION");
+    }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
   }
 
-  /**
-   * Visit an enclosing element (element that contains other elements)
-   *
-   * @param value a Token to visit
-   */
-  @Override
-  void visitEnclosingElement(Tokenizer.Token value) throws IOException {
-    printIndents();
-    write("<" + value.getEditsElement().toString() + ">\n");
-    tagQ.push(value.getEditsElement());
-    depthCounter.incLevel();
+  public void addString(String str) throws SAXException {
+    int slen = str.length();
+    char arr[] = new char[slen];
+    str.getChars(0, slen, arr, 0);
+    contentHandler.characters(arr, 0, slen);
   }
-
+  
   /**
-   * Leave enclosing element
+   * Finish visitor
    */
   @Override
-  void leaveEnclosingElement() throws IOException {
-    depthCounter.decLevel();
-    if(tagQ.size() == 0)
-      throw new IOException("Tried to exit non-existent enclosing element " +
-                "in EditLog file");
-
-    EditsElement element = tagQ.pop();
-    printIndents();
-    write("</" + element.toString() + ">\n");
-  }
-
-  /**
-   * Write an XML tag
-   *
-   * @param tag a tag name
-   * @param value a tag value
-   */
-  private void writeTag(String tag, String value) throws IOException {
-    printIndents();
-    if(value.length() > 0) {
-      write("<" + tag + ">" + value + "</" + tag + ">\n");
-    } else {
-      write("<" + tag + "/>\n");
+  public void close(Throwable error) throws IOException {
+    try {
+      contentHandler.endElement("", "", "EDITS");
+      if (error != null) {
+        String msg = error.getMessage();
+        XMLUtils.addSaxString(contentHandler, "ERROR",
+            (msg == null) ? "null" : msg);
+      }
+      contentHandler.endDocument();
     }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
+    }
+    out.close();
   }
 
-  // prepared values that printIndents is likely to use
-  final private static String [] indents = {
-     "",
-     "  ",
-     "    ",
-     "      ",
-     "        ",
-     "          ",
-     "            " };
-
-  /**
-   * Prints the leading spaces based on depth level
-   */
-  private void printIndents() throws IOException {
+  @Override
+  public void visitOp(FSEditLogOp op) throws IOException {
     try {
-      write(indents[depthCounter.getLevel()]);
-    } catch (IndexOutOfBoundsException e) {
-      // unlikely needed so can be slow
-      for(int i = 0; i < depthCounter.getLevel(); i++)
-        write("  ");
+      op.outputToXml(contentHandler);
+    }
+    catch (SAXException e) {
+      throw new IOException("SAX error: " + e.getMessage());
     }
-   
   }
 }
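
Note that the serializer used above comes from
com.sun.org.apache.xml.internal.serialize, a JDK-internal package, so the new
XmlEditsVisitor is tied to JDK implementation details rather than a public XML
API. A hedged usage sketch of the new stream-based constructor follows; the
output file name and the layout version (-40) are made up for illustration,
and the demo is assumed to sit in the visitor's own package:

    import java.io.FileOutputStream;
    import java.io.OutputStream;

    public class XmlVisitorDemo {
      public static void main(String[] args) throws Exception {
        OutputStream out = new FileOutputStream("edits.xml");
        XmlEditsVisitor visitor = new XmlEditsVisitor(out);
        visitor.start(-40);   // writes the <EDITS_VERSION> element
        // ... call visitor.visitOp(op) for each FSEditLogOp read from the log ...
        visitor.close(null);  // closes <EDITS>, ends the document, closes out
      }
    }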

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Apr  5 20:16:15 2012
@@ -513,18 +513,13 @@ public class JsonUtil {
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
 
     final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
-    final int bytesPerCRC = in.readInt();
-    final long crcPerBlock = in.readLong();
-    final MD5Hash md5 = MD5Hash.read(in);
-    final MD5MD5CRC32FileChecksum checksum = new MD5MD5CRC32FileChecksum(
-        bytesPerCRC, crcPerBlock, md5);
+    final MD5MD5CRC32FileChecksum checksum = new MD5MD5CRC32FileChecksum();
+    checksum.readFields(in);
 
     //check algorithm name
-    final String alg = "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + "CRC32";
-    if (!alg.equals(algorithm)) {
-      throw new IOException("Algorithm not matched: algorithm=" + algorithm
-          + ", crcPerBlock=" + crcPerBlock
-          + ", bytesPerCRC=" + bytesPerCRC);
+    if (!checksum.getAlgorithmName().equals(algorithm)) {
+      throw new IOException("Algorithm not matched. Expected " + algorithm
+          + ", Received " + checksum.getAlgorithmName());
     }
     //check length
     if (length != checksum.getLength()) {
@@ -534,4 +529,4 @@ public class JsonUtil {
 
     return checksum;
   }
-}
\ No newline at end of file
+}
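
The rewrite above replaces field-by-field decoding with the checksum's own
Writable implementation, so the wire format stays defined in one place. A
hedged round-trip sketch follows; the bytesPerCRC/crcPerBlock values and the
dummy digest are made up for the example:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
    import org.apache.hadoop.io.MD5Hash;

    public class ChecksumRoundTrip {
      public static void main(String[] args) throws Exception {
        MD5MD5CRC32FileChecksum before = new MD5MD5CRC32FileChecksum(
            512, 1, new MD5Hash(new byte[16]));  // dummy values for the sketch
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        before.write(new DataOutputStream(bos));

        // readFields() is the inverse of write(), which is what JsonUtil now uses
        MD5MD5CRC32FileChecksum after = new MD5MD5CRC32FileChecksum();
        after.readFields(new DataInputStream(
            new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(after.getAlgorithmName());
      }
    }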

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1308633-1310044

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1308633-1310044

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1308633-1310044

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1308633-1310044

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1308633-1310044

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Apr  5 20:16:15 2012
@@ -731,7 +731,9 @@ public class MiniDFSCluster {
       Preconditions.checkArgument(!dstDir.equals(srcDir));
       File dstDirF = new File(dstDir);
       if (dstDirF.exists()) {
-        Files.deleteRecursively(dstDirF);
+        if (!FileUtil.fullyDelete(dstDirF)) {
+          throw new IOException("Unable to delete: " + dstDirF);
+        }
       }
       LOG.info("Copying namedir from primary node dir "
           + srcDir + " to " + dstDir);
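
FileUtil.fullyDelete(File) reports failure through its boolean return value
rather than by throwing, so the explicit check above is what turns a silent
partial delete into a hard failure. A small sketch of that delete-or-throw
idiom (the helper class and method names are ours, not Hadoop's):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileUtil;

    final class TestDirs {
      /** Delete dir recursively, or fail loudly instead of running on stale data. */
      static void deleteOrThrow(File dir) throws IOException {
        if (dir.exists() && !FileUtil.fullyDelete(dir)) {
          throw new IOException("Unable to delete: " + dir);
        }
      }
    }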

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Apr  5 20:16:15 2012
@@ -179,6 +179,17 @@ public class TestDFSUtil {
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
   }
+  
+  @Test
+  public void testGetOnlyNameServiceIdOrNull() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
+  }
 
   /**
    * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
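
The new test pins down the contract of DFSUtil.getOnlyNameServiceIdOrNull: the
id is returned only when exactly one nameservice is configured, and null for
zero or several. A hypothetical re-implementation matching those semantics
(the real method body may differ):

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class NsIdSketch {
      static String getOnlyNameServiceIdOrNull(Configuration conf) {
        Collection<String> nsIds = conf.getTrimmedStringCollection(
            DFSConfigKeys.DFS_FEDERATION_NAMESERVICES);
        // exactly one configured nameservice -> return it; otherwise null
        return nsIds.size() == 1 ? nsIds.iterator().next() : null;
      }
    }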

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Thu Apr  5 20:16:15 2012
@@ -23,8 +23,8 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
@@ -143,8 +143,7 @@ public class BlockManagerTestUtil {
    * {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
    * a high value to ensure that all work is calculated.
    */
-  public static int computeAllPendingWork(BlockManager bm)
-    throws IOException {
+  public static int computeAllPendingWork(BlockManager bm) {
     int work = computeInvalidationWork(bm);
     work += bm.computeReplicationWork(Integer.MAX_VALUE);
     return work;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Apr  5 20:16:15 2012
@@ -37,6 +37,7 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -181,7 +182,9 @@ public abstract class FSImageTestUtil {
   public static FSEditLog createStandaloneEditLog(File logDir)
       throws IOException {
     assertTrue(logDir.mkdirs() || logDir.exists());
-    Files.deleteDirectoryContents(logDir);
+    if (!FileUtil.fullyDeleteContents(logDir)) {
+      throw new IOException("Unable to delete contents of " + logDir);
+    }
     NNStorage storage = Mockito.mock(NNStorage.class);
     StorageDirectory sd 
       = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Thu Apr  5 20:16:15 2012
@@ -179,12 +179,22 @@ public class TestBackupNode {
       
       // do some edits
       assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
-      
+  
       // start a new backup node
       backup = startBackupNode(conf, StartupOption.BACKUP, 1);
 
       testBNInSync(cluster, backup, 4);
       assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
+      
+      // Trigger an unclean shutdown of the backup node. The backup node will
+      // not unregister from the active when stopped this way, simulating a
+      // node crash.
+      backup.stop(false);
+           
+      // Do some edits on the active. These should go through without failing,
+      // verifying that the active is still up and can add entries to the
+      // master edit log.
+      assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
+      
     } finally {
       LOG.info("Shutting down...");
       if (backup != null) backup.stop();
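
The point of stop(false) is that a graceful stop tells the active NameNode to
drop the backup node from its journal targets, while a crash leaves a stale
registration behind; the mkdirs that follows proves the active tolerates that.
A minimal sketch of the shutdown split follows; all names here are
assumptions, not the actual BackupNode code:

    /** Sketch only -- the real BackupNode internals are not part of this patch. */
    abstract class BackupNodeShutdownSketch {
      /** Hypothetical RPC telling the active to drop this backup node. */
      abstract void unregisterFromActiveNameNode();
      /** Hypothetical local teardown shared by both paths. */
      abstract void stopLocalServices();

      void stop(boolean unregisterFromActive) {
        if (unregisterFromActive) {
          // graceful path: the active stops streaming edits to this node
          unregisterFromActiveNameNode();
        }
        // the unclean path skips the RPC, just as a crashed process would
        stopLocalServices();
      }
    }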

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java Thu Apr  5 20:16:15 2012
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.GenericTestUtils;
 
 /**
  * This class tests the validation of the configuration object when passed 
@@ -72,6 +74,7 @@ public class TestValidateConfigurationSe
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
     DFSTestUtil.formatNameNode(conf);
     NameNode nameNode = new NameNode(conf); // should be OK!
+    nameNode.stop();
   }
 
   /**
@@ -82,16 +85,30 @@ public class TestValidateConfigurationSe
   public void testGenericKeysForNameNodeFormat()
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    FileSystem.setDefaultUri(conf, "hdfs://localhost:8070");
+
+    // Set ephemeral ports 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
+        "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        "127.0.0.1:0");
+    
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
-    String nameDir = System.getProperty("java.io.tmpdir") + "/test.dfs.name";
-    File dir = new File(nameDir);
+    
+    // Set a nameservice-specific configuration for name dir
+    File dir = new File(MiniDFSCluster.getBaseDirectory(),
+        "testGenericKeysForNameNodeFormat");
     if (dir.exists()) {
       FileUtil.fullyDelete(dir);
     }
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1", nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
+        dir.getAbsolutePath());
+    
+    // Format and verify the right dir is formatted.
     DFSTestUtil.formatNameNode(conf);
+    GenericTestUtils.assertExists(dir);
+
+    // Ensure that the same dir is picked up by the running NN
     NameNode nameNode = new NameNode(conf);
-    FileUtil.fullyDelete(dir);
+    nameNode.stop();
   }
 }
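
Binding to port 0 is the standard trick for tests: the kernel assigns a free
ephemeral port, so parallel test runs cannot collide on a hard-coded 8070 or
9000. A self-contained demonstration of that behavior:

    import java.net.ServerSocket;

    public class EphemeralPortDemo {
      public static void main(String[] args) throws Exception {
        ServerSocket ss = new ServerSocket(0);  // 0 = let the OS choose
        System.out.println("bound to ephemeral port " + ss.getLocalPort());
        ss.close();
      }
    }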

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Thu Apr  5 20:16:15 2012
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.codehaus.jackson.sym.NameN;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Thu Apr  5 20:16:15 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import static org.junit.Assert.*;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
@@ -41,6 +42,7 @@ import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.io.Files;
 
 /**
  * Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
@@ -59,6 +61,8 @@ public class TestDFSHAAdminMiniCluster {
 
   private String errOutput;
 
+  private int nn1Port;
+
   @Before
   public void setup() throws IOException {
     conf = new Configuration();
@@ -69,6 +73,8 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     tool.setErrOut(new PrintStream(errOutBytes));
     cluster.waitActive();
+    
+    nn1Port = cluster.getNameNodePort(0);
   }
 
   @After
@@ -124,9 +130,17 @@ public class TestDFSHAAdminMiniCluster {
   public void testFencer() throws Exception { 
     // Test failover with no fencer
     assertEquals(-1, runTool("-failover", "nn1", "nn2"));
-    
+
+    // Set up the fencer to write info about the fencing target into a
+    // tmp file, so we can verify that the args were substituted correctly.
+    File tmpFile = File.createTempFile("testFencer", ".txt");
+    tmpFile.deleteOnExit();
+    conf.set(NodeFencer.CONF_METHODS_KEY,
+        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
+        "$target_port $dfs_ha_namenode_id > " +
+        tmpFile.getAbsolutePath() + ")");
+
     // Test failover with fencer
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
@@ -134,21 +148,36 @@ public class TestDFSHAAdminMiniCluster {
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
 
+    // Fencer has not run yet, since none of the above required fencing 
+    assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
+
     // Test failover with fencer and forcefence option
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
-      
+    
+    // The fence script should run with the configuration from the target
+    // node, rather than the configuration from the fencing node
+    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",
+        Files.toString(tmpFile, Charsets.UTF_8));
+    tmpFile.delete();
+    
     // Test failover with forceactive option
     assertEquals(0, runTool("-failover", "nn2", "nn1", "--forceactive"));
+
+    // Fencing should not occur, since it was graceful
+    assertFalse(tmpFile.exists());
+
           
     // Test failover with no fencer and forcefence option
     conf.unset(NodeFencer.CONF_METHODS_KEY);
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
-    
+    assertFalse(tmpFile.exists());
+
     // Test failover with bad fencer and forcefence option
     conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
     conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
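
The shell(...) fencing method substitutes target-specific variables into the
command line before running it; this test exercises $target_nameserviceid,
$target_namenodeid, $target_port, and $dfs_ha_namenode_id (a configuration key
with dots mapped to underscores). A hedged configuration sketch that reuses
the conf and tool fields from the test above and only the variables the test
confirms; the script path is made up:

    // Sketch: fence by invoking a site-specific script with the target's identity.
    conf.set(NodeFencer.CONF_METHODS_KEY,
        "shell(/opt/ha/fence.sh $target_nameserviceid.$target_namenodeid " +
        "$target_port)");
    tool.setConf(conf);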

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1310048&r1=1310047&r2=1310048&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Thu Apr  5 20:16:15 2012
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.File;
 import java.nio.ByteBuffer;
@@ -33,8 +34,6 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.TokenizerFactory;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.EditsVisitorFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
@@ -158,11 +157,8 @@ public class TestOfflineEditsViewer {
     LOG.info("Running oev [" + inFilename + "] [" + outFilename + "]");
 
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    oev.go( EditsVisitorFactory.getEditsVisitor(
-      outFilename,
-      processor,
-      TokenizerFactory.getTokenizer(inFilename),
-      false));
+    if (oev.go(inFilename, outFilename, processor, true, false, null) != 0)
+      throw new RuntimeException("oev failed");
   }
 
   /**
@@ -173,14 +169,11 @@ public class TestOfflineEditsViewer {
    */
   private boolean hasAllOpCodes(String inFilename) throws IOException {
     String outFilename = inFilename + ".stats";
-    StatisticsEditsVisitor visitor =
-      (StatisticsEditsVisitor)EditsVisitorFactory.getEditsVisitor(
-        outFilename,
-        "stats",
-        TokenizerFactory.getTokenizer(inFilename),
-        false);
+    FileOutputStream fout = new FileOutputStream(outFilename);
+    StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(fout);
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    oev.go(visitor);
+    if (oev.go(inFilename, outFilename, "stats", false, false, visitor) != 0)
+      return false;
     LOG.info("Statistics for " + inFilename + "\n" +
       visitor.getStatisticsString());
     
@@ -190,6 +183,8 @@ public class TestOfflineEditsViewer {
       if(obsoleteOpCodes.containsKey(opCode)) {
         continue;
       }
+      if (opCode == FSEditLogOpCodes.OP_INVALID)
+        continue;
       Long count = visitor.getStatistics().get(opCode);
       if((count == null) || (count == 0)) {
         hasAllOpCodes = false;