Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/04 22:00:18 UTC

svn commit: r1309576 - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagemen...

Author: todd
Date: Wed Apr  4 20:00:15 2012
New Revision: 1309576

URL: http://svn.apache.org/viewvc?rev=1309576&view=rev
Log:
Merge trunk into auto-HA branch

Resolved some trivial conflicts in NNHAServiceTarget

Added:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/
      - copied from r1309567, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
      - copied unchanged from r1309567, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
      - copied unchanged from r1309567, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
Modified:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1309162-1309567

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr  4 20:00:15 2012
@@ -22,9 +22,6 @@ Trunk (unreleased changes)
 
     HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
 
-    HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
-    (Hari Mankude via eli)
-
     HDFS-2857. Cleanup BlockInfo class. (suresh)
 
     HDFS-2786. Fix host-based token incompatibilities in DFSUtil. (Kihwal Lee
@@ -185,6 +182,8 @@ Release 2.0.0 - UNRELEASED 
     HDFS-3148. The client should be able to use multiple local interfaces
     for data transfer. (eli)
 
+    HDFS-3000. Add a public API for setting quotas. (atm)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -306,6 +305,17 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3120. Enable hsync and hflush by default. (eli)
 
+    HDFS-3187. Upgrade guava to 11.0.2 (todd)
+
+    HDFS-3168. Remove unnecessary "throws IOException" and change fields to
+    final in FSNamesystem and BlockManager.  (szetszwo)
+
+    HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
+    (Hari Mankude via eli)
+
+    HDFS-3084. FenceMethod.tryFence() and ShellCommandFencer should pass
+    namenodeId as well as host:port (todd)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
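
Note: the HdfsAdmin class referenced by HDFS-3000 above is copied unchanged
from trunk and does not appear in this diff. For context, here is a minimal
usage sketch of the new public quota API; the URI, path, and quota values
are illustrative assumptions, not part of the commit:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class QuotaExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical namenode URI and directory; adjust for a real cluster.
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://localhost:8020"), conf);
        Path dir = new Path("/user/alice");
        admin.setQuota(dir, 10000);           // at most 10,000 names under dir
        admin.setSpaceQuota(dir, 10L << 30);  // at most 10 GiB of raw disk space
        admin.clearSpaceQuota(dir);           // remove the space quota again
      }
    }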

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1309162-1309567

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Apr  4 20:00:15 2012
@@ -1026,13 +1026,7 @@ public class DFSUtil {
       String nsId, String nnId) {
 
     if (nsId == null) {
-      Collection<String> nsIds = getNameServiceIds(conf);
-      if (1 == nsIds.size()) {
-        nsId = nsIds.toArray(new String[1])[0];
-      } else {
-        // No nameservice ID was given and more than one is configured
-        return null;
-      }
+      nsId = getOnlyNameServiceIdOrNull(conf);
     }
 
     String serviceAddrKey = concatSuffixes(
@@ -1047,4 +1041,18 @@ public class DFSUtil {
     }
     return serviceRpcAddr;
   }
+
+  /**
+   * If the configuration refers to only a single nameservice, return the
+   * name of that nameservice. If it refers to 0 or more than 1, return null.
+   */
+  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
+    Collection<String> nsIds = getNameServiceIds(conf);
+    if (1 == nsIds.size()) {
+      return nsIds.toArray(new String[1])[0];
+    } else {
+      // Zero nameservices or more than one configured; no unique choice
+      return null;
+    }
+  }
 }
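
The extracted helper is exercised by the new TestDFSUtil case further down;
its contract in brief, as a hedged sketch (the nameservice names are
illustrative, and dfs.federation.nameservices is assumed to be the governing
key at this revision):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NsIdExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
        // Exactly one nameservice configured: returns its ID, "ns1".
        System.out.println(DFSUtil.getOnlyNameServiceIdOrNull(conf));
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        // Zero or more than one configured: returns null.
        System.out.println(DFSUtil.getOnlyNameServiceIdOrNull(conf));
      }
    }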

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Apr  4 20:00:15 2012
@@ -253,9 +253,9 @@ public class BlockManager {
     this.replicationRecheckInterval = 
       conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                   DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
-    LOG.info("defaultReplication = " + defaultReplication);
-    LOG.info("maxReplication     = " + maxReplication);
-    LOG.info("minReplication     = " + minReplication);
+    LOG.info("defaultReplication         = " + defaultReplication);
+    LOG.info("maxReplication             = " + maxReplication);
+    LOG.info("minReplication             = " + minReplication);
     LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
     LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
     LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
@@ -1030,7 +1030,7 @@ public class BlockManager {
    *
    * @return number of blocks scheduled for replication during this iteration.
    */
-  int computeReplicationWork(int blocksToProcess) throws IOException {
+  int computeReplicationWork(int blocksToProcess) {
     List<List<Block>> blocksToReplicate = null;
     namesystem.writeLock();
     try {
@@ -2174,7 +2174,7 @@ assert storedBlock.findDatanode(dn) < 0 
   
   /** Set replication for the blocks. */
   public void setReplication(final short oldRepl, final short newRepl,
-      final String src, final Block... blocks) throws IOException {
+      final String src, final Block... blocks) {
     if (newRepl == oldRepl) {
       return;
     }
@@ -2937,8 +2937,6 @@ assert storedBlock.findDatanode(dn) < 0 
         } catch (InterruptedException ie) {
           LOG.warn("ReplicationMonitor thread received InterruptedException.", ie);
           break;
-        } catch (IOException ie) {
-          LOG.warn("ReplicationMonitor thread received exception. " , ie);
         } catch (Throwable t) {
           LOG.warn("ReplicationMonitor thread received Runtime exception. ", t);
           Runtime.getRuntime().exit(-1);
@@ -2956,14 +2954,14 @@ assert storedBlock.findDatanode(dn) < 0 
    * @return number of blocks scheduled for replication or removal.
    * @throws IOException
    */
-  int computeDatanodeWork() throws IOException {
-    int workFound = 0;
+  int computeDatanodeWork() {
     // Blocks should not be replicated or removed if in safe mode.
     // It's OK to check safe mode here w/o holding lock, in the worst
     // case extra replications will be scheduled, and these will get
     // fixed up later.
-    if (namesystem.isInSafeMode())
-      return workFound;
+    if (namesystem.isInSafeMode()) {
+      return 0;
+    }
 
     final int numlive = heartbeatManager.getLiveDatanodeCount();
     final int blocksToProcess = numlive
@@ -2971,7 +2969,7 @@ assert storedBlock.findDatanode(dn) < 0 
     final int nodesToProcess = (int) Math.ceil(numlive
         * ReplicationMonitor.INVALIDATE_WORK_PCT_PER_ITERATION / 100.0);
 
-    workFound = this.computeReplicationWork(blocksToProcess);
+    int workFound = this.computeReplicationWork(blocksToProcess);
 
     // Update counters
     namesystem.writeLock();
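
For a sense of scale, the per-iteration work targets computed above, assuming
the ReplicationMonitor constants of this era (2 blocks per live node for
replication, 32% of live nodes for invalidation; both values are assumptions
for illustration, not taken from this diff):

    public class WorkTargets {
      public static void main(String[] args) {
        int numlive = 100;  // hypothetical live datanode count
        int blocksToProcess = numlive * 2;                           // 200 blocks per iteration
        int nodesToProcess = (int) Math.ceil(numlive * 32 / 100.0);  // 32 nodes per iteration
        System.out.println(blocksToProcess + " blocks, " + nodesToProcess + " nodes");
      }
    }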

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Apr  4 20:00:15 2012
@@ -25,15 +25,17 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
@@ -49,15 +51,13 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
@@ -150,9 +150,9 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
@@ -260,30 +260,28 @@ public class FSNamesystem implements Nam
 
   static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
   static int BLOCK_DELETION_INCREMENT = 1000;
-  private boolean isPermissionEnabled;
-  private boolean persistBlocks;
-  private UserGroupInformation fsOwner;
-  private String supergroup;
-  private boolean standbyShouldCheckpoint;
+  private final boolean isPermissionEnabled;
+  private final boolean persistBlocks;
+  private final UserGroupInformation fsOwner;
+  private final String supergroup;
+  private final boolean standbyShouldCheckpoint;
   
   // Scan interval is not configurable.
   private static final long DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL =
     TimeUnit.MILLISECONDS.convert(1, TimeUnit.HOURS);
-  private DelegationTokenSecretManager dtSecretManager;
-  private boolean alwaysUseDelegationTokensForTests;
+  private final DelegationTokenSecretManager dtSecretManager;
+  private final boolean alwaysUseDelegationTokensForTests;
   
 
-  //
-  // Stores the correct file name hierarchy
-  //
+  /** The namespace tree. */
   FSDirectory dir;
-  private BlockManager blockManager;
-  private DatanodeStatistics datanodeStatistics;
+  private final BlockManager blockManager;
+  private final DatanodeStatistics datanodeStatistics;
 
   // Block pool ID used by this namenode
   private String blockPoolId;
 
-  LeaseManager leaseManager = new LeaseManager(this); 
+  final LeaseManager leaseManager = new LeaseManager(this); 
 
   Daemon smmthread = null;  // SafeModeMonitor thread
   
@@ -291,23 +289,23 @@ public class FSNamesystem implements Nam
 
   private volatile boolean hasResourcesAvailable = false;
   private volatile boolean fsRunning = true;
-  long systemStart = 0;
+  
+  /** The start time of the namesystem. */
+  private final long startTime = now();
 
-  //resourceRecheckInterval is how often namenode checks for the disk space availability
-  private long resourceRecheckInterval;
+  /** The interval of namenode checking for the disk space availability */
+  private final long resourceRecheckInterval;
 
   // The actual resource checker instance.
   NameNodeResourceChecker nnResourceChecker;
 
-  private FsServerDefaults serverDefaults;
-
-  private boolean supportAppends;
-  private ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure = 
-      ReplaceDatanodeOnFailure.DEFAULT;
+  private final FsServerDefaults serverDefaults;
+  private final boolean supportAppends;
+  private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
 
   private volatile SafeModeInfo safeMode;  // safe mode information
 
-  private long maxFsObjects = 0;          // maximum number of fs objects
+  private final long maxFsObjects;          // maximum number of fs objects
 
   /**
    * The global generation stamp for this file system. 
@@ -315,10 +313,10 @@ public class FSNamesystem implements Nam
   private final GenerationStamp generationStamp = new GenerationStamp();
 
   // precision of access times.
-  private long accessTimePrecision = 0;
+  private final long accessTimePrecision;
 
-  // lock to protect FSNamesystem.
-  private ReentrantReadWriteLock fsLock;
+  /** Lock to protect FSNamesystem. */
+  private ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
 
   /**
    * Used when this NN is in standby state to read from the shared edit log.
@@ -336,9 +334,7 @@ public class FSNamesystem implements Nam
    */
   private HAContext haContext;
 
-  private boolean haEnabled;
-
-  private final Configuration conf;
+  private final boolean haEnabled;
     
   /**
    * Instantiates an FSNamesystem loaded from the image and edits
@@ -390,9 +386,71 @@ public class FSNamesystem implements Nam
    * @throws IOException on bad configuration
    */
   FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
-    this.conf = conf;
     try {
-      initialize(conf, fsImage);
+      resourceRecheckInterval = conf.getLong(
+          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
+          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
+
+      this.blockManager = new BlockManager(this, this, conf);
+      this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
+
+      this.fsOwner = UserGroupInformation.getCurrentUser();
+      this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
+                                 DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+      this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
+                                                 DFS_PERMISSIONS_ENABLED_DEFAULT);
+      LOG.info("fsOwner             = " + fsOwner);
+      LOG.info("supergroup          = " + supergroup);
+      LOG.info("isPermissionEnabled = " + isPermissionEnabled);
+
+      final boolean persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
+                                                    DFS_PERSIST_BLOCKS_DEFAULT);
+      // block allocation has to be persisted in HA using a shared edits directory
+      // so that the standby has up-to-date namespace information
+      String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+      this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
+      this.persistBlocks = persistBlocks || (haEnabled && HAUtil.usesSharedEditsDir(conf));
+      
+      // Sanity check the HA-related config.
+      if (nameserviceId != null) {
+        LOG.info("Determined nameservice ID: " + nameserviceId);
+      }
+      LOG.info("HA Enabled: " + haEnabled);
+      if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) {
+        LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf));
+        throw new IOException("Invalid configuration: a shared edits dir " +
+            "must not be specified if HA is not enabled.");
+      }
+
+      this.serverDefaults = new FsServerDefaults(
+          conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+          conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
+          conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
+          (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
+          conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT));
+      
+      this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
+                                       DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
+
+      this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
+      this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
+      LOG.info("Append Enabled: " + supportAppends);
+
+      this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
+      
+      this.standbyShouldCheckpoint = conf.getBoolean(
+          DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
+      
+      // For testing purposes, allow the DT secret manager to be started regardless
+      // of whether security is enabled.
+      alwaysUseDelegationTokensForTests = conf.getBoolean(
+          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
+          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
+
+      this.dtSecretManager = createDelegationTokenSecretManager(conf);
+      this.dir = new FSDirectory(fsImage, this, conf);
+      this.safeMode = new SafeModeInfo(conf);
+
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -400,24 +458,6 @@ public class FSNamesystem implements Nam
     }
   }
 
-  /**
-   * Initialize FSNamesystem.
-   */
-  private void initialize(Configuration conf, FSImage fsImage)
-      throws IOException {
-    resourceRecheckInterval = conf.getLong(
-        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
-        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
-    this.systemStart = now();
-    this.blockManager = new BlockManager(this, this, conf);
-    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
-    this.fsLock = new ReentrantReadWriteLock(true); // fair locking
-    setConfigurationParameters(conf);
-    dtSecretManager = createDelegationTokenSecretManager(conf);
-    this.dir = new FSDirectory(fsImage, this, conf);
-    this.safeMode = new SafeModeInfo(conf);
-  }
-
   void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled)
       throws IOException {
     // format before starting up if requested
@@ -601,13 +641,13 @@ public class FSNamesystem implements Nam
   }
   
   /** Start services required in standby state */
-  void startStandbyServices() {
+  void startStandbyServices(final Configuration conf) {
     LOG.info("Starting services required for standby state");
     if (!dir.fsImage.editLog.isOpenForRead()) {
       // During startup, we're already open for read.
       dir.fsImage.editLog.initSharedJournalsForRead();
     }
-    editLogTailer = new EditLogTailer(this);
+    editLogTailer = new EditLogTailer(this, conf);
     editLogTailer.start();
     if (standbyShouldCheckpoint) {
       standbyCheckpointer = new StandbyCheckpointer(conf, this);
@@ -768,10 +808,6 @@ public class FSNamesystem implements Nam
         DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
     return Util.stringCollectionAsURIs(dirNames);
   }
-  
-  public Configuration getConf() {
-    return conf;
-  }
 
   @Override
   public void readLock() {
@@ -806,69 +842,6 @@ public class FSNamesystem implements Nam
     return hasReadLock() || hasWriteLock();
   }
 
-
-  /**
-   * Initializes some of the members from configuration
-   */
-  private void setConfigurationParameters(Configuration conf) 
-                                          throws IOException {
-    fsOwner = UserGroupInformation.getCurrentUser();
-    
-    LOG.info("fsOwner=" + fsOwner);
-
-    this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
-                               DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
-    this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
-                                               DFS_PERMISSIONS_ENABLED_DEFAULT);
-    LOG.info("supergroup=" + supergroup);
-    LOG.info("isPermissionEnabled=" + isPermissionEnabled);
-
-    this.persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
-                                         DFS_PERSIST_BLOCKS_DEFAULT);
-    // block allocation has to be persisted in HA using a shared edits directory
-    // so that the standby has up-to-date namespace information
-    String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
-    this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
-    this.persistBlocks |= haEnabled && HAUtil.usesSharedEditsDir(conf);
-    
-    // Sanity check the HA-related config.
-    if (nameserviceId != null) {
-      LOG.info("Determined nameservice ID: " + nameserviceId);
-    }
-    LOG.info("HA Enabled: " + haEnabled);
-    if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) {
-      LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf));
-      throw new IOException("Invalid configuration: a shared edits dir " +
-          "must not be specified if HA is not enabled.");
-    }
-
-    this.serverDefaults = new FsServerDefaults(
-        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
-        conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
-        conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
-        (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
-        conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT));
-    
-    this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
-                                     DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
-
-    this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
-    this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY,
-        DFS_SUPPORT_APPEND_DEFAULT);
-
-    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
-    
-    this.standbyShouldCheckpoint = conf.getBoolean(
-        DFS_HA_STANDBY_CHECKPOINTS_KEY,
-        DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
-    
-    // For testing purposes, allow the DT secret manager to be started regardless
-    // of whether security is enabled.
-    alwaysUseDelegationTokensForTests = 
-      conf.getBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
-          DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
-  }
-
   NamespaceInfo getNamespaceInfo() {
     readLock();
     try {
@@ -2761,7 +2734,7 @@ public class FSNamesystem implements Nam
   }
 
   private Lease reassignLease(Lease lease, String src, String newHolder,
-      INodeFileUnderConstruction pendingFile) throws IOException {
+      INodeFileUnderConstruction pendingFile) {
     assert hasWriteLock();
     if(newHolder == null)
       return lease;
@@ -3329,7 +3302,7 @@ public class FSNamesystem implements Nam
   }
 
   Date getStartTime() {
-    return new Date(systemStart); 
+    return new Date(startTime); 
   }
     
   void finalizeUpgrade() throws IOException {
@@ -3506,7 +3479,7 @@ public class FSNamesystem implements Nam
       if (!isPopulatingReplQueues() && !isInStandbyState()) {
         initializeReplQueues();
       }
-      long timeInSafemode = now() - systemStart;
+      long timeInSafemode = now() - startTime;
       NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                     + timeInSafemode/1000 + " secs.");
       NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
@@ -4876,7 +4849,7 @@ public class FSNamesystem implements Nam
    * 
    * @param key new delegation key.
    */
-  public void logUpdateMasterKey(DelegationKey key) throws IOException {
+  public void logUpdateMasterKey(DelegationKey key) {
     
     assert !isInSafeMode() :
       "this should never be called while in safemode, since we stop " +
@@ -4889,7 +4862,7 @@ public class FSNamesystem implements Nam
   }
   
   private void logReassignLease(String leaseHolder, String src,
-      String newHolder) throws IOException {
+      String newHolder) {
     writeLock();
     try {
       getEditLog().logReassignLease(leaseHolder, src, newHolder);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Apr  4 20:00:15 2012
@@ -1061,7 +1061,7 @@ public class NameNode {
 
     @Override
     public void startStandbyServices() throws IOException {
-      namesystem.startStandbyServices();
+      namesystem.startStandbyServices(conf);
     }
 
     @Override

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java Wed Apr  4 20:00:15 2012
@@ -61,6 +61,7 @@ public class EditLogTailer {
   
   private final EditLogTailerThread tailerThread;
   
+  private final Configuration conf;
   private final FSNamesystem namesystem;
   private FSEditLog editLog;
   
@@ -98,13 +99,12 @@ public class EditLogTailer {
    */
   private long sleepTimeMs;
   
-  public EditLogTailer(FSNamesystem namesystem) {
+  public EditLogTailer(FSNamesystem namesystem, Configuration conf) {
     this.tailerThread = new EditLogTailerThread();
+    this.conf = conf;
     this.namesystem = namesystem;
     this.editLog = namesystem.getEditLog();
     
-
-    Configuration conf = namesystem.getConf();
     lastLoadTimestamp = now();
 
     logRollPeriodMs = conf.getInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,
@@ -129,14 +129,12 @@ public class EditLogTailer {
   }
   
   private InetSocketAddress getActiveNodeAddress() {
-    Configuration conf = namesystem.getConf();
     Configuration activeConf = HAUtil.getConfForOtherNode(conf);
     return NameNode.getServiceAddress(activeConf, true);
   }
   
   private NamenodeProtocol getActiveNodeProxy() throws IOException {
     if (cachedActiveProxy == null) {
-      Configuration conf = namesystem.getConf();
       NamenodeProtocolPB proxy = 
         RPC.waitForProxy(NamenodeProtocolPB.class,
             RPC.getProtocolVersion(NamenodeProtocolPB.class), activeAddr, conf);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Wed Apr  4 20:00:15 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.net.InetSocketAddress;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -25,9 +26,12 @@ import org.apache.hadoop.ha.BadFencingCo
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.NodeFencer;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.base.Preconditions;
+
 /**
  * One of the NN NameNodes acting as the target of an administrative command
  * (e.g. failover).
@@ -35,16 +39,36 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceAudience.Private
 public class NNHAServiceTarget extends HAServiceTarget {
 
+  // Keys added to the fencing script environment
+  private static final String NAMESERVICE_ID_KEY = "nameserviceid";
+  private static final String NAMENODE_ID_KEY = "namenodeid";
+  
   private final InetSocketAddress addr;
   private NodeFencer fencer;
   private BadFencingConfigurationException fenceConfigError;
   private final String nnId;
   private final String nsId;
 
-  public NNHAServiceTarget(Configuration localNNConf,
+  public NNHAServiceTarget(Configuration conf,
       String nsId, String nnId) {
+    Preconditions.checkNotNull(nnId);
+    
+    if (nsId == null) {
+      nsId = DFSUtil.getOnlyNameServiceIdOrNull(conf);
+      if (nsId == null) {
+        throw new IllegalArgumentException(
+            "Unable to determine the nameservice id.");
+      }
+    }
+    assert nsId != null;
+    
+    // Make a copy of the conf, and override configs based on the
+    // target node -- not the node we happen to be running on.
+    HdfsConfiguration targetConf = new HdfsConfiguration(conf);
+    NameNode.initializeGenericKeys(targetConf, nsId, nnId);
+    
     String serviceAddr = 
-      DFSUtil.getNamenodeServiceAddr(localNNConf, nsId, nnId);
+      DFSUtil.getNamenodeServiceAddr(targetConf, nsId, nnId);
     if (serviceAddr == null) {
       throw new IllegalArgumentException(
           "Unable to determine service address for namenode '" + nnId + "'");
@@ -52,7 +76,7 @@ public class NNHAServiceTarget extends H
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
     try {
-      this.fencer = NodeFencer.create(localNNConf);
+      this.fencer = NodeFencer.create(targetConf);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }
@@ -96,4 +120,12 @@ public class NNHAServiceTarget extends H
   public String getNameNodeId() {
     return this.nnId;
   }
+
+  @Override
+  protected void addFencingParameters(Map<String, String> ret) {
+    super.addFencingParameters(ret);
+    
+    ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
+    ret.put(NAMENODE_ID_KEY, getNameNodeId());
+  }
 }
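
The two keys registered in addFencingParameters() above reach shell-based
fencers as environment variables with a target_ prefix, as the updated
TestDFSHAAdminMiniCluster below asserts. A hedged configuration sketch; the
script path is an illustrative assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.NodeFencer;

    public class FencerConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The fencing script sees the target's IDs in its environment.
        conf.set(NodeFencer.CONF_METHODS_KEY,
            "shell(/usr/local/bin/fence-nn.sh $target_nameserviceid " +
            "$target_namenodeid $target_port)");
      }
    }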

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1309162-1309567

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1309162-1309567

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1309162-1309567

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1309162-1309567

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1309162-1309567

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Apr  4 20:00:15 2012
@@ -731,7 +731,9 @@ public class MiniDFSCluster {
       Preconditions.checkArgument(!dstDir.equals(srcDir));
       File dstDirF = new File(dstDir);
       if (dstDirF.exists()) {
-        Files.deleteRecursively(dstDirF);
+        if (!FileUtil.fullyDelete(dstDirF)) {
+          throw new IOException("Unable to delete: " + dstDirF);
+        }
       }
       LOG.info("Copying namedir from primary node dir "
           + srcDir + " to " + dstDir);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Wed Apr  4 20:00:15 2012
@@ -179,6 +179,17 @@ public class TestDFSUtil {
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
   }
+  
+  @Test
+  public void testGetOnlyNameServiceIdOrNull() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
+  }
 
   /**
    * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Wed Apr  4 20:00:15 2012
@@ -23,8 +23,8 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
@@ -143,8 +143,7 @@ public class BlockManagerTestUtil {
    * {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
    * a high value to ensure that all work is calculated.
    */
-  public static int computeAllPendingWork(BlockManager bm)
-    throws IOException {
+  public static int computeAllPendingWork(BlockManager bm) {
     int work = computeInvalidationWork(bm);
     work += bm.computeReplicationWork(Integer.MAX_VALUE);
     return work;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Wed Apr  4 20:00:15 2012
@@ -37,6 +37,7 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -181,7 +182,9 @@ public abstract class FSImageTestUtil {
   public static FSEditLog createStandaloneEditLog(File logDir)
       throws IOException {
     assertTrue(logDir.mkdirs() || logDir.exists());
-    Files.deleteDirectoryContents(logDir);
+    if (!FileUtil.fullyDeleteContents(logDir)) {
+      throw new IOException("Unable to delete contents of " + logDir);
+    }
     NNStorage storage = Mockito.mock(NNStorage.class);
     StorageDirectory sd 
       = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1309576&r1=1309575&r2=1309576&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Wed Apr  4 20:00:15 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import static org.junit.Assert.*;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
@@ -41,6 +42,7 @@ import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.io.Files;
 
 /**
  * Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
@@ -59,6 +61,8 @@ public class TestDFSHAAdminMiniCluster {
 
   private String errOutput;
 
+  private int nn1Port;
+
   @Before
   public void setup() throws IOException {
     conf = new Configuration();
@@ -69,6 +73,8 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     tool.setErrOut(new PrintStream(errOutBytes));
     cluster.waitActive();
+    
+    nn1Port = cluster.getNameNodePort(0);
   }
 
   @After
@@ -124,9 +130,17 @@ public class TestDFSHAAdminMiniCluster {
   public void testFencer() throws Exception { 
     // Test failover with no fencer
     assertEquals(-1, runTool("-failover", "nn1", "nn2"));
-    
+
+    // Set up fencer to write info about the fencing target into a
+    // tmp file, so we can verify that the args were substituted right
+    File tmpFile = File.createTempFile("testFencer", ".txt");
+    tmpFile.deleteOnExit();
+    conf.set(NodeFencer.CONF_METHODS_KEY,
+        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
+        "$target_port $dfs_ha_namenode_id > " +
+        tmpFile.getAbsolutePath() + ")");
+
     // Test failover with fencer
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
@@ -134,21 +148,36 @@ public class TestDFSHAAdminMiniCluster {
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
 
+    // Fencer has not run yet, since none of the above required fencing 
+    assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
+
     // Test failover with fencer and forcefence option
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
-      
+    
+    // The fence script should run with the configuration from the target
+    // node, rather than the configuration from the fencing node
+    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",
+        Files.toString(tmpFile, Charsets.UTF_8));
+    tmpFile.delete();
+    
     // Test failover with forceactive option
     assertEquals(0, runTool("-failover", "nn2", "nn1", "--forceactive"));
+
+    // Fencing should not occur, since it was graceful
+    assertFalse(tmpFile.exists());
+
           
     // Test failover with not fencer and forcefence option
     conf.unset(NodeFencer.CONF_METHODS_KEY);
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
-    
+    assertFalse(tmpFile.exists());
+
     // Test failover with bad fencer and forcefence option
     conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
     conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");