Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/04 20:43:11 UTC

svn commit: r1309529 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/tools/ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/...

Author: todd
Date: Wed Apr  4 18:43:10 2012
New Revision: 1309529

URL: http://svn.apache.org/viewvc?rev=1309529&view=rev
Log:
HDFS-3084. FenceMethod.tryFence() and ShellCommandFencer should pass namenodeId as well as host:port. Contributed by Todd Lipcon.
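
For context, this change exposes the fencing target's logical IDs to fencing methods in addition to its host:port; the shell fencer surfaces them as $target_nameserviceid and $target_namenodeid, as exercised by the TestDFSHAAdminMiniCluster change below. A minimal sketch of wiring up such a fencer, assuming the class and constant names shown in the diff (the echo command itself is only illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.ha.NodeFencer;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ShellFencerConfigSketch {
      public static void main(String[] args) throws BadFencingConfigurationException {
        Configuration conf = new HdfsConfiguration();
        // Illustrative fencing method: with this change the shell command can
        // reference the target's nameservice and namenode IDs, not just its port.
        conf.set(NodeFencer.CONF_METHODS_KEY,
            "shell(echo fencing $target_nameserviceid.$target_namenodeid on port $target_port)");
        NodeFencer fencer = NodeFencer.create(conf);
        System.out.println("Configured fencer: " + fencer);
      }
    }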

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1309529&r1=1309528&r2=1309529&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr  4 18:43:10 2012
@@ -211,6 +211,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
     (Hari Mankude via eli)
 
+    HDFS-3084. FenceMethod.tryFence() and ShellCommandFencer should pass
+    namenodeId as well as host:port (todd)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1309529&r1=1309528&r2=1309529&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Apr  4 18:43:10 2012
@@ -1033,13 +1033,7 @@ public class DFSUtil {
       String nsId, String nnId) {
 
     if (nsId == null) {
-      Collection<String> nsIds = getNameServiceIds(conf);
-      if (1 == nsIds.size()) {
-        nsId = nsIds.toArray(new String[1])[0];
-      } else {
-        // No nameservice ID was given and more than one is configured
-        return null;
-      }
+      nsId = getOnlyNameServiceIdOrNull(conf);
     }
 
     String serviceAddrKey = concatSuffixes(
@@ -1054,4 +1048,18 @@ public class DFSUtil {
     }
     return serviceRpcAddr;
   }
+
+  /**
+   * If the configuration refers to only a single nameservice, return the
+   * name of that nameservice. If it refers to 0 or more than 1, return null.
+   */
+  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
+    Collection<String> nsIds = getNameServiceIds(conf);
+    if (1 == nsIds.size()) {
+      return nsIds.toArray(new String[1])[0];
+    } else {
+      // No nameservice ID was given and more than one is configured
+      return null;
+    }
+  }
 }
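
To make the new helper's contract concrete, a quick illustrative usage (hypothetical driver class; it mirrors the TestDFSUtil case added further down and assumes DFS_FEDERATION_NAMESERVICES lives in DFSConfigKeys, as in that test's static import):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class OnlyNameServiceIdSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Exactly one nameservice configured: the helper returns its ID.
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
        System.out.println(DFSUtil.getOnlyNameServiceIdOrNull(conf)); // prints "ns1"
        // Zero or more than one configured: the helper returns null.
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        System.out.println(DFSUtil.getOnlyNameServiceIdOrNull(conf)); // prints "null"
      }
    }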

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1309529&r1=1309528&r2=1309529&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Wed Apr  4 18:43:10 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.net.InetSocketAddress;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
@@ -28,6 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.base.Preconditions;
+
 /**
  * One of the NN NameNodes acting as the target of an administrative command
  * (e.g. failover).
@@ -35,14 +38,36 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceAudience.Private
 public class NNHAServiceTarget extends HAServiceTarget {
 
+  // Keys added to the fencing script environment
+  private static final String NAMESERVICE_ID_KEY = "nameserviceid";
+  private static final String NAMENODE_ID_KEY = "namenodeid";
+  
   private final InetSocketAddress addr;
   private NodeFencer fencer;
   private BadFencingConfigurationException fenceConfigError;
+  private final String nnId;
+  private final String nsId;
 
   public NNHAServiceTarget(HdfsConfiguration conf,
       String nsId, String nnId) {
+    Preconditions.checkNotNull(nnId);
+    
+    if (nsId == null) {
+      nsId = DFSUtil.getOnlyNameServiceIdOrNull(conf);
+      if (nsId == null) {
+        throw new IllegalArgumentException(
+            "Unable to determine the nameservice id.");
+      }
+    }
+    assert nsId != null;
+    
+    // Make a copy of the conf, and override configs based on the
+    // target node -- not the node we happen to be running on.
+    HdfsConfiguration targetConf = new HdfsConfiguration(conf);
+    NameNode.initializeGenericKeys(targetConf, nsId, nnId);
+    
     String serviceAddr = 
-      DFSUtil.getNamenodeServiceAddr(conf, nsId, nnId);
+      DFSUtil.getNamenodeServiceAddr(targetConf, nsId, nnId);
     if (serviceAddr == null) {
       throw new IllegalArgumentException(
           "Unable to determine service address for namenode '" + nnId + "'");
@@ -50,10 +75,12 @@ public class NNHAServiceTarget extends H
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
     try {
-      this.fencer = NodeFencer.create(conf);
+      this.fencer = NodeFencer.create(targetConf);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }
+    this.nnId = nnId;
+    this.nsId = nsId;
   }
 
   /**
@@ -81,4 +108,19 @@ public class NNHAServiceTarget extends H
     return "NameNode at " + addr;
   }
 
+  public String getNameServiceId() {
+    return this.nsId;
+  }
+  
+  public String getNameNodeId() {
+    return this.nnId;
+  }
+
+  @Override
+  protected void addFencingParameters(Map<String, String> ret) {
+    super.addFencingParameters(ret);
+    
+    ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
+    ret.put(NAMENODE_ID_KEY, getNameNodeId());
+  }
 }
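
With getNameServiceId() and getNameNodeId() now public, a custom fencing method can also read the target's logical IDs directly rather than relying only on host:port. A minimal sketch, assuming the org.apache.hadoop.ha.FenceMethod interface with checkArgs(String) and tryFence(HAServiceTarget, String); the getters and toString() behavior come from the change above, everything else is illustrative:

    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.ha.FenceMethod;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    // Hypothetical fencer used only to show the new accessors.
    public class LoggingFenceMethod implements FenceMethod {
      @Override
      public void checkArgs(String args) throws BadFencingConfigurationException {
        // This sketch takes no arguments, so there is nothing to validate.
      }

      @Override
      public boolean tryFence(HAServiceTarget target, String args)
          throws BadFencingConfigurationException {
        if (target instanceof NNHAServiceTarget) {
          NNHAServiceTarget nn = (NNHAServiceTarget) target;
          // The logical IDs of the node being fenced are available in
          // addition to its address (printed via toString()).
          System.err.println("Fencing " + nn.getNameServiceId() + "."
              + nn.getNameNodeId() + " (" + nn + ")");
        }
        // Claim success so the sketch never blocks a failover.
        return true;
      }
    }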

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1309529&r1=1309528&r2=1309529&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Wed Apr  4 18:43:10 2012
@@ -179,6 +179,17 @@ public class TestDFSUtil {
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
   }
+  
+  @Test
+  public void testGetOnlyNameServiceIdOrNull() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
+  }
 
   /**
    * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1309529&r1=1309528&r2=1309529&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Wed Apr  4 18:43:10 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import static org.junit.Assert.*;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
@@ -41,6 +42,7 @@ import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.io.Files;
 
 /**
  * Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
@@ -59,6 +61,8 @@ public class TestDFSHAAdminMiniCluster {
 
   private String errOutput;
 
+  private int nn1Port;
+
   @Before
   public void setup() throws IOException {
     conf = new Configuration();
@@ -69,6 +73,8 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     tool.setErrOut(new PrintStream(errOutBytes));
     cluster.waitActive();
+    
+    nn1Port = cluster.getNameNodePort(0);
   }
 
   @After
@@ -124,9 +130,17 @@ public class TestDFSHAAdminMiniCluster {
   public void testFencer() throws Exception { 
     // Test failover with no fencer
     assertEquals(-1, runTool("-failover", "nn1", "nn2"));
-    
+
+    // Set up fencer to write info about the fencing target into a
+    // tmp file, so we can verify that the args were substituted right
+    File tmpFile = File.createTempFile("testFencer", ".txt");
+    tmpFile.deleteOnExit();
+    conf.set(NodeFencer.CONF_METHODS_KEY,
+        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
+        "$target_port $dfs_ha_namenode_id > " +
+        tmpFile.getAbsolutePath() + ")");
+
     // Test failover with fencer
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
@@ -134,21 +148,36 @@ public class TestDFSHAAdminMiniCluster {
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
 
+    // Fencer has not run yet, since none of the above required fencing 
+    assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
+
     // Test failover with fencer and forcefence option
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
-      
+    
+    // The fence script should run with the configuration from the target
+    // node, rather than the configuration from the fencing node
+    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",
+        Files.toString(tmpFile, Charsets.UTF_8));
+    tmpFile.delete();
+    
     // Test failover with forceactive option
     assertEquals(0, runTool("-failover", "nn2", "nn1", "--forceactive"));
+
+    // Fencing should not occur, since it was graceful
+    assertFalse(tmpFile.exists());
+
           
     // Test failover with not fencer and forcefence option
     conf.unset(NodeFencer.CONF_METHODS_KEY);
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
-    
+    assertFalse(tmpFile.exists());
+
     // Test failover with bad fencer and forcefence option
     conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
     conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");