Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/04/10 01:25:22 UTC

svn commit: r1311518 [3/3] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/serve...

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto Mon Apr  9 23:25:17 2012
@@ -36,16 +36,18 @@ message JournalInfoProto {
 }
 
 /**
- * JournalInfo - the information about the journal
+ * journalInfo - the information about the journal
  * firstTxnId - the first txid in the journal records
  * numTxns - Number of transactions in editlog
  * records - bytes containing serialized journal records
+ * epoch - a change in this value indicates a change of the journal writer
  */
 message JournalRequestProto {
   required JournalInfoProto journalInfo = 1;
   required uint64 firstTxnId = 2;
   required uint32 numTxns = 3;
   required bytes records = 4;
+  required uint64 epoch = 5;
 }
 
 /**
@@ -55,12 +57,13 @@ message JournalResponseProto { 
 }
 
 /**
- * JournalInfo - the information about the journal
+ * journalInfo - the information about the journal
  * txid - first txid in the new log
  */
 message StartLogSegmentRequestProto {
-  required JournalInfoProto journalInfo = 1;
-  required uint64 txid = 2;
+  required JournalInfoProto journalInfo = 1; // Info about the journal
+  required uint64 txid = 2; // Transaction ID
+  required uint64 epoch = 3;
 }
 
 /**
@@ -70,6 +73,27 @@ message StartLogSegmentResponseProto { 
 }
 
 /**
+ * journalInfo - the information about the journal
+ * epoch - a change in epoch indicates a change of the journal writer
+ * fencerInfo - information about the fencer, for debugging
+ */
+message FenceRequestProto {
+  required JournalInfoProto journalInfo = 1; // Info about the journal
+  required uint64 epoch = 2; // Epoch - change indicates change in writer
+  optional string fencerInfo = 3; // Info about fencer for debugging
+}
+
+/**
+ * previousEpoch - the previous epoch if any, otherwise zero
+ * lastTransactionId - last valid transaction Id in the journal
+ * inSync - if all journal segments are available and in sync
+ */
+message FenceResponseProto {
+  optional uint64 previousEpoch = 1;
+  optional uint64 lastTransactionId = 2;
+  optional bool inSync = 3;
+}
+
+/**
  * Protocol used to journal edits to a remote node. Currently,
  * this is used to publish edits from the NameNode to a BackupNode.
  *
@@ -89,4 +113,10 @@ service JournalProtocolService {
    */
   rpc startLogSegment(StartLogSegmentRequestProto) 
       returns (StartLogSegmentResponseProto);
+  
+  /**
+   * Request to fence a journal receiver.
+   */
+  rpc fence(FenceRequestProto)
+      returns (FenceResponseProto);
 }
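
The epoch field added throughout this protocol, together with the new fence RPC, implements writer fencing: a journal receiver accepts a new writer only if it presents a strictly higher epoch than any it has seen, and afterwards rejects calls from older writers. A minimal Java sketch of that rule (illustrative only, not the actual JournalService code; the behavior is grounded in verifyFence() in TestJournalService below, where lower and equal epochs raise FencedException):

    // The receiver remembers the highest epoch granted via fence(). Once a
    // higher epoch is granted, journal() and startLogSegment() calls that
    // carry an older epoch can be rejected, so a stale writer cannot
    // corrupt the log.
    private long currentEpoch = 0;

    synchronized long fence(long newEpoch) throws FencedException {
      if (newEpoch <= currentEpoch) {
        throw new FencedException("Epoch " + newEpoch
            + " must be greater than the current epoch " + currentEpoch);
      }
      long previousEpoch = currentEpoch; // reported back in FenceResponseProto
      currentEpoch = newEpoch;
      return previousEpoch;
    }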

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1310141-1311517

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1310141-1311517

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1310141-1311517

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1310141-1311517

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Mon Apr  9 23:25:17 2012
@@ -30,6 +30,7 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -48,13 +49,13 @@ import static org.junit.Assert.*;
 public class TestViewFsFileStatusHdfs {
   
   static final String testfilename = "/tmp/testFileStatusSerialziation";
+  static final String someFile = "/hdfstmp/someFileForTestGetFileChecksum";
 
-  
-  
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
   private static Configuration CONF = new Configuration();
   private static FileSystem fHdfs;
+  private static FileSystem vfs;
   
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
@@ -65,18 +66,19 @@ public class TestViewFsFileStatusHdfs {
     defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fHdfs.mkdirs(defaultWorkingDirectory);
+
+    // Setup the ViewFS to be used for all tests.
+    Configuration conf = ViewFileSystemTestSetup.createConfig();
+    ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
+    ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri() + "/tmp"));
+    vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    assertEquals(ViewFileSystem.class, vfs.getClass());
   }
 
   @Test
   public void testFileStatusSerialziation()
       throws IOException, URISyntaxException {
-
    long len = FileSystemTestHelper.createFile(fHdfs, testfilename);
-
-    Configuration conf = ViewFileSystemTestSetup.createConfig();
-    ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() + "/tmp"));
-    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
-    assertEquals(ViewFileSystem.class, vfs.getClass());
     FileStatus stat = vfs.getFileStatus(new Path(testfilename));
     assertEquals(len, stat.getLen());
     // check serialization/deserialization
@@ -89,9 +91,34 @@ public class TestViewFsFileStatusHdfs {
     assertEquals(len, deSer.getLen());
   }
 
+  @Test
+  public void testGetFileChecksum() throws IOException, URISyntaxException {
+    // Create two different files in HDFS
+    FileSystemTestHelper.createFile(fHdfs, someFile);
+    FileSystemTestHelper.createFile(fHdfs, FileSystemTestHelper
+      .getTestRootPath(fHdfs, someFile + "other"), 1, 512);
+    // Get checksum through ViewFS
+    FileChecksum viewFSCheckSum = vfs.getFileChecksum(
+      new Path("/vfstmp/someFileForTestGetFileChecksum"));
+    // Get checksum through HDFS. 
+    FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(
+      new Path(someFile));
+    // Get checksum of different file in HDFS
+    FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
+      new Path(someFile+"other"));
+    // Checksums of the same file (obtained through HDFS and ViewFS) should be the same
+    assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
+      hdfsCheckSum);
+    // Checksums of different files should differ.
+    assertFalse("A different HDFS file unexpectedly had the same checksum " +
+      "as the ViewFS file", viewFSCheckSum.equals(otherHdfsFileCheckSum));
+  }
+
   @AfterClass
   public static void cleanup() throws IOException {
     fHdfs.delete(new Path(testfilename), true);
+    fHdfs.delete(new Path(someFile), true);
+    fHdfs.delete(new Path(someFile + "other"), true);
   }
 
 }
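
The new checksum test relies on ViewFS delegating the call through its mount table: /vfstmp resolves to /hdfstmp on the backing HDFS, so both file systems hash the same blocks. A hedged sketch of that delegation (illustrative; check ViewFileSystem for the exact code):

    // Resolve the view path against the mount table, then forward the call
    // to the FileSystem that actually stores the file.
    public FileChecksum getFileChecksum(final Path f) throws IOException {
      InodeTree.ResolveResult<FileSystem> res =
          fsState.resolve(getUriPath(f), true);
      return res.targetFileSystem.getFileChecksum(res.remainingPath);
    }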

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Apr  9 23:25:17 2012
@@ -581,6 +581,10 @@ public class MiniDFSCluster {
       }
     }
     
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
+
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
         hosts, simulatedCapacities, setupHostsFile);
@@ -781,6 +785,9 @@ public class MiniDFSCluster {
                      operation == StartupOption.REGULAR) ?
       new String[] {} : new String[] {operation.getName()};
     NameNode nn =  NameNode.createNameNode(args, conf);
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     
     // After the NN has started, set back the bound ports into
     // the conf
@@ -956,6 +963,9 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile,
                              boolean checkDataNodeAddrConfig) throws IOException {
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
 
     int curDatanodesNum = dataNodes.size();
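
These three early returns make StartupOption.RECOVER a special case: the NameNode is launched only to run offline edit-log recovery, so the cluster neither waits to read back the NN's RPC ports nor starts any DataNodes. A hedged usage sketch (assumes the Builder exposes the startup option; the exact method name may differ in this branch):

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .format(false)                          // reuse existing storage dirs
        .startupOption(StartupOption.RECOVER)   // assumed builder method
        .build();                               // returns once recovery has run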

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Mon Apr  9 23:25:17 2012
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,13 +37,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestOverReplicatedBlocks extends TestCase {
+public class TestOverReplicatedBlocks {
   /** Test that processOverReplicatedBlock handles corrupt replicas correctly.
    * It makes sure that corrupt replicas are not treated as valid ones,
    * which would otherwise cause the NN to delete valid replicas while
    * keeping corrupt ones.
    */
+  @Test
   public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@@ -113,4 +116,30 @@ public class TestOverReplicatedBlocks ex
       cluster.shutdown();
     }
   }
+  /**
+   * Test that an over-replicated block is invalidated when the replication
+   * factor is decreased for a partially written block.
+   */
+  @Test
+  public void testInvalidateOverReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
+      FSDataOutputStream out = fs.create(p, (short) 2);
+      out.writeBytes("HDFS-3119: " + p);
+      out.hsync();
+      fs.setReplication(p, (short) 1);
+      out.close();
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
+      assertEquals("Expected only one live replica for the block", 1, bm
+          .countNodes(block.getLocalBlock()).liveReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Mon Apr  9 23:25:17 2012
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -172,6 +171,13 @@ public class TestInterDatanodeProtocol {
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
+      
+      // Verify a null response when initializing recovery for a missing block
+      ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
+          b.getBlockId(), 0, 0);
+      assertNull(idp.initReplicaRecovery(
+          new RecoveringBlock(badBlock,
+              locatedblock.getLocations(), recoveryId)));
     }
     finally {
       if (cluster != null) {cluster.shutdown();}
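
The added assertion pins down a contract: initReplicaRecovery() returns null, rather than throwing, when the DataNode holds no replica of the given block. A hedged caller-side sketch (illustrative; ReplicaRecoveryInfo is the protocol's return type):

    ReplicaRecoveryInfo rri = idp.initReplicaRecovery(rBlock);
    if (rri == null) {
      // This DataNode has no replica of the block; a recovery coordinator
      // can skip it instead of aborting the whole block recovery.
      return;
    }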

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java Mon Apr  9 23:25:17 2012
@@ -20,12 +20,18 @@ package org.apache.hadoop.hdfs.server.jo
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
+import junit.framework.Assert;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
+import org.apache.hadoop.hdfs.server.protocol.FencedException;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -42,7 +48,7 @@ public class TestJournalService {
    * called.
    */
   @Test
-  public void testCallBacks() throws IOException {
+  public void testCallBacks() throws Exception {
     JournalListener listener = Mockito.mock(JournalListener.class);
     JournalService service = null;
     try {
@@ -51,6 +57,7 @@ public class TestJournalService {
       service = startJournalService(listener);
       verifyRollLogsCallback(service, listener);
       verifyJournalCallback(service, listener);
+      verifyFence(service, cluster.getNameNode(0));
     } finally {
       if (service != null) {
         service.stop();
@@ -93,4 +100,28 @@ public class TestJournalService {
     Mockito.verify(l, Mockito.atLeastOnce()).journal(Mockito.eq(s),
         Mockito.anyLong(), Mockito.anyInt(), (byte[]) Mockito.any());
   }
+  
+  public void verifyFence(JournalService s, NameNode nn) throws Exception {
+    String cid = nn.getNamesystem().getClusterId();
+    int nsId = nn.getNamesystem().getFSImage().getNamespaceID();
+    int lv = nn.getNamesystem().getFSImage().getLayoutVersion();
+    
+    // Fence the journal service
+    JournalInfo info = new JournalInfo(lv, cid, nsId);
+    long currentEpoch = s.getEpoch();
+    
+    // New epoch lower than the current epoch is rejected
+    try {
+      s.fence(info, (currentEpoch - 1), "fencer");
+      Assert.fail("Fencing with a lower epoch should be rejected");
+    } catch (FencedException expected) { /* Expected */ }
+    
+    // New epoch equal to the current epoch is rejected
+    try {
+      s.fence(info, currentEpoch, "fencer");
+      Assert.fail("Fencing with the same epoch should be rejected");
+    } catch (FencedException expected) { /* Expected */ }
+    
+    // New epoch higher than the current epoch is successful
+    FenceResponse resp = s.fence(info, currentEpoch+1, "fencer");
+    Assert.assertNotNull(resp);
+  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Apr  9 23:25:17 2012
@@ -179,8 +179,8 @@ public class TestEditLog extends TestCas
   }
   
   private long testLoad(byte[] data, FSNamesystem namesys) throws IOException {
-    FSEditLogLoader loader = new FSEditLogLoader(namesys);
-    return loader.loadFSEdits(new EditLogByteInputStream(data), 1);
+    FSEditLogLoader loader = new FSEditLogLoader(namesys, 0);
+    return loader.loadFSEdits(new EditLogByteInputStream(data), 1, null);
   }
 
   /**
@@ -315,7 +315,7 @@ public class TestEditLog extends TestCas
       //
       for (Iterator<StorageDirectory> it = 
               fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);
+        FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
         
         File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3,
             3 + expectedTxns - 1);
@@ -323,7 +323,7 @@ public class TestEditLog extends TestCas
         
         System.out.println("Verifying file: " + editFile);
         long numEdits = loader.loadFSEdits(
-            new EditLogFileInputStream(editFile), 3);
+            new EditLogFileInputStream(editFile), 3, null);
         int numLeases = namesystem.leaseManager.countLease();
         System.out.println("Number of outstanding leases " + numLeases);
         assertEquals(0, numLeases);
@@ -774,8 +774,8 @@ public class TestEditLog extends TestCas
     }
 
     @Override
-    public FSEditLogOp readOp() throws IOException {
-      return reader.readOp();
+    protected FSEditLogOp nextOp() throws IOException {
+      return reader.readOp(false);
     }
 
     @Override
@@ -788,16 +788,11 @@ public class TestEditLog extends TestCas
       input.close();
     }
 
-    @Override // JournalStream
+    @Override
     public String getName() {
       return "AnonEditLogByteInputStream";
     }
 
-    @Override // JournalStream
-    public JournalType getType() {
-      return JournalType.FILE;
-    }
-
     @Override
     public boolean isInProgress() {
       return true;
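
Across these hunks the loader API changes shape: FSEditLogLoader is now constructed with a starting transaction ID, and loadFSEdits() takes a third argument for a recovery context, where null preserves the old strict behavior (the context type is assumed to be the one introduced with the RECOVER startup option elsewhere in this commit; verify against FSEditLogLoader in this branch):

    FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
    long numEdits = loader.loadFSEdits(
        new EditLogFileInputStream(editFile), 3, null); // null = no recovery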

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Mon Apr  9 23:25:17 2012
@@ -236,9 +236,9 @@ public class TestEditLogRace {
       File editFile = new File(sd.getCurrentDir(), logFileName);
         
       System.out.println("Verifying file: " + editFile);
-      FSEditLogLoader loader = new FSEditLogLoader(namesystem);
+      FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
       long numEditsThisLog = loader.loadFSEdits(new EditLogFileInputStream(editFile), 
-          startTxId);
+          startTxId, null);
       
       System.out.println("Number of edits: " + numEditsThisLog);
       assertTrue(numEdits == -1 || numEditsThisLog == numEdits);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Mon Apr  9 23:25:17 2012
@@ -92,8 +92,8 @@ public class TestFSEditLogLoader {
     rwf.close();
     
     StringBuilder bld = new StringBuilder();
-    bld.append("^Error replaying edit log at offset \\d+");
-    bld.append(" on transaction ID \\d+\n");
+    bld.append("^Error replaying edit log at offset \\d+.  ");
+    bld.append("Expected transaction ID was \\d+\n");
     bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Mon Apr  9 23:25:17 2012
@@ -143,9 +143,9 @@ public class TestSecurityTokenEditLog ex
         File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
         System.out.println("Verifying file: " + editFile);
         
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);        
+        FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
         long numEdits = loader.loadFSEdits(
-            new EditLogFileInputStream(editFile), 1);
+            new EditLogFileInputStream(editFile), 1, null);
         assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
       }
     } finally {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Mon Apr  9 23:25:17 2012
@@ -158,7 +158,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfigured() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
   }
@@ -167,7 +167,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerAndNameservice() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
   }
@@ -176,7 +176,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFencerConfiguredAndForce() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }
@@ -185,7 +185,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithForceActive() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
   }
@@ -194,7 +194,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithInvalidFenceArg() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
   }
@@ -209,7 +209,7 @@ public class TestDFSHAAdmin {
   public void testFailoverWithFenceAndBadFencer() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }
@@ -218,7 +218,7 @@ public class TestDFSHAAdmin {
   public void testForceFenceOptionListedBeforeArgs() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }
@@ -240,7 +240,41 @@ public class TestDFSHAAdmin {
     assertEquals(-1, runTool("-checkHealth", "nn1"));
     assertOutputContains("Health check failed: fake health check failure");
   }
+  
+  /**
+   * Test that the fencing configuration can be overridden per-nameservice
+   * or per-namenode
+   */
+  @Test
+  public void testFencingConfigPerNameNode() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+
+    final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
+    final String nnSpecificKey = nsSpecificKey + ".nn1";
+    
+    HdfsConfiguration conf = getHAConf();
+    // Set the default fencer to succeed
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    tool.setConf(conf);
+    assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    
+    // Set the NN-specific fencer to fail. Should fail to fence.
+    conf.set(nnSpecificKey, "shell(false)");
+    tool.setConf(conf);
+    assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    conf.unset(nnSpecificKey);
 
+    // Set an NS-specific fencer to fail. Should fail.
+    conf.set(nsSpecificKey, "shell(false)");
+    tool.setConf(conf);
+    assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    
+    // Set the NS-specific fencer to succeed. Should succeed
+    conf.set(nsSpecificKey, "shell(true)");
+    tool.setConf(conf);
+    assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
+  }
+  
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
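
The new testFencingConfigPerNameNode encodes a precedence order for fencing configuration, with the most specific key winning. A sketch of the hierarchy (NSID and "nn1" come from the test's HA topology; key names are grounded in the test above):

    // dfs.ha.fencing.methods                 cluster-wide default
    // dfs.ha.fencing.methods.<nsId>          per-nameservice override
    // dfs.ha.fencing.methods.<nsId>.<nnId>   per-namenode override (wins)
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID, "shell(false)");
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID + ".nn1", "shell(true)");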

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Mon Apr  9 23:25:17 2012
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -113,7 +114,7 @@ public class TestDFSHAAdminMiniCluster {
   
   @Test
   public void testTryFailoverToSafeMode() throws Exception {
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
 
     NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
@@ -135,7 +136,7 @@ public class TestDFSHAAdminMiniCluster {
     // tmp file, so we can verify that the args were substituted right
     File tmpFile = File.createTempFile("testFencer", ".txt");
     tmpFile.deleteOnExit();
-    conf.set(NodeFencer.CONF_METHODS_KEY,
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
         "shell(echo -n $target_nameserviceid.$target_namenodeid " +
         "$target_port $dfs_ha_namenode_id > " +
         tmpFile.getAbsolutePath() + ")");
@@ -168,19 +169,19 @@ public class TestDFSHAAdminMiniCluster {
 
           
    // Test failover with no fencer and the forcefence option
-    conf.unset(NodeFencer.CONF_METHODS_KEY);
+    conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
     assertFalse(tmpFile.exists());
 
     // Test failover with bad fencer and forcefence option
-    conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
     assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1311518&r1=1311517&r2=1311518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java Mon Apr  9 23:25:17 2012
@@ -42,6 +42,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
+import com.google.common.base.Joiner;
+
 /**
  * Test for {@link GetConf}
  */
@@ -117,7 +119,12 @@ public class TestGetConf {
     PrintStream out = new PrintStream(o, true);
     try {
       int ret = ToolRunner.run(new GetConf(conf, out, out), args);
-      assertEquals(success, ret == 0);
+      out.flush();
+      System.err.println("Output: " + o.toString());
+      assertEquals("Expected " + (success?"success":"failure") +
+          " for args: " + Joiner.on(" ").join(args) + "\n" +
+          "Output: " + o.toString(),
+          success, ret == 0);
       return o.toString();
     } finally {
       o.close();
@@ -222,7 +229,9 @@ public class TestGetConf {
     getAddressListFromTool(TestType.SECONDARY, conf, false);
     getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
     for (Command cmd : Command.values()) {
-      CommandHandler handler = Command.getHandler(cmd.getName());
+      String arg = cmd.getName();
+      CommandHandler handler = Command.getHandler(arg);
+      assertNotNull("missing handler: " + cmd, handler);
       if (handler.key != null) {
         // First test with configuration missing the required key
         String[] args = {handler.key};
@@ -319,18 +328,36 @@ public class TestGetConf {
     verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
     verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
   }
+  
+  @Test
+  public void testGetSpecificKey() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set("mykey", " myval ");
+    String[] args = {"-confKey", "mykey"};
+    assertTrue(runTool(conf, args, true).equals("myval\n"));
+  }
+  
+  @Test
+  public void testExtraArgsThrowsError() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set("mykey", "myval");
+    String[] args = {"-namenodes", "unexpected-arg"};
+    assertTrue(runTool(conf, args, false).contains(
+        "Did not expect argument: unexpected-arg"));
+  }
 
   /**
    * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
    * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
    */
+  @Test
   public void testTool() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration(false);
     for (Command cmd : Command.values()) {
       CommandHandler handler = Command.getHandler(cmd.getName());
-      if (handler.key != null) {
+      if (handler.key != null && !"-confKey".equals(cmd.getName())) {
         // Add the key to the conf and ensure tool returns the right value
-        String[] args = {handler.key};
+        String[] args = {cmd.getName()};
         conf.set(handler.key, "value");
         assertTrue(runTool(conf, args, true).contains("value"));
       }