Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2013/01/01 04:39:31 UTC

svn commit: r1427290 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

Author: shv
Date: Tue Jan  1 03:39:31 2013
New Revision: 1427290

URL: http://svn.apache.org/viewvc?rev=1427290&view=rev
Log:
HDFS-4349. Add test for reading files from BackupNode. Contributed by Konstantin Shvachko.
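
For context, the core of the new test is that an ordinary HDFS client can open the same file through either namenode address. A minimal standalone sketch of that read path (not part of this commit; "bnhost:50100" and the file path are placeholders, with 50100 being the customary BackupNode port):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class BackupNodeReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point the client at the BackupNode's RPC address instead of the
        // active NameNode; the BackupNode answers the metadata lookups,
        // while DataNodes serve the actual blocks.
        FileSystem bnFS = FileSystem.get(URI.create("hdfs://bnhost:50100"), conf);
        FSDataInputStream in = bnFS.open(new Path("/fileToRead.dat"));
        try {
          IOUtils.copyBytes(in, System.out, conf, false); // stream file to stdout
        } finally {
          in.close();
        }
      }
    }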

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1427290&r1=1427289&r2=1427290&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jan  1 03:39:31 2013
@@ -635,6 +635,8 @@ Release 2.0.3-alpha - Unreleased 
     HDFS-4347. Avoid infinite waiting checkpoint to complete in TestBackupNode.
     (Plamen Jeliazkov via shv)
 
+    HDFS-4349. Add test for reading files from BackupNode. (shv)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1427290&r1=1427289&r2=1427290&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Tue Jan  1 03:39:31 2013
@@ -21,6 +21,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -41,6 +43,7 @@ import org.apache.hadoop.security.UserGr
  *  int, int, byte[])
  */
 class EditLogBackupOutputStream extends EditLogOutputStream {
+  private static final Log LOG = LogFactory.getLog(EditLogBackupOutputStream.class);
   static int DEFAULT_BUFFER_SIZE = 256;
 
   private final JournalProtocol backupNode;  // RPC proxy to backup node
@@ -117,6 +120,11 @@ class EditLogBackupOutputStream extends 
   protected void flushAndSync(boolean durable) throws IOException {
     assert out.getLength() == 0 : "Output buffer is not empty";
     
+    if (doubleBuf.isFlushed()) {
+      LOG.info("Nothing to flush");
+      return;
+    }
+
     int numReadyTxns = doubleBuf.countReadyTxns();
     long firstTxToFlush = doubleBuf.getFirstReadyTxId();
     

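The isFlushed() guard above makes flushAndSync() a cheap no-op when nothing has accumulated since the last flush, presumably so that a sync against an already-empty double buffer does not fall through to the journal RPC with zero transactions. A simplified sketch of the buffer-pair shape behind that check (an illustration only, not the real EditsDoubleBuffer):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    class DoubleBufferSketch {
      // Writers append to bufCurrent; a flush first swaps it into bufReady,
      // then drains bufReady, so writers are never blocked on I/O.
      private ByteArrayOutputStream bufCurrent = new ByteArrayOutputStream();
      private ByteArrayOutputStream bufReady = new ByteArrayOutputStream();

      boolean isFlushed() {
        // Nothing staged and nothing pending: a flush would have no work.
        return bufCurrent.size() == 0 && bufReady.size() == 0;
      }

      void setReadyToFlush() {
        ByteArrayOutputStream tmp = bufReady;
        bufReady = bufCurrent;
        bufCurrent = tmp;
      }

      void flushTo(OutputStream out) throws IOException {
        bufReady.writeTo(out);
        bufReady.reset();
      }
    }
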
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1427290&r1=1427289&r2=1427290&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Tue Jan  1 03:39:31 2013
@@ -417,11 +417,64 @@ public class TestBackupNode {
       // verify that file2 exists
       assertTrue(fileSys.exists(file2));
     } catch(IOException e) {
-      LOG.error("Error in TestBackupNode:", e);
+      LOG.error("Error in TestBackupNode: ", e);
       assertTrue(e.getLocalizedMessage(), false);
     } finally {
       fileSys.close();
       cluster.shutdown();
     }
   }
+
+  /**
+   * Verify that a file can be read both from NameNode and BackupNode.
+   */
+  @Test
+  public void testCanReadData() throws IOException {
+    Path file1 = new Path("/fileToRead.dat");
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    BackupNode backup = null;
+    try {
+      // Start NameNode and BackupNode
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(0).format(true).build();
+      fileSys = cluster.getFileSystem();
+      long txid = cluster.getNameNodeRpc().getTransactionID();
+      backup = startBackupNode(conf, StartupOption.BACKUP, 1);
+      waitCheckpointDone(cluster, txid);
+
+      // Setup dual NameNode configuration for DataNodes
+      String rpcAddrKeyPrefix =
+          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
+      String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
+      String bnAddr = backup.getNameNodeAddressHostPortString();
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
+      conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
+      conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
+          "nnActive, nnBackup");
+      conf.set(rpcAddrKeyPrefix + ".nnActive", nnAddr);
+      conf.set(rpcAddrKeyPrefix + ".nnBackup", bnAddr);
+      cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
+
+      DFSTestUtil.createFile(
+          fileSys, file1, fileSize, fileSize, blockSize, (short)3, seed);
+
+      // Read the same file from file systems pointing to NN and BN
+      FileSystem bnFS = FileSystem.get(
+          new Path("hdfs://" + bnAddr).toUri(), conf);
+      String nnData = DFSTestUtil.readFile(fileSys, file1);
+      String bnData = DFSTestUtil.readFile(bnFS, file1);
+      assertEquals("Data read from BackupNode and NameNode is not the same.",
+          nnData, bnData);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode: ", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(backup != null) backup.stop();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
 }
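
For anyone reproducing the dual-namenode setup outside MiniDFSCluster: the test leans on the federation/HA-style key layout so that DataNodes register with, and report blocks to, both the active NameNode and the BackupNode. With the DFSConfigKeys constants expanded to their literal key names, the configuration amounts to roughly the following (a sketch; the IDs "bnCluster", "nnActive" and "nnBackup" are arbitrary names taken from the test, and the host:port values are placeholders):

    import org.apache.hadoop.conf.Configuration;

    public class BackupNodeConfSketch {
      static Configuration dualNameNodeConf() {
        Configuration conf = new Configuration();
        // One logical nameservice containing both namenodes.
        conf.set("dfs.nameservices", "bnCluster");
        conf.set("dfs.nameservice.id", "bnCluster");
        conf.set("dfs.ha.namenodes.bnCluster", "nnActive,nnBackup");
        // RPC addresses for each namenode ID (placeholder host:port values;
        // 50100 is the customary BackupNode default port).
        conf.set("dfs.namenode.rpc-address.bnCluster.nnActive", "nnhost:8020");
        conf.set("dfs.namenode.rpc-address.bnCluster.nnBackup", "bnhost:50100");
        return conf;
      }
    }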