Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2013/07/19 00:24:21 UTC

svn commit: r1504694 - in /hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/tools/ src/test/java/org/apache/hadoop/hdfs/server/namenode/

Author: cnauroth
Date: Thu Jul 18 22:24:20 2013
New Revision: 1504694

URL: http://svn.apache.org/r1504694
Log:
HDFS-4996. Merging change r1504686 from branch-2.1-beta to branch-2.1.0-beta.

Modified:
    hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java

Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1504694&r1=1504693&r2=1504694&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 18 22:24:20 2013
@@ -191,6 +191,9 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4992. Make balancer's mover thread count and dispatcher thread count
     configurable.  (Max Lapan via szetszwo)
 
+    HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the
+    output file instead of appending to it. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1504694&r1=1504693&r2=1504694&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul 18 22:24:20 2013
@@ -1180,7 +1180,7 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.UNCHECKED);
       File file = new File(System.getProperty("hadoop.log.dir"), filename);
       PrintWriter out = new PrintWriter(new BufferedWriter(
-          new OutputStreamWriter(new FileOutputStream(file, true), Charsets.UTF_8)));
+          new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
       metaSave(out);
       out.flush();
       out.close();
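
[Editorial context on the one-line change above: the second argument to the FileOutputStream constructor selects append mode, so dropping it makes each metaSave call truncate the report file instead of growing it, which is what lets the operation be retried safely. A minimal stand-alone sketch of the difference follows; class, method, and file names are illustrative only, not NameNode code, and it uses java.nio StandardCharsets where the patch uses Guava's Charsets.UTF_8.]

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

public class OverwriteVsAppendSketch {

  // Writes one dummy report line; 'append' mirrors the removed constructor flag.
  static void writeReport(File file, boolean append) throws IOException {
    PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(
        new FileOutputStream(file, append), StandardCharsets.UTF_8)));
    try {
      out.println("Live Datanodes: 0");
    } finally {
      out.close();
    }
  }

  public static void main(String[] args) throws IOException {
    File report = new File(System.getProperty("java.io.tmpdir"), "metasave.demo.txt");
    report.delete();
    writeReport(report, false);  // first call
    writeReport(report, false);  // retry: file still holds exactly one report
    System.out.println("overwrite, two calls: " + report.length() + " bytes");
    report.delete();
    writeReport(report, true);   // old behavior
    writeReport(report, true);   // retry doubles the content
    System.out.println("append, two calls:    " + report.length() + " bytes");
  }
}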

Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1504694&r1=1504693&r2=1504694&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Thu Jul 18 22:24:20 2013
@@ -619,6 +619,7 @@ public class DFSAdmin extends FsShell {
 
     String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
       "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
+      "\t\t<filename> is overwritten if it exists.\n" +
       "\t\t<filename> will contain one line for each of the following\n" +
       "\t\t\t1. Datanodes heart beating with Namenode\n" +
       "\t\t\t2. Blocks waiting to be replicated\n" +

Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1504694&r1=1504693&r2=1504694&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Thu Jul 18 22:24:20 2013
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -31,6 +33,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -47,6 +50,7 @@ public class TestMetaSave {
   static final int blockSize = 8192;
   private static MiniDFSCluster cluster = null;
   private static FileSystem fileSys = null;
+  private static FSNamesystem namesystem = null;
 
   private void createFile(FileSystem fileSys, Path name) throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
@@ -72,6 +76,7 @@ public class TestMetaSave {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    namesystem = cluster.getNamesystem();
   }
 
   /**
@@ -79,9 +84,6 @@ public class TestMetaSave {
    */
   @Test
   public void testMetaSave() throws IOException, InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -95,9 +97,8 @@ public class TestMetaSave {
     namesystem.metaSave("metasave.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasave.out.txt";
-    FileInputStream fstream = new FileInputStream(logFile);
+    FileInputStream fstream = new FileInputStream(getLogFile(
+      "metasave.out.txt"));
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = null;
     try {
@@ -124,9 +125,6 @@ public class TestMetaSave {
   @Test
   public void testMetasaveAfterDelete()
       throws IOException, InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -142,11 +140,10 @@ public class TestMetaSave {
     namesystem.metaSave("metasaveAfterDelete.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasaveAfterDelete.out.txt";
     BufferedReader reader = null;
     try {
-      FileInputStream fstream = new FileInputStream(logFile);
+      FileInputStream fstream = new FileInputStream(getLogFile(
+        "metasaveAfterDelete.out.txt"));
       DataInputStream in = new DataInputStream(fstream);
       reader = new BufferedReader(new InputStreamReader(in));
       reader.readLine();
@@ -166,6 +163,42 @@ public class TestMetaSave {
     }
   }
 
+  /**
+   * Tests that metasave overwrites the output file (not append).
+   */
+  @Test
+  public void testMetaSaveOverwrite() throws Exception {
+    // metaSave twice.
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+
+    // Read output file.
+    FileInputStream fis = null;
+    InputStreamReader isr = null;
+    BufferedReader rdr = null;
+    try {
+      fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
+      isr = new InputStreamReader(fis);
+      rdr = new BufferedReader(isr);
+
+      // Validate that file was overwritten (not appended) by checking for
+      // presence of only one "Live Datanodes" line.
+      boolean foundLiveDatanodesLine = false;
+      String line = rdr.readLine();
+      while (line != null) {
+        if (line.startsWith("Live Datanodes")) {
+          if (foundLiveDatanodesLine) {
+            fail("multiple Live Datanodes lines, output file not overwritten");
+          }
+          foundLiveDatanodesLine = true;
+        }
+        line = rdr.readLine();
+      }
+    } finally {
+      IOUtils.cleanup(null, rdr, isr, fis);
+    }
+  }
+
   @AfterClass
   public static void tearDown() throws IOException {
     if (fileSys != null)
@@ -173,4 +206,14 @@ public class TestMetaSave {
     if (cluster != null)
       cluster.shutdown();
   }
+
+  /**
+   * Returns a File for the given name inside the log directory.
+   * 
+   * @param name String file name
+   * @return File for given name inside log directory
+   */
+  private static File getLogFile(String name) {
+    return new File(System.getProperty("hadoop.log.dir"), name);
+  }
 }