Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2015/06/22 23:37:22 UTC

hadoop git commit: HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. McCabe)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 5590e914f -> 7b424f938


HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. McCabe)
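
The change replaces a per-op copy of each preserved edit log with a hard link. A minimal standalone sketch of the idea, using only java.nio.file (the file names below are hypothetical, not taken from the commit):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class HardLinkSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical stand-ins for a preserved edit log and its new
        // name under the post-upgrade "current" directory.
        Path dir = Files.createTempDirectory("editlogs");
        Path preserved = Files.write(dir.resolve("edits.tmp"),
            "edit log bytes".getBytes(StandardCharsets.UTF_8));
        Path linked = dir.resolve("edits");
        // A hard link is a constant-time metadata operation, independent
        // of file size; copying every edit-log op is linear in the log
        // size, which caused the timeouts this commit fixes.
        Files.createLink(linked, preserved);
        // Both names now refer to the same underlying file.
        System.out.println(Files.isSameFile(preserved, linked)); // true
      }
    }

One caveat the change relies on: hard links require source and target to be on the same filesystem, which holds here because the tmp and current directories live under the same storage directory.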


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b424f93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b424f93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b424f93

Branch: refs/heads/trunk
Commit: 7b424f938c3c306795d574792b086d84e4f06425
Parents: 5590e91
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Jun 22 14:37:10 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Jun 22 14:37:10 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/namenode/NNUpgradeUtil.java     | 18 +-----
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  | 58 +++++++++++++++-----
 3 files changed, 48 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b424f93/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 234fcf7..b9d9943 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
 
   OPTIMIZATIONS
+    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+    via Colin P. McCabe)
 
   BUG FIXES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b424f93/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
index ee651fd..1f10bc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -127,23 +128,8 @@ public abstract class NNUpgradeUtil {
 
     for (String s : fileNameList) {
       File prevFile = new File(tmpDir, s);
-      Preconditions.checkState(prevFile.canRead(),
-          "Edits log file " + s + " is not readable.");
       File newFile = new File(curDir, prevFile.getName());
-      Preconditions.checkState(newFile.createNewFile(),
-          "Cannot create new edits log file in " + curDir);
-      EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
-      EditLogFileOutputStream out =
-          new EditLogFileOutputStream(conf, newFile, 512*1024);
-      FSEditLogOp logOp = in.nextValidOp();
-      while (logOp != null) {
-        out.write(logOp);
-        logOp = in.nextOp();
-      }
-      out.setReadyToFlush();
-      out.flushAndSync(true);
-      out.close();
-      in.close();
+      Files.createLink(newFile.toPath(), prevFile.toPath());
     }
   }
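
Note that the two deleted Preconditions checks (prevFile.canRead() and newFile.createNewFile()) are largely subsumed by Files.createLink itself, which refuses to overwrite an existing target and fails if the source is missing. A hedged sketch of that failure behavior on typical platforms (paths hypothetical):

    import java.nio.file.*;

    public class CreateLinkFailures {
      public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("nnupgrade");
        Path src = dir.resolve("edits.tmp");
        Path dst = dir.resolve("edits");
        try {
          // Source missing: surfaces as NoSuchFileException on
          // common platforms.
          Files.createLink(dst, src);
        } catch (NoSuchFileException e) {
          System.out.println("missing source: " + e.getFile());
        }
        Files.write(src, new byte[0]);
        Files.createLink(dst, src); // succeeds
        try {
          // Target already exists: createLink will not overwrite.
          Files.createLink(dst, src);
        } catch (FileAlreadyExistsException e) {
          System.out.println("existing target: " + e.getFile());
        }
      }
    }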
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b424f93/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 544537c..8cc47c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -30,13 +30,16 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
-import java.util.LinkedList;
+import java.nio.file.Files;
 import java.util.List;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.inotify.Event;
+import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -45,7 +48,11 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -54,6 +61,8 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
+
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
@@ -466,31 +475,50 @@ public class TestDFSUpgrade {
     log("Normal NameNode upgrade", 1);
     File[] created =
         UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
-    List<String> beforeUpgrade = new LinkedList<>();
     for (final File createdDir : created) {
       List<String> fileNameList =
           IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
-      beforeUpgrade.addAll(fileNameList);
+      for (String fileName : fileNameList) {
+        String tmpFileName = fileName + ".tmp";
+        File existingFile = new File(createdDir, fileName);
+        File tmpFile = new File(createdDir, tmpFileName);
+        Files.move(existingFile.toPath(), tmpFile.toPath());
+        File newFile = new File(createdDir, fileName);
+        Preconditions.checkState(newFile.createNewFile(),
+            "Cannot create new edits log file in " + createdDir);
+        EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
+            HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID,
+            false);
+        EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
+            (int)tmpFile.length());
+        out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
+        FSEditLogOp logOp = in.readOp();
+        while (logOp != null) {
+          out.write(logOp);
+          logOp = in.readOp();
+        }
+        out.setReadyToFlush();
+        out.flushAndSync(true);
+        out.close();
+        Files.delete(tmpFile.toPath());
+      }
     }
 
     cluster = createCluster();
 
-    List<String> afterUpgrade = new LinkedList<>();
-    for (final File createdDir : created) {
-      List<String> fileNameList =
-          IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
-      afterUpgrade.addAll(fileNameList);
-    }
-
-    for (String s : beforeUpgrade) {
-      assertTrue(afterUpgrade.contains(s));
-    }
-
+    DFSInotifyEventInputStream ieis =
+        cluster.getFileSystem().getInotifyEventStream(0);
+    EventBatch batch = ieis.poll();
+    Event[] events = batch.getEvents();
+    assertTrue("Should be able to get transactions before the upgrade.",
+        events.length > 0);
+    assertEquals(events[0].getEventType(), Event.EventType.CREATE);
+    assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
     cluster.shutdown();
     UpgradeUtilities.createEmptyDirs(nameNodeDirs);
   }
 
-  private static enum EditLogsFilter implements FilenameFilter {
+  private enum EditLogsFilter implements FilenameFilter {
     INSTANCE;
 
     @Override
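
The rewritten test no longer compares directory listings before and after the upgrade. Instead it rewrites each edit log under an older layout version (CURRENT_LAYOUT_VERSION + 1; NameNode layout versions are negative, so adding one yields an older version), restarts the cluster via createCluster(), and verifies through the inotify API that the preserved transactions are still readable. A hedged usage sketch of that read-back step, assuming a running MiniDFSCluster as in the test:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.inotify.Event;
    import org.apache.hadoop.hdfs.inotify.EventBatch;
    import org.apache.hadoop.hdfs.inotify.MissingEventsException;

    public class InotifyReadBack {
      // Assumes the caller already started the cluster; the test gets
      // one from createCluster() after rewriting the edit logs.
      static void printCreates(MiniDFSCluster cluster)
          throws IOException, MissingEventsException {
        DistributedFileSystem fs = cluster.getFileSystem();
        DFSInotifyEventInputStream ieis = fs.getInotifyEventStream(0);
        // poll() is non-blocking and may return null when no batch is
        // buffered; the test dereferences the batch directly because the
        // upgrade has already replayed the preserved transactions.
        EventBatch batch = ieis.poll();
        if (batch != null) {
          for (Event e : batch.getEvents()) {
            if (e.getEventType() == Event.EventType.CREATE) {
              System.out.println(((Event.CreateEvent) e).getPath());
            }
          }
        }
      }
    }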