Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/03/02 18:15:29 UTC

[13/50] [abbrv] hadoop git commit: HDFS-7843. A truncated file is corrupted after rollback from a rolling upgrade.

HDFS-7843. A truncated file is corrupted after rollback from a rolling upgrade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/746bc377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/746bc377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/746bc377

Branch: refs/heads/HDFS-7285
Commit: 746bc377b7252d344b5d83f3ae862b717085db4d
Parents: 7911e1d
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Thu Feb 26 10:14:40 2015 +0800
Committer: Zhe Zhang <zh...@cloudera.com>
Committed: Mon Mar 2 09:13:51 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java      |  3 ++
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  | 48 ++++++++++++++------
 3 files changed, 40 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


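In short: HDFS truncate may shorten the last block of a file in place. If that
happens while a rolling upgrade is in progress, rolling back restores the
pre-upgrade namespace, in which the file still has its original length, but the
in-place-shortened replica can no longer supply the missing bytes, so the file
reads back corrupt. The fix forces copy-on-truncate while a rolling upgrade is
in progress (see the FSNamesystem hunk below), and the regression test
exercises truncate followed by rollback. Below is a condensed sketch of the
failing scenario, distilled from the test; it assumes a running MiniDFSCluster
`cluster` and the same imports as the test file, and the safe-mode/PREPARE
sequence comes from the test's startRollingUpgrade helper, whose body is only
partly visible in this diff:

    DistributedFileSystem dfs = cluster.getFileSystem();

    // Write a 1 KB file of random bytes.
    final Path file = new Path("/foo/file");
    final byte[] data = new byte[1024];
    DFSUtil.getRandom().nextBytes(data);
    final FSDataOutputStream out = dfs.create(file);
    out.write(data, 0, data.length);
    out.close();

    // Prepare a rolling upgrade ...
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    // ... and truncate the file while the upgrade is in progress.
    dfs.truncate(file, 512);
    TestFileTruncate.checkBlockRecovery(file, dfs);

    // Roll back the upgrade by restarting the NameNode with the
    // rollback startup option, then re-fetch the client.
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    dfs = cluster.getFileSystem();

    // Before this fix, the rolled-back file had its original length in the
    // namespace but could not serve the original bytes; this check failed.
    AppendTestUtil.checkFullFile(dfs, file, data.length, data);
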
http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e0f9267..f8b0c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1026,6 +1026,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
     case. (Liang Xie via wang)
 
+    HDFS-7843. A truncated file is corrupted after rollback from a rolling
+    upgrade.  (szetszwo)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index deaf90c..63ffd81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2089,6 +2089,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if(!isUpgradeFinalized()) {
       return true;
     }
+    if (isRollingUpgrade()) {
+      return true;
+    }
     return file.isBlockInLatestSnapshot(blk);
   }
 

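The hunk above is the whole functional change: it extends the check that
decides whether truncate must copy the last block to a new block
(copy-on-truncate) rather than shorten the existing replica in place. Copying
was already forced while a regular upgrade was not yet finalized, and when the
block belongs to the latest snapshot; HDFS-7843 additionally forces it while a
rolling upgrade is in progress, so a rollback still finds the untouched
original block. The hunk does not show the enclosing method's signature, so the
reconstruction below is a sketch: the name, parameters, and javadoc are
assumptions, while the body is verbatim from the diff.

      /**
       * Decide whether a truncate must copy the block rather than shorten
       * the existing replica in place. (Reconstructed; only the method body
       * appears in the hunk above.)
       */
      boolean shouldCopyOnTruncate(INodeFile file, BlockInfoContiguous blk) {
        // A plain upgrade that is not yet finalized must keep the old
        // replica so that a downgrade can restore it.
        if (!isUpgradeFinalized()) {
          return true;
        }
        // HDFS-7843: the same reasoning applies during a rolling upgrade;
        // truncating in place would destroy the bytes a rollback needs.
        if (isRollingUpgrade()) {
          return true;
        }
        // Otherwise, copy only if the block is still referenced by the
        // latest snapshot.
        return file.isBlockInLatestSnapshot(blk);
      }
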
http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 8e7b4b1..9746049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -23,9 +23,11 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
@@ -260,42 +263,50 @@ public class TestRollingUpgrade {
     final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
       final Path bar = new Path("/bar");
       cluster.getFileSystem().mkdirs(foo);
 
-      startRollingUpgrade(foo, bar, cluster);
+      final Path file = new Path(foo, "file");
+      final byte[] data = new byte[1024];
+      DFSUtil.getRandom().nextBytes(data);
+      final FSDataOutputStream out = cluster.getFileSystem().create(file);
+      out.write(data, 0, data.length);
+      out.close();
+
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.getFileSystem().rollEdits();
       cluster.getFileSystem().rollEdits();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.getFileSystem().rollEdits();
       cluster.getFileSystem().rollEdits();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.restartNameNode();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.restartNameNode();
-      rollbackRollingUpgrade(foo, bar, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
-      rollbackRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
 
-      startRollingUpgrade(foo, bar, cluster);
-      rollbackRollingUpgrade(foo, bar, cluster);
+      startRollingUpgrade(foo, bar, file, data, cluster);
+      rollbackRollingUpgrade(foo, bar, file, data, cluster);
     } finally {
       if(cluster != null) cluster.shutdown();
     }
   }
   
   private static void startRollingUpgrade(Path foo, Path bar,
+      Path file, byte[] data,
       MiniDFSCluster cluster) throws IOException {
     final DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -305,18 +316,27 @@ public class TestRollingUpgrade {
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 
     dfs.mkdirs(bar);
-    
     Assert.assertTrue(dfs.exists(foo));
     Assert.assertTrue(dfs.exists(bar));
+
+    //truncate a file
+    final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
+    dfs.truncate(file, newLength);
+    TestFileTruncate.checkBlockRecovery(file, dfs);
+    AppendTestUtil.checkFullFile(dfs, file, newLength, data);
   }
   
   private static void rollbackRollingUpgrade(Path foo, Path bar,
+      Path file, byte[] data,
       MiniDFSCluster cluster) throws IOException {
+    final DataNodeProperties dnprop = cluster.stopDataNode(0);
     cluster.restartNameNode("-rollingUpgrade", "rollback");
+    cluster.restartDataNode(dnprop, true);
 
     final DistributedFileSystem dfs = cluster.getFileSystem();
     Assert.assertTrue(dfs.exists(foo));
     Assert.assertFalse(dfs.exists(bar));
+    AppendTestUtil.checkFullFile(dfs, file, data.length, data);
   }
 
   @Test
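
A note on the test choreography above: the DataNode is stopped before the
NameNode is restarted with "-rollingUpgrade rollback" and only brought back
afterwards (the boolean passed to restartDataNode appears to ask
MiniDFSCluster to reuse the same port). This ordering makes the DataNode
re-register against the rolled-back namespace, which is exactly the situation
in which the truncated replica used to surface as corrupt. The final
AppendTestUtil.checkFullFile call then asserts that the file once again
contains all of the original bytes. A rough sketch of what such a
full-file check does, in the spirit of that helper but not the actual Hadoop
implementation:

    // Read the whole file back and compare it byte-for-byte with the
    // expected contents. Assumes the usual org.apache.hadoop.fs imports
    // plus FSDataInputStream and org.junit.Assert.
    static void checkFullFile(FileSystem fs, Path name, int len,
        byte[] expected) throws IOException {
      final byte[] actual = new byte[len];
      final FSDataInputStream in = fs.open(name);
      try {
        in.readFully(0, actual);  // read exactly len bytes from offset 0
      } finally {
        in.close();
      }
      for (int i = 0; i < len; i++) {
        Assert.assertEquals("byte " + i + " differs", expected[i], actual[i]);
      }
    }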