Posted to common-commits@hadoop.apache.org by ju...@apache.org on 2017/12/05 01:04:08 UTC

[1/2] hadoop git commit: HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading to NameNode exit. Contributed by Konstantin Shvachko.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.3 f92fea391 -> b3fe56402


HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading to NameNode exit. Contributed by Konstantin Shvachko.

(cherry picked from commit d331762f24b3f22f609366740c9c4f449edc61ac)
(cherry picked from commit 3219b1bdf6d6a8a86ed1db1491df63a4748ad6de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a32ae95b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a32ae95b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a32ae95b

Branch: refs/heads/branch-2.8.3
Commit: a32ae95b0959b6536690e0510daf7ac10b230b35
Parents: f92fea3
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Tue Nov 28 17:14:23 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Mon Dec 4 17:01:02 2017 -0800

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockManager.java |  2 --
 .../server/blockmanagement/ReplicationWork.java   | 18 ++++++++++++++----
 2 files changed, 14 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32ae95b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3cc66bf..e2bdfcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1527,8 +1527,6 @@ public class BlockManager implements BlockStatsMXBean {
       }
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-      // It is costly to extract the filename for which chooseTargets is called,
-      // so for now we pass in the block collection itself.
       rw.chooseTargets(blockplacement, storagePolicySuite, excludedNodes);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32ae95b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 258dfdd..8362096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -25,7 +25,8 @@ import java.util.Set;
 
 class ReplicationWork {
   private final BlockInfo block;
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
   private final DatanodeDescriptor srcNode;
   private final int additionalReplRequired;
   private final int priority;
@@ -38,7 +39,8 @@ class ReplicationWork {
       List<DatanodeStorageInfo> liveReplicaStorages, int additionalReplRequired,
       int priority) {
     this.block = block;
-    this.bc = bc;
+    this.srcPath = bc.getName();
+    this.storagePolicyID = bc.getStoragePolicyID();
     this.srcNode = srcNode;
     this.srcNode.incrementPendingReplicationWithoutTargets();
     this.containingNodes = containingNodes;
@@ -52,10 +54,10 @@ class ReplicationWork {
       BlockStoragePolicySuite storagePolicySuite,
       Set<Node> excludedNodes) {
     try {
-      targets = blockplacement.chooseTarget(bc.getName(),
+      targets = blockplacement.chooseTarget(getSrcPath(),
           additionalReplRequired, srcNode, liveReplicaStorages, false,
           excludedNodes, block.getNumBytes(),
-          storagePolicySuite.getPolicy(bc.getStoragePolicyID()), null);
+          storagePolicySuite.getPolicy(getStoragePolicyID()), null);
     } finally {
       srcNode.decrementPendingReplicationWithoutTargets();
     }
@@ -84,4 +86,12 @@ class ReplicationWork {
   public DatanodeDescriptor getSrcNode() {
     return srcNode;
   }
+
+  public String getSrcPath() {
+    return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+    return storagePolicyID;
+  }
 }

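For context on the fix above: per the removed comment, rw.chooseTargets() deliberately runs without holding the global namesystem lock. Before this patch, ReplicationWork kept a reference to the BlockCollection and called bc.getName() inside chooseTargets(), so a concurrent file deletion could race with the path reconstruction in INode.getFullPathName and throw ArrayIndexOutOfBoundsException, terminating the NameNode. The patch instead snapshots the path and storage policy ID in the constructor, which still runs under the lock. A minimal standalone sketch of that snapshot-under-lock pattern (illustrative names only, not Hadoop's API):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class SnapshotUnderLock {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private String path = "/some/file"; // shared state, mutated by concurrent deletes

      // Work item that captures an immutable snapshot at construction time.
      static final class Work {
        private final String srcPath;     // snapshot, never re-read
        Work(String srcPath) { this.srcPath = srcPath; }
        void chooseTargets() {
          // Runs WITHOUT the lock; touching only the snapshot avoids the
          // race that produced the ArrayIndexOutOfBoundsException.
          System.out.println("choosing targets for " + srcPath);
        }
      }

      Work createWork() {
        lock.writeLock().lock();          // analogous to the namesystem lock
        try {
          return new Work(path);          // read shared state only while locked
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) {
        SnapshotUnderLock s = new SnapshotUnderLock();
        Work w = s.createWork();          // built under the lock
        w.chooseTargets();                // executed after the lock is released
      }
    }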



[2/2] hadoop git commit: HDFS-12638. Delete copy-on-truncate block along with the original block, when deleting a file being truncated. Contributed by Konstantin Shvachko.

Posted by ju...@apache.org.
HDFS-12638. Delete copy-on-truncate block along with the original block, when deleting a file being truncated. Contributed by Konstantin Shvachko.

(cherry picked from commit 60fd0d7fd73198fd610e59d1a4cd007c5fcc7205)
(cherry picked from commit 19c18f7cc90c8243af634600c4c69ec1109fcce6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3fe5640
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3fe5640
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3fe5640

Branch: refs/heads/branch-2.8.3
Commit: b3fe56402d908019d99af1f1f4fc65cb1d1436a2
Parents: a32ae95
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Thu Nov 30 18:18:09 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Mon Dec 4 17:02:47 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INode.java      | 14 +++++++
 .../hdfs/server/namenode/TestFileTruncate.java  | 41 ++++++++++++++++++++
 2 files changed, 55 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fe5640/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 779ae13..838497c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -33,9 +33,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
@@ -1051,6 +1053,18 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       assert toDelete != null : "toDelete is null";
       toDelete.delete();
       toDeleteList.add(toDelete);
+      // If the file is being truncated,
+      // the copy-on-truncate block should also be collected for deletion
+      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
+      if(uc == null) {
+        return;
+      }
+      Block truncateBlock = uc.getTruncateBlock();
+      if(truncateBlock == null || truncateBlock.equals(toDelete)) {
+        return;
+      }
+      assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
+      addDeleteBlock((BlockInfo) truncateBlock);
     }
 
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {

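For context: when a truncate cannot shorten the last block in place (for example while a rolling upgrade is in progress, or when the block is in a snapshot), HDFS performs copy-on-truncate and records a replacement block on the file's last block via its under-construction feature. The change above makes file deletion also collect that pending truncate block, instead of only the original, so it cannot be left dangling. A minimal standalone sketch of the collection logic (hypothetical stand-in types, not Hadoop's classes):

    import java.util.ArrayList;
    import java.util.List;

    final class TruncateDeleteSketch {
      // Stand-in for BlockInfo; truncateBlock stands in for the block returned
      // by BlockUnderConstructionFeature.getTruncateBlock().
      static final class Blk {
        final long id;
        Blk truncateBlock;                // non-null while copy-on-truncate is pending
        Blk(long id) { this.id = id; }
      }

      private final List<Long> toDelete = new ArrayList<>();

      void addDeleteBlock(Blk b) {
        toDelete.add(b.id);               // always collect the block itself
        Blk t = b.truncateBlock;
        if (t == null || t.id == b.id) {
          return;                         // no pending copy-on-truncate block
        }
        toDelete.add(t.id);               // collect the replacement block too
      }

      public static void main(String[] args) {
        TruncateDeleteSketch c = new TruncateDeleteSketch();
        Blk last = new Blk(1);
        last.truncateBlock = new Blk(2);  // file was being truncated when deleted
        c.addDeleteBlock(last);
        System.out.println(c.toDelete);   // prints [1, 2]
      }
    }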
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fe5640/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index f97939a..292eefd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -1153,6 +1154,46 @@ public class TestFileTruncate {
     fs.delete(parent, true);
   }
 
+  /**
+   * While a rolling upgrade is in progress, the test truncates a file
+   * such that copy-on-truncate is triggered, then deletes the file,
+   * and verifies that no blocks involved in the truncate are left behind.
+   */
+  @Test
+  public void testTruncateWithRollingUpgrade() throws Exception {
+    final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    //start rolling upgrade
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
+    assertEquals("could not prepare for rolling upgrade", 0, status);
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    Path dir = new Path("/testTruncateWithRollingUpgrade");
+    fs.mkdirs(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[3];
+    ThreadLocalRandom.current().nextBytes(data);
+    writeContents(data, data.length, p);
+
+    assertEquals("block num should 1", 1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+
+    final boolean isReady = fs.truncate(p, 2);
+    assertFalse("should be copy-on-truncate", isReady);
+    assertEquals("block num should 2", 2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    fs.delete(p, true);
+
+    assertEquals("block num should 0", 0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
+    assertEquals("could not finalize rolling upgrade", 0, status);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,

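If it helps while reviewing, the new test should be runnable on its own with the usual Surefire single-test selection (assuming a built checkout of branch-2.8.3, run from the source root):

    mvn -pl hadoop-hdfs-project/hadoop-hdfs test -Dtest=TestFileTruncate#testTruncateWithRollingUpgrade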
