
hadoop git commit: HDFS-13840. RBW Blocks which are having less GS should be added to Corrupt. Contributed by Brahma Reddy Battula

Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 85e00477b -> b28dacf60


HDFS-13840. RBW Blocks which are having less GS should be added to Corrupt. Contributed by Brahma Reddy Battula

(cherry picked from commit e0ff8e2c10f38298b81624df1e62743ac171f18d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b28dacf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b28dacf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b28dacf6

Branch: refs/heads/branch-3.0
Commit: b28dacf60ca98350f78d7b950d172da78a44991a
Parents: 85e0047
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Sep 26 23:44:16 2018 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Sep 26 23:55:02 2018 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 19 ++++++++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  5 +++
 .../TestRBWBlockInvalidation.java               | 38 ++++++++++++++++++++
 .../server/namenode/TestAddStripedBlocks.java   |  4 +--
 4 files changed, 61 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28dacf6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6f527ef..8884940 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3021,10 +3021,20 @@ public class BlockManager implements BlockStatsMXBean {
       }
     case RBW:
     case RWR:
+      final long reportedGS = reported.getGenerationStamp();
       if (!storedBlock.isComplete()) {
+        // When the DN reports a replica with a lower GS than the stored block,
+        // mark it as corrupt, since a replica with the current GS already exists.
+        if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
+          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
+              reportedGS,
+              "reported " + reportedState + " replica with genstamp "
+                  + reportedGS
+                  + " does not match Stored block's genstamp in block map "
+                  + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+        }
         return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
-        final long reportedGS = reported.getGenerationStamp();
         return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
             "reported " + reportedState + " replica with genstamp " + reportedGS
             + " does not match COMPLETE block's genstamp in block map "
@@ -3084,8 +3094,11 @@ public class BlockManager implements BlockStatsMXBean {
     block.getUnderConstructionFeature().addReplicaIfNotPresent(
         storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
-    if (ucBlock.reportedState == ReplicaState.FINALIZED &&
-        (block.findStorageInfo(storageInfo) < 0)) {
+    // Add replica if appropriate. If the replica was previously corrupt
+    // but now okay, it might need to be updated.
+    if (ucBlock.reportedState == ReplicaState.FINALIZED && (
+        block.findStorageInfo(storageInfo) < 0) || corruptReplicas
+        .isReplicaCorrupt(block, storageInfo.getDatanodeDescriptor())) {
       addStoredBlock(block, ucBlock.reportedBlock, storageInfo, null, true);
     }
   } 
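
A minimal sketch of the reporting logic this hunk changes, using plain longs and a
boolean instead of the real BlockManager/BlockInfo types (which are stood in for
here purely for illustration): a replica reported in RBW/RWR state against an
under-construction block is now treated as corrupt when its generation stamp is
older than the stored one, because a replica with the current GS is expected to
exist elsewhere.

    // GenStampCheckSketch.java -- illustrative only, not the real BlockManager API.
    public final class GenStampCheckSketch {

      /** Returns true when the reported replica should be marked corrupt. */
      static boolean shouldMarkCorrupt(long storedGS, long reportedGS,
          boolean storedBlockComplete) {
        if (!storedBlockComplete) {
          // New behaviour from this patch: an under-construction block whose
          // reported GS is older than the stored GS is stale, hence corrupt.
          return storedGS > reportedGS;
        }
        // Pre-existing behaviour: for a COMPLETE block any GS mismatch is corrupt.
        return storedGS != reportedGS;
      }

      public static void main(String[] args) {
        System.out.println(shouldMarkCorrupt(1005, 1004, false)); // true  -> corrupt
        System.out.println(shouldMarkCorrupt(1005, 1005, false)); // false -> not corrupt
      }
    }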

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28dacf6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6fa9a4f..7741f76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -556,6 +556,11 @@ public class MiniDFSCluster implements AutoCloseable {
     public void setDnArgs(String ... args) {
       dnArgs = args;
     }
+
+    public DataNode getDatanode() {
+      return datanode;
+    }
+
   }
 
   private Configuration conf;
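
The new accessor exposes the DataNode instance held by DataNodeProperties, so a
test can inspect a node it has just stopped. A brief usage sketch mirroring the
new test below (assumes a MiniDFSCluster-based test with an HdfsConfiguration
named conf in scope):

    // Usage sketch: stop a DataNode and read its transfer address through the
    // new DataNodeProperties#getDatanode() accessor.
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build()) {
      MiniDFSCluster.DataNodeProperties dnProp = cluster.stopDataNode(0);
      String dnAddress = dnProp.getDatanode().getXferAddress().toString();
      System.out.println("Stopped DN transfer address: " + dnAddress);
    }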

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28dacf6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
index cb2ee9c..c1170eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -27,6 +28,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -244,6 +246,42 @@ public class TestRBWBlockInvalidation {
 
   }
 
+  @Test
+  public void testRWRShouldNotAddedOnDNRestart() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
+        "false");
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build()) {
+      Path path = new Path("/testRBW");
+      FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2);
+      out.writeBytes("old gs data\n");
+      out.hflush();
+      // stop one datanode
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+      String dnAddress = dnProp.getDatanode().getXferAddress().toString();
+      if (dnAddress.startsWith("/")) {
+        dnAddress = dnAddress.substring(1);
+      }
+      //Write some more data after DN stopped.
+      out.writeBytes("old gs data\n");
+      out.hflush();
+      cluster.restartDataNode(dnProp, true);
+      // wait till the block report comes
+      Thread.sleep(3000);
+      // check the block locations, this should not contain restarted datanode
+      BlockLocation[] locations = cluster.getFileSystem()
+          .getFileBlockLocations(path, 0, Long.MAX_VALUE);
+      String[] names = locations[0].getNames();
+      for (String node : names) {
+        if (node.equals(dnAddress)) {
+          fail("Old GS DN should not be present in latest block locations.");
+        }
+      }
+      out.close();
+    }
+  }
+
   private void waitForNumTotalBlocks(final MiniDFSCluster cluster,
       final int numTotalBlocks) throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28dacf6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index ec13b44..9374c04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -337,8 +337,8 @@ public class TestAddStripedBlocks {
     int i = groupSize - 1;
     for (DataNode dn : cluster.getDataNodes()) {
       String storageID = storageIDs.get(i);
-      final Block block = new Block(lastBlock.getBlockId() + i--,
-          lastBlock.getGenerationStamp(), 0);
+      final Block block = new Block(lastBlock.getBlockId() + i--, 0,
+          lastBlock.getGenerationStamp());
       DatanodeStorage storage = new DatanodeStorage(storageID);
       List<ReplicaBeingWritten> blocks = new ArrayList<>();
       ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null,
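
The test fix swaps the last two constructor arguments: as the corrected call
shows, Block takes the length before the generation stamp, so the old code was
passing the genstamp as the block length and zero as the genstamp. A tiny sketch
of the corrected ordering with named locals (values assumed for illustration):

    // Sketch: Block(blockId, numBytes, generationStamp) -- length precedes genstamp.
    long blockId = lastBlock.getBlockId();
    long numBytes = 0L;                              // replica not yet written to
    long genStamp = lastBlock.getGenerationStamp();
    Block block = new Block(blockId, numBytes, genStamp);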


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org