You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ki...@apache.org on 2017/08/25 19:50:19 UTC
hadoop git commit: HDFS-12299. Race Between update pipeline and DN
Re-Registration
Repository: hadoop
Updated Branches:
refs/heads/trunk 4b2c442d4 -> 8455d7075
HDFS-12299. Race Between update pipeline and DN Re-Registration
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8455d707
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8455d707
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8455d707
Branch: refs/heads/trunk
Commit: 8455d70756b584ddf27fc626a147f4eb2e1dc94e
Parents: 4b2c442
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Aug 25 14:49:29 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Aug 25 14:49:29 2017 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DataStreamer.java | 3 +-
.../TestClientProtocolForPipelineRecovery.java | 47 ++++++++++++++++++++
2 files changed, 49 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8455d707/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index f5ce0ff..838da7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1620,7 +1620,8 @@ class DataStreamer extends Daemon {
}
/** update pipeline at the namenode */
- private void updatePipeline(long newGS) throws IOException {
+ @VisibleForTesting
+ public void updatePipeline(long newGS) throws IOException {
final ExtendedBlock oldBlock = block.getCurrentBlock();
// the new GS has been propagated to all DN, it should be ok to update the
// local block state
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8455d707/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 0212c4e..3f8c7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -706,4 +709,48 @@ public class TestClientProtocolForPipelineRecovery {
cluster.shutdown();
}
}
+
+ @Test
+ public void testUpdatePipeLineAfterDNReg()throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ cluster.waitActive();
+ FileSystem fileSys = cluster.getFileSystem();
+
+ Path file = new Path("/testUpdatePipeLineAfterDNReg");
+ FSDataOutputStream out = fileSys.create(file);
+ out.write(1);
+ out.hflush();
+ //Get the first DN, disable its heartbeats, and then mark it as dead
+ DataNode dn1 = cluster.getDataNodes().get(0);
+ dn1.setHeartbeatsDisabledForTests(true);
+ DatanodeDescriptor dn1Desc = cluster.getNamesystem(0).getBlockManager()
+ .getDatanodeManager().getDatanode(dn1.getDatanodeId());
+ cluster.setDataNodeDead(dn1Desc);
+ //Re-register the DeadNode
+ DatanodeProtocolClientSideTranslatorPB dnp = new DatanodeProtocolClientSideTranslatorPB(
+ cluster.getNameNode().getNameNodeAddress(), conf);
+ dnp.registerDatanode(
+ dn1.getDNRegistrationForBP(cluster.getNamesystem().getBlockPoolId()));
+ DFSOutputStream dfsO = (DFSOutputStream) out.getWrappedStream();
+ String clientName = ((DistributedFileSystem) fileSys).getClient()
+ .getClientName();
+ NamenodeProtocols namenode = cluster.getNameNodeRpc();
+ //Update the generation stamp and call updatePipeline
+ LocatedBlock newBlock = namenode
+ .updateBlockForPipeline(dfsO.getBlock(), clientName);
+ dfsO.getStreamer()
+ .updatePipeline(newBlock.getBlock().getGenerationStamp());
+ newBlock = namenode.updateBlockForPipeline(dfsO.getBlock(), clientName);
+ //Should not throw any error; the pipeline update should succeed
+ dfsO.getStreamer()
+ .updatePipeline(newBlock.getBlock().getGenerationStamp());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org