Posted to common-commits@hadoop.apache.org by br...@apache.org on 2016/06/21 10:28:26 UTC
[1/3] hadoop git commit: HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
Repository: hadoop
Updated Branches:
refs/heads/branch-2 4e11f33cc -> ee0f389ec
refs/heads/branch-2.8 33e6986ec -> aa8f4cc48
refs/heads/trunk 46f1602e8 -> f2ac132d6
HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2ac132d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2ac132d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2ac132d
Branch: refs/heads/trunk
Commit: f2ac132d6a21c215093b7f87acf2843ac8123716
Parents: 46f1602
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 21 15:42:28 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 21 15:42:28 2016 +0530
----------------------------------------------------------------------
.../server/datanode/DataNodeFaultInjector.java | 2 +
.../hdfs/server/datanode/DataXceiver.java | 3 ++
.../datanode/fsdataset/impl/FsDatasetImpl.java | 4 ++
.../fsdataset/impl/TestSpaceReservation.java | 48 ++++++++++++++++++++
4 files changed, 57 insertions(+)
----------------------------------------------------------------------
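In brief, the patch adds a failMirrorConnection() hook to DataNodeFaultInjector, calls it in DataXceiver just before the DataNode opens its connection to the downstream mirror, and makes FsDatasetImpl release any bytes still reserved for a replica in the write pipeline when that replica is removed (the abandoned-block case in the JIRA title). A new test, testReservedSpaceForPipelineRecovery, fails the mirror connection once and then checks that reserved space on every DataNode drops back to zero. The hook follows the usual settable-singleton fault-injector pattern; a minimal self-contained sketch of that pattern is shown below (class names are illustrative, only failMirrorConnection() matches the real injector):

    import java.io.IOException;

    /** Simplified stand-in for a settable fault-injector singleton (illustrative only). */
    class FaultInjector {
      private static FaultInjector instance = new FaultInjector();

      static FaultInjector get() { return instance; }
      static void set(FaultInjector injector) { instance = injector; }

      /** No-op in production; a test can subclass and override this to throw. */
      public void failMirrorConnection() throws IOException { }
    }

    class PipelineSetup {
      void connectToMirror() throws IOException {
        // Hook point: does nothing normally, but a test-installed injector may throw
        // here, simulating a failed connection to the mirror DataNode.
        FaultInjector.get().failMirrorConnection();
        // ... open the socket to the downstream DataNode ...
      }
    }
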
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ac132d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 7327420..4ecbdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -53,4 +53,6 @@ public class DataNodeFaultInjector {
public void stopSendingPacketDownstream() throws IOException {}
public void noRegistration() throws IOException { }
+
+ public void failMirrorConnection() throws IOException { }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ac132d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index d5dc328..829badd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -739,6 +739,9 @@ class DataXceiver extends Receiver implements Runnable {
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
+
+ DataNodeFaultInjector.get().failMirrorConnection();
+
int timeoutValue = dnConf.socketTimeout +
(HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = dnConf.socketWriteTimeout +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ac132d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 243a0e2..b042297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
@@ -1955,6 +1956,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.debug("Block file " + removing.getBlockFile().getName()
+ " is to be deleted");
}
+ if (removing instanceof ReplicaInPipelineInterface) {
+ ((ReplicaInPipelineInterface) removing).releaseAllBytesReserved();
+ }
}
if (v.isTransientStorage()) {
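The FsDatasetImpl hunk above is the actual fix: when a replica that is still in the write pipeline is removed, its reserved bytes are now returned to the volume via releaseAllBytesReserved(), so the space no longer stays reserved for an abandoned block. A rough, self-contained sketch of this reserve/release accounting follows; VolumeReservation and InFlightReplica are hypothetical names, and only releaseAllBytesReserved() and getReservedForReplicas() echo identifiers from the real patch.

    import java.util.concurrent.atomic.AtomicLong;

    /** Illustrative per-volume accounting of space reserved for in-flight replicas. */
    class VolumeReservation {
      private final AtomicLong reservedForReplicas = new AtomicLong();

      void reserve(long bytes) { reservedForReplicas.addAndGet(bytes); }

      void release(long bytes) { reservedForReplicas.addAndGet(-bytes); }

      long getReservedForReplicas() { return reservedForReplicas.get(); }
    }

    /** Illustrative replica under construction; reserves a full block on creation. */
    class InFlightReplica {
      private final VolumeReservation volume;
      private long bytesReserved;

      InFlightReplica(VolumeReservation volume, long blockSize) {
        this.volume = volume;
        this.bytesReserved = blockSize;
        volume.reserve(blockSize);
      }

      /** Analogue of releaseAllBytesReserved(): give the reservation back when abandoned. */
      void releaseAllBytesReserved() {
        volume.release(bytesReserved);
        bytesReserved = 0;
      }
    }
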
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ac132d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index 6dbd299..fad5216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.ipc.RemoteException;
@@ -77,6 +78,7 @@ public class TestSpaceReservation {
private DFSClient client = null;
FsVolumeReference singletonVolumeRef = null;
FsVolumeImpl singletonVolume = null;
+ private DataNodeFaultInjector old = null;
private static Random rand = new Random();
@@ -146,6 +148,9 @@ public class TestSpaceReservation {
cluster.shutdown();
cluster = null;
}
+ if (old != null) {
+ DataNodeFaultInjector.set(old);
+ }
}
private void createFileAndTestSpaceReservation(
@@ -613,6 +618,49 @@ public class TestSpaceReservation {
checkReservedSpace(expectedFile2Reserved);
}
+ @Test(timeout = 30000)
+ public void testReservedSpaceForPipelineRecovery() throws Exception {
+ final short replication = 3;
+ startCluster(BLOCK_SIZE, replication, -1);
+
+ final String methodName = GenericTestUtils.getMethodName();
+ final Path file = new Path("/" + methodName + ".01.dat");
+
+ old = DataNodeFaultInjector.get();
+ // Fault injector to fail connection to mirror first time.
+ DataNodeFaultInjector.set(new DataNodeFaultInjector() {
+ private int tries = 0;
+
+ @Override
+ public void failMirrorConnection() throws IOException {
+ if (tries++ == 0) {
+ throw new IOException("Failing Mirror for space reservation");
+ }
+ }
+ });
+ // Write 1 byte to the file and kill the writer.
+ FSDataOutputStream os = fs.create(file, replication);
+ os.write(new byte[1]);
+ os.close();
+ // Ensure all space reserved for the replica was released on each
+ // DataNode.
+ cluster.triggerBlockReports();
+ for (final DataNode dn : cluster.getDataNodes()) {
+ try (FsDatasetSpi.FsVolumeReferences volumes =
+ dn.getFSDataset().getFsVolumeReferences()) {
+ final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ LOG.info("dn " + dn.getDisplayName() + " space : "
+ + volume.getReservedForReplicas());
+ return (volume.getReservedForReplicas() == 0);
+ }
+ }, 100, Integer.MAX_VALUE); // Wait until the test times out.
+ }
+ }
+ }
+
private void checkReservedSpace(final long expectedReserved) throws TimeoutException,
InterruptedException, IOException {
for (final DataNode dn : cluster.getDataNodes()) {
[2/3] hadoop git commit: HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
Posted by br...@apache.org.
HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
(cherry picked from commit f2ac132d6a21c215093b7f87acf2843ac8123716)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee0f389e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee0f389e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee0f389e
Branch: refs/heads/branch-2
Commit: ee0f389ecd0c479aa38dae7ee6fd02ed66a64e4e
Parents: 4e11f33
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 21 15:42:28 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 21 15:44:45 2016 +0530
----------------------------------------------------------------------
.../server/datanode/DataNodeFaultInjector.java | 2 +
.../hdfs/server/datanode/DataXceiver.java | 3 ++
.../datanode/fsdataset/impl/FsDatasetImpl.java | 4 ++
.../fsdataset/impl/TestSpaceReservation.java | 48 ++++++++++++++++++++
4 files changed, 57 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0f389e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 7327420..4ecbdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -53,4 +53,6 @@ public class DataNodeFaultInjector {
public void stopSendingPacketDownstream() throws IOException {}
public void noRegistration() throws IOException { }
+
+ public void failMirrorConnection() throws IOException { }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0f389e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index e5ae98f..c5cc36c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -738,6 +738,9 @@ class DataXceiver extends Receiver implements Runnable {
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
+
+ DataNodeFaultInjector.get().failMirrorConnection();
+
int timeoutValue = dnConf.socketTimeout +
(HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = dnConf.socketWriteTimeout +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0f389e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index c692a58..b034c0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
@@ -1960,6 +1961,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.debug("Block file " + removing.getBlockFile().getName()
+ " is to be deleted");
}
+ if (removing instanceof ReplicaInPipelineInterface) {
+ ((ReplicaInPipelineInterface) removing).releaseAllBytesReserved();
+ }
}
if (v.isTransientStorage()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0f389e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index 6dbd299..fad5216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.ipc.RemoteException;
@@ -77,6 +78,7 @@ public class TestSpaceReservation {
private DFSClient client = null;
FsVolumeReference singletonVolumeRef = null;
FsVolumeImpl singletonVolume = null;
+ private DataNodeFaultInjector old = null;
private static Random rand = new Random();
@@ -146,6 +148,9 @@ public class TestSpaceReservation {
cluster.shutdown();
cluster = null;
}
+ if (old != null) {
+ DataNodeFaultInjector.set(old);
+ }
}
private void createFileAndTestSpaceReservation(
@@ -613,6 +618,49 @@ public class TestSpaceReservation {
checkReservedSpace(expectedFile2Reserved);
}
+ @Test(timeout = 30000)
+ public void testReservedSpaceForPipelineRecovery() throws Exception {
+ final short replication = 3;
+ startCluster(BLOCK_SIZE, replication, -1);
+
+ final String methodName = GenericTestUtils.getMethodName();
+ final Path file = new Path("/" + methodName + ".01.dat");
+
+ old = DataNodeFaultInjector.get();
+ // Fault injector to fail connection to mirror first time.
+ DataNodeFaultInjector.set(new DataNodeFaultInjector() {
+ private int tries = 0;
+
+ @Override
+ public void failMirrorConnection() throws IOException {
+ if (tries++ == 0) {
+ throw new IOException("Failing Mirror for space reservation");
+ }
+ }
+ });
+ // Write 1 byte to the file and kill the writer.
+ FSDataOutputStream os = fs.create(file, replication);
+ os.write(new byte[1]);
+ os.close();
+ // Ensure all space reserved for the replica was released on each
+ // DataNode.
+ cluster.triggerBlockReports();
+ for (final DataNode dn : cluster.getDataNodes()) {
+ try (FsDatasetSpi.FsVolumeReferences volumes =
+ dn.getFSDataset().getFsVolumeReferences()) {
+ final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ LOG.info("dn " + dn.getDisplayName() + " space : "
+ + volume.getReservedForReplicas());
+ return (volume.getReservedForReplicas() == 0);
+ }
+ }, 100, Integer.MAX_VALUE); // Wait until the test times out.
+ }
+ }
+ }
+
private void checkReservedSpace(final long expectedReserved) throws TimeoutException,
InterruptedException, IOException {
for (final DataNode dn : cluster.getDataNodes()) {
[3/3] hadoop git commit: HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
Posted by br...@apache.org.
HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)
(cherry picked from commit f2ac132d6a21c215093b7f87acf2843ac8123716)
(cherry picked from commit ee0f389ecd0c479aa38dae7ee6fd02ed66a64e4e)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa8f4cc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa8f4cc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa8f4cc4
Branch: refs/heads/branch-2.8
Commit: aa8f4cc48c4fa6fc9b33fbf999d590fe1827dacb
Parents: 33e6986
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 21 15:42:28 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 21 15:55:57 2016 +0530
----------------------------------------------------------------------
.../server/datanode/DataNodeFaultInjector.java | 2 +
.../hdfs/server/datanode/DataXceiver.java | 3 ++
.../datanode/fsdataset/impl/FsDatasetImpl.java | 4 ++
.../fsdataset/impl/TestSpaceReservation.java | 48 ++++++++++++++++++++
4 files changed, 57 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa8f4cc4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 7327420..4ecbdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -53,4 +53,6 @@ public class DataNodeFaultInjector {
public void stopSendingPacketDownstream() throws IOException {}
public void noRegistration() throws IOException { }
+
+ public void failMirrorConnection() throws IOException { }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa8f4cc4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index e5ae98f..c5cc36c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -738,6 +738,9 @@ class DataXceiver extends Receiver implements Runnable {
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
+
+ DataNodeFaultInjector.get().failMirrorConnection();
+
int timeoutValue = dnConf.socketTimeout +
(HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = dnConf.socketWriteTimeout +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa8f4cc4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0693520..a4c54fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
@@ -1958,6 +1959,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.debug("Block file " + removing.getBlockFile().getName()
+ " is to be deleted");
}
+ if (removing instanceof ReplicaInPipelineInterface) {
+ ((ReplicaInPipelineInterface) removing).releaseAllBytesReserved();
+ }
}
if (v.isTransientStorage()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa8f4cc4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index 6dbd299..fad5216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.ipc.RemoteException;
@@ -77,6 +78,7 @@ public class TestSpaceReservation {
private DFSClient client = null;
FsVolumeReference singletonVolumeRef = null;
FsVolumeImpl singletonVolume = null;
+ private DataNodeFaultInjector old = null;
private static Random rand = new Random();
@@ -146,6 +148,9 @@ public class TestSpaceReservation {
cluster.shutdown();
cluster = null;
}
+ if (old != null) {
+ DataNodeFaultInjector.set(old);
+ }
}
private void createFileAndTestSpaceReservation(
@@ -613,6 +618,49 @@ public class TestSpaceReservation {
checkReservedSpace(expectedFile2Reserved);
}
+ @Test(timeout = 30000)
+ public void testReservedSpaceForPipelineRecovery() throws Exception {
+ final short replication = 3;
+ startCluster(BLOCK_SIZE, replication, -1);
+
+ final String methodName = GenericTestUtils.getMethodName();
+ final Path file = new Path("/" + methodName + ".01.dat");
+
+ old = DataNodeFaultInjector.get();
+ // Fault injector to fail connection to mirror first time.
+ DataNodeFaultInjector.set(new DataNodeFaultInjector() {
+ private int tries = 0;
+
+ @Override
+ public void failMirrorConnection() throws IOException {
+ if (tries++ == 0) {
+ throw new IOException("Failing Mirror for space reservation");
+ }
+ }
+ });
+ // Write 1 byte to the file and kill the writer.
+ FSDataOutputStream os = fs.create(file, replication);
+ os.write(new byte[1]);
+ os.close();
+ // Ensure all space reserved for the replica was released on each
+ // DataNode.
+ cluster.triggerBlockReports();
+ for (final DataNode dn : cluster.getDataNodes()) {
+ try (FsDatasetSpi.FsVolumeReferences volumes =
+ dn.getFSDataset().getFsVolumeReferences()) {
+ final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ LOG.info("dn " + dn.getDisplayName() + " space : "
+ + volume.getReservedForReplicas());
+ return (volume.getReservedForReplicas() == 0);
+ }
+ }, 100, Integer.MAX_VALUE); // Wait until the test times out.
+ }
+ }
+ }
+
private void checkReservedSpace(final long expectedReserved) throws TimeoutException,
InterruptedException, IOException {
for (final DataNode dn : cluster.getDataNodes()) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org