Posted to common-commits@hadoop.apache.org by br...@apache.org on 2017/06/07 09:04:20 UTC

[1/3] hadoop git commit: HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 72f0fdfb5 -> fca08f836
  refs/heads/branch-2.8 4a391c72d -> 8a16846f5
  refs/heads/branch-2.8.1 8f0b90263 -> c24e51a35


HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fca08f83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fca08f83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fca08f83

Branch: refs/heads/branch-2
Commit: fca08f8362f0332b064f28c2625fd535004ef85d
Parents: 72f0fdf
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jun 7 16:24:50 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Jun 7 16:54:29 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockSender.java       | 13 ++--
 .../server/datanode/DataNodeFaultInjector.java  |  4 ++
 .../server/datanode/TestDataNodeMetrics.java    | 62 ++++++++++++++++++++
 3 files changed, 75 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fca08f83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index e6b7e3b..210dbdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -303,6 +303,7 @@ class BlockSender implements java.io.Closeable {
         LengthInputStream metaIn = null;
         boolean keepMetaInOpen = false;
         try {
+          DataNodeFaultInjector.get().throwTooManyOpenFiles();
           metaIn = datanode.data.getMetaDataInputStream(block);
           if (!corruptChecksumOk || metaIn != null) {
             if (metaIn == null) {
@@ -332,10 +333,14 @@ class BlockSender implements java.io.Closeable {
             LOG.warn("Could not find metadata file for " + block);
           }
         } catch (FileNotFoundException e) {
-          // The replica is on its volume map but not on disk
-          datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
-          datanode.data.invalidate(block.getBlockPoolId(),
-              new Block[]{block.getLocalBlock()});
+          if ((e.getMessage() != null) && !(e.getMessage()
+              .contains("Too many open files"))) {
+            // The replica is on its volume map but not on disk
+            datanode
+                .notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+            datanode.data.invalidate(block.getBlockPoolId(),
+                new Block[] {block.getLocalBlock()});
+          }
           throw e;
         } finally {
           if (!keepMetaInOpen) {
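
The guard above is the heart of the fix: on Linux, file-descriptor
exhaustion (EMFILE) surfaces as a FileNotFoundException whose message ends
in "(Too many open files)", which is indistinguishable by exception type
from a replica file that is genuinely missing from disk. A minimal
standalone sketch of the same pattern, using hypothetical names (openMeta,
reportMissingReplica) rather than the actual BlockSender API:

import java.io.FileInputStream;
import java.io.FileNotFoundException;

class TooManyOpenFilesGuard {
  // Opens a file, reporting the replica as deleted only when the
  // FileNotFoundException is NOT caused by fd exhaustion.
  static FileInputStream openMeta(String path) throws FileNotFoundException {
    try {
      return new FileInputStream(path);
    } catch (FileNotFoundException e) {
      String msg = e.getMessage();
      // Mirror the patch: act only on a non-null message that does not
      // mention fd exhaustion; a null message is treated conservatively.
      if (msg != null && !msg.contains("Too many open files")) {
        // The file is really gone; safe to invalidate the replica.
        reportMissingReplica(path); // hypothetical stand-in for the NN callback
      }
      // Rethrown either way; on EMFILE the block stays intact for a retry.
      throw e;
    }
  }

  static void reportMissingReplica(String path) {
    System.err.println("Replica missing on disk: " + path);
  }
}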

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fca08f83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index bba26b2..bfb8a50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 /**
@@ -85,4 +86,7 @@ public class DataNodeFaultInjector {
   public void startOfferService() throws Exception {}
 
   public void endOfferService() throws Exception {}
+
+  public void throwTooManyOpenFiles() throws FileNotFoundException {
+  }
 }
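
DataNodeFaultInjector follows the usual Hadoop fault-injection pattern: a
settable singleton whose hooks are no-ops in production and are invoked at
the interesting code path (here, just before opening the metadata stream),
so a test can swap in a mock that throws. A simplified sketch of the
pattern, not the real class:

import java.io.FileNotFoundException;

class FaultInjector {
  private static FaultInjector instance = new FaultInjector();

  static FaultInjector get() { return instance; }
  static void set(FaultInjector injector) { instance = injector; }

  // No-op in production; a test subclass or Mockito mock overrides this
  // to simulate EMFILE at the injection point.
  void throwTooManyOpenFiles() throws FileNotFoundException { }
}

Production code drops FaultInjector.get().throwTooManyOpenFiles() at the
injection point; the test installs a mock with set(...) and restores the
previous injector in a finally block, exactly as TestDataNodeMetrics does
below.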

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fca08f83/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 4f91a08..0afaae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -24,8 +24,10 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.*;
 
 import java.io.Closeable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 
@@ -45,8 +47,10 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -334,4 +338,62 @@ public class TestDataNodeMetrics {
       }
     }
   }
+
+  @Test
+  public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
+      throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final DataNodeFaultInjector injector =
+        Mockito.mock(DataNodeFaultInjector.class);
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testShouldThrowTMP");
+      DFSTestUtil.writeFile(fs, p, new String("testdata"));
+      //Before DN throws too many open files
+      verifyBlockLocations(fs, p, 1);
+      Mockito.doThrow(new FileNotFoundException("Too many open files")).
+          when(injector).
+          throwTooManyOpenFiles();
+      DataNodeFaultInjector.set(injector);
+      ExtendedBlock b =
+          fs.getClient().getLocatedBlocks(p.toString(), 0).get(0).getBlock();
+      try {
+        new BlockSender(b, 0, -1, false, true, true,
+            cluster.getDataNodes().get(0), null,
+            CachingStrategy.newDefaultStrategy());
+        fail("Must throw FileNotFoundException");
+      } catch (FileNotFoundException fe) {
+        assertTrue("Should throw too many open files",
+            fe.getMessage().contains("Too many open files"));
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      //After DN throws too many open files
+      assertTrue(cluster.getDataNodes().get(0).getFSDataset().isValidBlock(b));
+      verifyBlockLocations(fs, p, 1);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+
+  private void verifyBlockLocations(DistributedFileSystem fs, Path p,
+      final int expected)
+      throws IOException, TimeoutException, InterruptedException {
+    final LocatedBlock lb =
+        fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override public Boolean get() {
+        return lb.getLocations().length == expected;
+      }
+    }, 1000, 6000);
+  }
 }
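
One subtlety in verifyBlockLocations above: the LocatedBlock is fetched
once, before GenericTestUtils.waitFor, so the Supplier polls a fixed
snapshot rather than re-querying the NameNode. A variant that re-fetches
the locations on every poll (a sketch under that assumption, with the
hypothetical name verifyBlockLocationsFresh, relying on the same imports
as the test above) would look like:

  private void verifyBlockLocationsFresh(final DistributedFileSystem fs,
      final Path p, final int expected)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override public Boolean get() {
        try {
          // Re-query the NameNode on each poll instead of caching one result.
          return fs.getClient().getLocatedBlocks(p.toString(), 0)
              .get(0).getLocations().length == expected;
        } catch (IOException e) {
          return false;
        }
      }
    }, 1000, 6000);
  }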


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[2/3] hadoop git commit: HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.

Posted by br...@apache.org.
HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.

(cherry picked from commit fca08f8362f0332b064f28c2625fd535004ef85d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a16846f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a16846f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a16846f

Branch: refs/heads/branch-2.8
Commit: 8a16846f5d236c57436e9b35552ac20a7e233970
Parents: 4a391c7
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jun 7 16:24:50 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Jun 7 16:59:26 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockSender.java       | 13 ++--
 .../server/datanode/DataNodeFaultInjector.java  |  4 ++
 .../server/datanode/TestDataNodeMetrics.java    | 62 ++++++++++++++++++++
 3 files changed, 75 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a16846f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index aeeef97..de9c7e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -305,6 +305,7 @@ class BlockSender implements java.io.Closeable {
         LengthInputStream metaIn = null;
         boolean keepMetaInOpen = false;
         try {
+          DataNodeFaultInjector.get().throwTooManyOpenFiles();
           metaIn = datanode.data.getMetaDataInputStream(block);
           if (!corruptChecksumOk || metaIn != null) {
             if (metaIn == null) {
@@ -334,10 +335,14 @@ class BlockSender implements java.io.Closeable {
             LOG.warn("Could not find metadata file for " + block);
           }
         } catch (FileNotFoundException e) {
-          // The replica is on its volume map but not on disk
-          datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
-          datanode.data.invalidate(block.getBlockPoolId(),
-              new Block[]{block.getLocalBlock()});
+          if ((e.getMessage() != null) && !(e.getMessage()
+              .contains("Too many open files"))) {
+            // The replica is on its volume map but not on disk
+            datanode
+                .notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+            datanode.data.invalidate(block.getBlockPoolId(),
+                new Block[] {block.getLocalBlock()});
+          }
           throw e;
         } finally {
           if (!keepMetaInOpen) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a16846f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 931c124..c271124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 /**
@@ -58,4 +59,7 @@ public class DataNodeFaultInjector {
 
   public void failPipeline(ReplicaInPipelineInterface replicaInfo,
       String mirrorAddr) throws IOException { }
+
+  public void throwTooManyOpenFiles() throws FileNotFoundException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a16846f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 355f7a1..c631d10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -24,8 +24,10 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.*;
 
 import java.io.Closeable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 
@@ -44,8 +46,10 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -331,4 +335,62 @@ public class TestDataNodeMetrics {
       }
     }
   }
+
+  @Test
+  public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
+      throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final DataNodeFaultInjector injector =
+        Mockito.mock(DataNodeFaultInjector.class);
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testShouldThrowTMP");
+      DFSTestUtil.writeFile(fs, p, new String("testdata"));
+      //Before DN throws too many open files
+      verifyBlockLocations(fs, p, 1);
+      Mockito.doThrow(new FileNotFoundException("Too many open files")).
+          when(injector).
+          throwTooManyOpenFiles();
+      DataNodeFaultInjector.set(injector);
+      ExtendedBlock b =
+          fs.getClient().getLocatedBlocks(p.toString(), 0).get(0).getBlock();
+      try {
+        new BlockSender(b, 0, -1, false, true, true,
+            cluster.getDataNodes().get(0), null,
+            CachingStrategy.newDefaultStrategy());
+        fail("Must throw FileNotFoundException");
+      } catch (FileNotFoundException fe) {
+        assertTrue("Should throw too many open files",
+            fe.getMessage().contains("Too many open files"));
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      //After DN throws too many open files
+      assertTrue(cluster.getDataNodes().get(0).getFSDataset().isValidBlock(b));
+      verifyBlockLocations(fs, p, 1);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+
+  private void verifyBlockLocations(DistributedFileSystem fs, Path p,
+      final int expected)
+      throws IOException, TimeoutException, InterruptedException {
+    final LocatedBlock lb =
+        fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override public Boolean get() {
+        return lb.getLocations().length == expected;
+      }
+    }, 1000, 6000);
+  }
 }




[3/3] hadoop git commit: HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.

Posted by br...@apache.org.
HDFS-11711. DN should not delete the block on "Too many open files" exception. Contributed by Brahma Reddy Battula.

(cherry picked from commit fca08f8362f0332b064f28c2625fd535004ef85d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c24e51a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c24e51a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c24e51a3

Branch: refs/heads/branch-2.8.1
Commit: c24e51a3589c11dd2fb7dcaa21a84269ca65035a
Parents: 8f0b902
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jun 7 16:24:50 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Jun 7 17:02:53 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockSender.java       | 13 ++--
 .../server/datanode/DataNodeFaultInjector.java  |  4 ++
 .../server/datanode/TestDataNodeMetrics.java    | 62 ++++++++++++++++++++
 3 files changed, 75 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24e51a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index aeeef97..de9c7e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -305,6 +305,7 @@ class BlockSender implements java.io.Closeable {
         LengthInputStream metaIn = null;
         boolean keepMetaInOpen = false;
         try {
+          DataNodeFaultInjector.get().throwTooManyOpenFiles();
           metaIn = datanode.data.getMetaDataInputStream(block);
           if (!corruptChecksumOk || metaIn != null) {
             if (metaIn == null) {
@@ -334,10 +335,14 @@ class BlockSender implements java.io.Closeable {
             LOG.warn("Could not find metadata file for " + block);
           }
         } catch (FileNotFoundException e) {
-          // The replica is on its volume map but not on disk
-          datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
-          datanode.data.invalidate(block.getBlockPoolId(),
-              new Block[]{block.getLocalBlock()});
+          if ((e.getMessage() != null) && !(e.getMessage()
+              .contains("Too many open files"))) {
+            // The replica is on its volume map but not on disk
+            datanode
+                .notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+            datanode.data.invalidate(block.getBlockPoolId(),
+                new Block[] {block.getLocalBlock()});
+          }
           throw e;
         } finally {
           if (!keepMetaInOpen) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24e51a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 931c124..c271124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 /**
@@ -58,4 +59,7 @@ public class DataNodeFaultInjector {
 
   public void failPipeline(ReplicaInPipelineInterface replicaInfo,
       String mirrorAddr) throws IOException { }
+
+  public void throwTooManyOpenFiles() throws FileNotFoundException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24e51a3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 355f7a1..c631d10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -24,8 +24,10 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.*;
 
 import java.io.Closeable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 
@@ -44,8 +46,10 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -331,4 +335,62 @@ public class TestDataNodeMetrics {
       }
     }
   }
+
+  @Test
+  public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
+      throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final DataNodeFaultInjector injector =
+        Mockito.mock(DataNodeFaultInjector.class);
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testShouldThrowTMP");
+      DFSTestUtil.writeFile(fs, p, new String("testdata"));
+      //Before DN throws too many open files
+      verifyBlockLocations(fs, p, 1);
+      Mockito.doThrow(new FileNotFoundException("Too many open files")).
+          when(injector).
+          throwTooManyOpenFiles();
+      DataNodeFaultInjector.set(injector);
+      ExtendedBlock b =
+          fs.getClient().getLocatedBlocks(p.toString(), 0).get(0).getBlock();
+      try {
+        new BlockSender(b, 0, -1, false, true, true,
+            cluster.getDataNodes().get(0), null,
+            CachingStrategy.newDefaultStrategy());
+        fail("Must throw FileNotFoundException");
+      } catch (FileNotFoundException fe) {
+        assertTrue("Should throw too many open files",
+            fe.getMessage().contains("Too many open files"));
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      //After DN throws too many open files
+      assertTrue(cluster.getDataNodes().get(0).getFSDataset().isValidBlock(b));
+      verifyBlockLocations(fs, p, 1);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+
+  private void verifyBlockLocations(DistributedFileSystem fs, Path p,
+      final int expected)
+      throws IOException, TimeoutException, InterruptedException {
+    final LocatedBlock lb =
+        fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override public Boolean get() {
+        return lb.getLocations().length == expected;
+      }
+    }, 1000, 6000);
+  }
 }

