You are viewing a plain text version of this content; the canonical hyperlink for it is not available in this view.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2014/10/02 18:30:37 UTC

[2/3] git commit: HDFS-7178. Additional unit test for replica write with full disk. (Arpit Agarwal)

HDFS-7178. Additional unit test for replica write with full disk. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8e64813
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8e64813
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8e64813

Branch: refs/heads/branch-2
Commit: d8e64813951b55a1371791fddf46f4e4608c31e5
Parents: 6483342
Author: arp <ar...@apache.org>
Authored: Thu Oct 2 09:28:26 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Oct 2 09:28:41 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 47 ++++++++++++++++++--
 2 files changed, 46 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e64813/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dda93df..13b183e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7176. The namenode usage message doesn't include "-rollingupgrade
     started" (cmccabe)
 
+    HDFS-7178. Additional unit test for replica write with full disk.
+    (Arpit Agarwal)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e64813/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
index 74ac167..487f3ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,19 +27,20 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 
-import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
-import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.List;
@@ -80,6 +80,7 @@ public class TestRbwSpaceReservation {
 
   static {
     ((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
   }
 
   private void startCluster(int blockSize, long perVolumeCapacity) throws IOException {
@@ -188,6 +189,44 @@ public class TestRbwSpaceReservation {
     createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2);
   }
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test (timeout=300000)
+  public void testWithLimitedSpace() throws IOException {
+    // Cluster with just enough space for a full block + meta.
+    startCluster(BLOCK_SIZE, 2 * BLOCK_SIZE - 1);
+    final String methodName = GenericTestUtils.getMethodName();
+    Path file1 = new Path("/" + methodName + ".01.dat");
+    Path file2 = new Path("/" + methodName + ".02.dat");
+
+    // Create two files.
+    FSDataOutputStream os1 = null, os2 = null;
+
+    try {
+      os1 = fs.create(file1);
+      os2 = fs.create(file2);
+
+      // Write one byte to the first file.
+      LOG.info("arpit: writing first file");
+      byte[] data = new byte[1];
+      os1.write(data);
+      os1.hsync();
+
+      // Try to write one byte to the second file.
+      // The block allocation must fail.
+      thrown.expect(RemoteException.class);
+      os2.write(data);
+      os2.hsync();
+    } finally {
+      if (os1 != null) {
+        os1.close();
+      }
+
+      // os2.close() will fail as no block was allocated.
+    }
+  }
+
   /**
    * Stress test to ensure we are not leaking reserved space.
    * @throws IOException