You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2009/08/25 20:18:52 UTC
svn commit: r807744 - in /hadoop/hdfs/branches/HDFS-265: CHANGES.txt
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
Author: szetszwo
Date: Tue Aug 25 18:18:52 2009
New Revision: 807744
URL: http://svn.apache.org/viewvc?rev=807744&view=rev
Log:
HDFS-562. Add a test for NameNode.getBlockLocations(..) to check read from unclosed file. (szetszwo)
Modified:
hadoop/hdfs/branches/HDFS-265/CHANGES.txt
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=807744&r1=807743&r2=807744&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Tue Aug 25 18:18:52 2009
@@ -110,6 +110,9 @@
HDFS-552. Change TestFiDataTransferProtocol to junit 4 and add a few new
tests. (szetszwo)
+ HDFS-562. Add a test for NameNode.getBlockLocations(..) to check read from
+ un-closed file. (szetszwo)
+
BUG FIXES
HDFS-76. Better error message to users when commands fail because of
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=807744&r1=807743&r2=807744&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Tue Aug 25 18:18:52 2009
@@ -17,7 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
+import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -26,30 +31,34 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
-import junit.framework.TestCase;
-
-public class TestBlockUnderConstruction extends TestCase {
+public class TestBlockUnderConstruction {
static final String BASE_DIR = "/test/TestBlockUnderConstruction";
static final int BLOCK_SIZE = 8192; // same as TestFileCreation.blocksize
static final int NUM_BLOCKS = 5; // number of blocks to write
- private MiniDFSCluster cluster;
- private DistributedFileSystem hdfs;
+ private static MiniDFSCluster cluster;
+ private static DistributedFileSystem hdfs;
- protected void setUp() throws Exception {
- super.setUp();
+ @BeforeClass
+ public static void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster(conf, 3, true, null);
cluster.waitActive();
hdfs = (DistributedFileSystem)cluster.getFileSystem();
}
- protected void tearDown() throws Exception {
+ @AfterClass
+ public static void tearDown() throws Exception {
if(hdfs != null) hdfs.close();
if(cluster != null) cluster.shutdown();
- super.tearDown();
}
void writeFile(Path file, FSDataOutputStream stm, int size)
@@ -116,6 +125,7 @@
ns.blockManager.getStoredBlock(curBlock) == curBlock);
}
+ @Test
public void testBlockCreation() throws IOException {
Path file1 = new Path(BASE_DIR, "file1.dat");
FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
@@ -132,4 +142,36 @@
// verify consistency
verifyFileBlocks(file1.toString(), false);
}
+
+ /**
+ * Test NameNode.getBlockLocations(..) on reading un-closed files.
+ */
+ @Test
+ public void testGetBlockLocations() throws IOException {
+ final NameNode namenode = cluster.getNameNode();
+ final Path p = new Path(BASE_DIR, "file1.dat");
+ final String src = p.toString();
+ final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
+
+ // write a half block
+ int len = BLOCK_SIZE >>> 1;
+ writeFile(p, out, len);
+
+ for(int i = 1; i < NUM_BLOCKS; ) {
+ // verify consistency
+ final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
+ final List<LocatedBlock> blocks = lb.getLocatedBlocks();
+ assertEquals(i, blocks.size());
+ final Block b = blocks.get(blocks.size() - 1).getBlock();
+ assertTrue(b instanceof BlockInfoUnderConstruction);
+
+ if (++i < NUM_BLOCKS) {
+ // write one more block
+ writeFile(p, out, BLOCK_SIZE);
+ len += BLOCK_SIZE;
+ }
+ }
+ // close file
+ out.close();
+ }
}