Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/10/13 22:46:52 UTC

svn commit: r704241 - in /hadoop/core/trunk: CHANGES.txt src/test/org/apache/hadoop/hdfs/AppendTestUtil.java src/test/org/apache/hadoop/hdfs/TestFileAppend3.java

Author: szetszwo
Date: Mon Oct 13 13:46:52 2008
New Revision: 704241

URL: http://svn.apache.org/viewvc?rev=704241&view=rev
Log:
HADOOP-3790. Add more unit tests for HDFS file append.  (szetszwo)

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=704241&r1=704240&r2=704241&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Oct 13 13:46:52 2008
@@ -491,6 +491,8 @@
     HADOOP-4354. Separate TestDatanodeDeath.testDatanodeDeath() into 4 tests.
     (szetszwo)
 
+    HADOOP-3790. Add more unit tests for HDFS file append.  (szetszwo)
+
   OPTIMIZATIONS
 
     HADOOP-3556. Removed lock contention in MD5Hash by changing the 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=704241&r1=704240&r2=704241&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java Mon Oct 13 13:46:52 2008
@@ -17,10 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.Random;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /** Utilities for append-related tests */ 
 class AppendTestUtil {
@@ -62,4 +73,47 @@
     rand.nextBytes(b);
     return b;
   }
+
+  static void sleep(long ms) {
+    try {
+      Thread.sleep(ms);
+    } catch (InterruptedException e) {
+      LOG.info("ms=" + ms, e);
+    }
+  }
+
+  static FileSystem createHdfsWithDifferentUsername(Configuration conf
+      ) throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
+    UnixUserGroupInformation.saveToConf(conf2,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+    return FileSystem.get(conf2);
+  }
+
+  static void write(OutputStream out, int offset, int length) throws IOException {
+    final byte[] bytes = new byte[length];
+    for(int i = 0; i < length; i++) {
+      bytes[i] = (byte)(offset + i);
+    }
+    out.write(bytes);
+  }
+  
+  static void check(FileSystem fs, Path p, long length) throws IOException {
+    int i = -1;  // tracks the read position, reported if an IOException occurs
+    try {
+      final FileStatus status = fs.getFileStatus(p);
+      TestCase.assertEquals(length, status.getLen());
+      InputStream in = fs.open(p);
+      for(i++; i < length; i++) {
+        TestCase.assertEquals((byte)i, (byte)in.read());  
+      }
+      i = -(int)length;  // sentinel: content fully verified, only the EOF check remains
+      TestCase.assertEquals(-1, in.read()); //EOF  
+      in.close();
+    } catch(IOException ioe) {
+      throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
+    }
+  }
 }
\ No newline at end of file
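
The two helpers above are designed as a pair: write() emits a deterministic pattern (each byte's value is its file offset, truncated to a byte) and check() re-reads the file to assert its length, every byte of content, and EOF. A minimal sketch of that round trip against a single-datanode cluster; this example class is hypothetical, not part of the commit, and must live in org.apache.hadoop.hdfs because AppendTestUtil is package-private:

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendTestUtilExample extends junit.framework.TestCase {
      public void testWriteThenCheck() throws Exception {
        // one-datanode mini cluster, formatted on startup
        MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
        try {
          FileSystem fs = cluster.getFileSystem();
          Path p = new Path("/example/foo");
          FSDataOutputStream out = fs.create(p);
          AppendTestUtil.write(out, 0, 1024);  // bytes at offsets 0..1023, value (byte)offset
          out.close();
          AppendTestUtil.check(fs, p, 1024);   // verifies length, content, then EOF
        } finally {
          cluster.shutdown();
        }
      }
    }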

Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=704241&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Mon Oct 13 13:46:52 2008
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import junit.extensions.TestSetup;
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+
+/** This class implements some of the tests posted in HADOOP-2658. */
+public class TestFileAppend3 extends junit.framework.TestCase {
+  static final long BLOCK_SIZE = 64 * 1024;
+  static final short REPLICATION = 3;
+  static final int DATANODE_NUM = 5;
+
+  private static Configuration conf;
+  private static int buffersize;
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem fs;
+
+  public static Test suite() {
+    return new TestSetup(new TestSuite(TestFileAppend3.class)) {
+      protected void setUp() throws java.lang.Exception {
+        AppendTestUtil.LOG.info("setUp()");
+        conf = new Configuration();
+        conf.setInt("io.bytes.per.checksum", 512);
+        buffersize = conf.getInt("io.file.buffer.size", 4096);
+        cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+        fs = (DistributedFileSystem)cluster.getFileSystem();
+      }
+    
+      protected void tearDown() throws Exception {
+        AppendTestUtil.LOG.info("tearDown()");
+        cluster.shutdown();
+      }
+    };  
+  }
+
+  /** TC1: Append on block boundary. */
+  public void testTC1() throws Exception {
+    final Path p = new Path("/TC1/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file and write one block of data. Close file.
+    final int len1 = (int)BLOCK_SIZE; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //   Reopen file to append. Append half block of data. Close file.
+    final int len2 = (int)BLOCK_SIZE/2; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+    
+    //b. Reopen file and read 1.5 blocks worth of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
+  /** TC2: Append on non-block boundary. */
+  public void testTC2() throws Exception {
+    final Path p = new Path("/TC2/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file with one and a half blocks of data. Close file.
+    final int len1 = (int)(BLOCK_SIZE + BLOCK_SIZE/2); 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //   Reopen file to append quarter block of data. Close file.
+    final int len2 = (int)BLOCK_SIZE/4; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    //b. Reopen file and read 1.75 blocks of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
+  /** TC5: Only one simultaneous append. */
+  public void testTC5() throws Exception {
+    final Path p = new Path("/TC5/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file on Machine M1. Write half block to it. Close file.
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2));
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode on Machine M1.
+    FSDataOutputStream out = fs.append(p);
+
+    //c. On Machine M2, reopen file in "append" mode. This should fail.
+    try {
+      AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
+      fail("This should fail.");
+    } catch(IOException ioe) {
+      AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
+    }
+
+    //d. On Machine M1, close file.
+    out.close();        
+  }
+
+  /** TC11: Racing rename */
+  public void testTC11() throws Exception {
+    final Path p = new Path("/TC11/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file and write one block of data. Close file.
+    final int len1 = (int)BLOCK_SIZE; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode. Append half block of data.
+    FSDataOutputStream out = fs.append(p);
+    final int len2 = (int)BLOCK_SIZE/2; 
+    AppendTestUtil.write(out, len1, len2);
+    
+    //c. Rename file to file.new.
+    final Path pnew = new Path(p + ".new");
+    assertTrue(fs.rename(p, pnew));
+
+    //d. Close file handle that was opened in (b). 
+    try {
+      out.close();
+      fail("close() should throw an exception");
+    } catch(Exception e) {
+      AppendTestUtil.LOG.info("GOOD!", e);
+    }
+
+    //wait for the lease recovery 
+    cluster.setLeasePeriod(1000, 1000);
+    AppendTestUtil.sleep(5000);
+
+    //check block sizes 
+    final long len = fs.getFileStatus(pnew).getLen();
+    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(pnew.toString(), 0L, len);
+    final int numblock = locatedblocks.locatedBlockCount();
+    for(int i = 0; i < numblock; i++) {
+      final LocatedBlock lb = locatedblocks.get(i);
+      final Block blk = lb.getBlock();
+      final long size = lb.getBlockSize();
+      if (i < numblock - 1) {
+        assertEquals(BLOCK_SIZE, size);
+      }
+      for(DatanodeInfo datanodeinfo : lb.getLocations()) {
+        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
+        final BlockMetaDataInfo metainfo = dn.getBlockMetaDataInfo(blk);
+        assertEquals(size, metainfo.getNumBytes());
+      }
+    }
+  }
+
+  /** TC12: Append to partial CRC chunk */
+  public void testTC12() throws Exception {
+    final Path p = new Path("/TC12/foo");
+    System.out.println("p=" + p);
+    
+    //a. Create file with a block size of 64KB
+    //   and a default io.bytes.per.checksum of 512 bytes.
+    //   Write 25687 bytes of data. Close file.
+    final int len1 = 25687; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
+    final int len2 = 5877; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+}
\ No newline at end of file
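
The suite() factory wraps the test cases in a junit.extensions.TestSetup so that one MiniDFSCluster is started before the first test and shut down after the last, rather than once per test method. A sketch of driving such a suite from the command line with the stock JUnit 3 text runner; the runner class name here is made up for illustration, while TestRunner.run(Test) is the standard JUnit 3 API:

    package org.apache.hadoop.hdfs;

    // Hypothetical convenience entry point: runs the whole suite,
    // including the shared setUp()/tearDown() from TestSetup,
    // and prints the results to stdout.
    public class RunTestFileAppend3 {
      public static void main(String[] args) {
        junit.textui.TestRunner.run(TestFileAppend3.suite());
      }
    }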