Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/06/10 02:11:33 UTC

svn commit: r1134137 [2/2] - in /hadoop/hdfs/branches/HDFS-1073: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org...

Propchange: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jun 10 00:11:32 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:1086482-1132839
+/hadoop/hdfs/trunk/src/test/hdfs:1086482-1134136

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Fri Jun 10 00:11:32 2011
@@ -73,28 +73,39 @@ public class BlockReaderTestUtil {
 
   /**
    * Create a file of the given size filled with random data.
-   * @return  List of Blocks of the new file.
+   * @return  the data written to the file.
    */
-  public List<LocatedBlock> writeFile(Path filepath, int sizeKB)
+  public byte[] writeFile(Path filepath, int sizeKB)
       throws IOException {
     FileSystem fs = cluster.getFileSystem();
 
     // Write a file with the specified amount of data
     DataOutputStream os = fs.create(filepath);
-    byte data[] = new byte[1024];
+    byte data[] = new byte[1024 * sizeKB];
     new Random().nextBytes(data);
-    for (int i = 0; i < sizeKB; i++) {
-      os.write(data);
-    }
+    os.write(data);
     os.close();
+    return data;
+  }
 
+  /**
+   * Get the list of located blocks for a file.
+   */
+  public List<LocatedBlock> getFileBlocks(Path filepath, int sizeKB)
+      throws IOException {
     // Return the blocks we just wrote
-    DFSClient dfsclient = new DFSClient(
-      new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
+    DFSClient dfsclient = getDFSClient();
     return dfsclient.getNamenode().getBlockLocations(
       filepath.toString(), 0, sizeKB * 1024).getLocatedBlocks();
   }
 
+  /**
+   * Get the DFSClient.
+   */
+  public DFSClient getDFSClient() throws IOException {
+    InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
+    return new DFSClient(nnAddr, conf);
+  }
 
   /**
    * Exercise the BlockReader and read length bytes.

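For context, a minimal sketch of how a test uses the split-up API after this change; the path and sizes are illustrative, and the constructor mirrors the TestClientBlockVerification usage further down:

    // assumes a running MiniDFSCluster inside BlockReaderTestUtil
    BlockReaderTestUtil util = new BlockReaderTestUtil(1);    // replication factor 1
    Path path = new Path("/sketch-file");                     // illustrative path
    // writeFile() now returns the random bytes it wrote, so tests can
    // compare what they read against the expected contents directly...
    byte[] expected = util.writeFile(path, 10);               // a 10 KB file
    // ...while the block locations come from the new, separate accessor.
    List<LocatedBlock> blocks = util.getFileBlocks(path, 10);
    DFSClient client = util.getDFSClient();                   // also factored out
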
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java Fri Jun 10 00:11:32 2011
@@ -75,10 +75,12 @@ public class DataNodeCluster {
     " [-inject startingBlockId numBlocksPerDN]" +
     " [-r replicationFactorForInjectedBlocks]" +
     " [-d dataNodeDirs]\n" + 
+    " [-checkDataNodeAddrConfig]\n" +
     "      Default datanode direcory is " + DATANODE_DIRS + "\n" +
     "      Default replication factor for injected blocks is 1\n" +
     "      Defaul rack is used if -racks is not specified\n" +
-    "      Data nodes are simulated if -simulated OR conf file specifies simulated\n";
+    "      Data nodes are simulated if -simulated OR conf file specifies simulated\n" +
+    "      -checkDataNodeAddrConfig tells DataNodeConf to use data node addresses from conf file, if it is set. If not set, use .localhost'.";
   
   
   static void printUsageExit() {
@@ -97,6 +99,7 @@ public class DataNodeCluster {
     long startingBlockId = 1;
     int numBlocksPerDNtoInject = 0;
     int replication = 1;
+    boolean checkDataNodeAddrConfig = false;
     
     Configuration conf = new HdfsConfiguration();
 
@@ -139,6 +142,8 @@ public class DataNodeCluster {
          printUsageExit("Missing number of blocks to inject");
        }
        numBlocksPerDNtoInject = Integer.parseInt(args[i]);      
+      } else if (args[i].equals("-checkDataNodeAddrConfig")) {
+        checkDataNodeAddrConfig = true;
       } else {
         printUsageExit();
       }
@@ -186,7 +191,7 @@ public class DataNodeCluster {
     }
     try {
       mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
-          rack4DataNode);
+          rack4DataNode, null, null, false, checkDataNodeAddrConfig);
       if (inject) {
         long blockSize = 10;
         System.out.println("Injecting " + numBlocksPerDNtoInject +

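In practice, the new flag means a datanode address already present in the configuration survives startup. A small sketch using the mc cluster handle from main() above; the preset port is hypothetical:

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.datanode.address", "127.0.0.1:50010");  // hypothetical preset
    // With checkDataNodeAddrConfig == true, MiniDFSCluster uses setIfUnset(),
    // so the preset address above is kept rather than replaced by a free port.
    mc.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                      null, null, null, false, true);
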
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Jun 10 00:11:32 2011
@@ -36,6 +36,8 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -79,6 +81,8 @@ import org.apache.hadoop.util.ToolRunner
  * The data directories for non-simulated DFS are under the testing directory.
  * For simulated data nodes, no underlying fs storage is used.
  */
+@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce", "Pig"})
+@InterfaceStability.Unstable
 public class MiniDFSCluster {
 
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
@@ -752,7 +756,41 @@ public class MiniDFSCluster {
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+                   simulatedCapacities, setupHostsFile, false);
+  }
 
+  /**
+   * Modify the config and start up additional DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *  
+   *  Data nodes can run with the name node in the mini cluster or
+   *  with a real name node; for example, this is useful when running
+   *  simulated data nodes against a real name node.
+   *  If the mini cluster's name node is null, assume that the conf has
+   *  been set with the right address:port of the name node.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.datanode.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param hosts array of strings indicating the hostnames for each DataNode
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   * @param setupHostsFile add new nodes to dfs hosts files
+   * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+   *
+   * @throws IllegalStateException if NameNode has been shut down
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+                             boolean manageDfsDirs, StartupOption operation, 
+                             String[] racks, String[] hosts,
+                             long[] simulatedCapacities,
+                             boolean setupHostsFile,
+                             boolean checkDataNodeAddrConfig) throws IOException {
     int curDatanodesNum = dataNodes.size();
     // for the mini cluster, the default initialDelay for BRs is 0
     if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
@@ -792,7 +830,7 @@ public class MiniDFSCluster {
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       // Set up datanode address
-      setupDatanodeAddress(dnConf, setupHostsFile);
+      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
         File dir1 = getStorageDir(i, 0);
         File dir2 = getStorageDir(i, 1);
@@ -1791,7 +1829,8 @@ public class MiniDFSCluster {
     return port;
   }
   
-  private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile) throws IOException {
+  private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
+                           boolean checkDataNodeAddrConfig) throws IOException {
     if (setupHostsFile) {
       String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
       if (hostsFile.length() == 0) {
@@ -1799,13 +1838,23 @@ public class MiniDFSCluster {
       }
       // Setup datanode in the include file, if it is defined in the conf
       String address = "127.0.0.1:" + getFreeSocketPort();
-      conf.set("dfs.datanode.address", address);
+      if (checkDataNodeAddrConfig) {
+        conf.setIfUnset("dfs.datanode.address", address);
+      } else {
+        conf.set("dfs.datanode.address", address);
+      }
       addToFile(hostsFile, address);
       LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
     } else {
-      conf.set("dfs.datanode.address", "127.0.0.1:0");
-      conf.set("dfs.datanode.http.address", "127.0.0.1:0");
-      conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+      if (checkDataNodeAddrConfig) {
+        conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
+        conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
+        conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
+      } else {
+        conf.set("dfs.datanode.address", "127.0.0.1:0");
+        conf.set("dfs.datanode.http.address", "127.0.0.1:0");
+        conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+      }
     }
   }
   

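Underlying the change is the contrast between Configuration.set() and Configuration.setIfUnset(); a short sketch with illustrative values:

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.datanode.address", "127.0.0.1:50010");        // pre-existing value
    conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");     // no-op: already set
    conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0"); // applied: was unset
    // conf.get("dfs.datanode.address")     -> "127.0.0.1:50010"
    // conf.get("dfs.datanode.ipc.address") -> "127.0.0.1:0"
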
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java Fri Jun 10 00:11:32 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.fs.Path;
 
@@ -41,23 +42,24 @@ public class TestClientBlockVerification
   public static void setupCluster() throws Exception {
     final int REPLICATION_FACTOR = 1;
     util = new BlockReaderTestUtil(REPLICATION_FACTOR);
-    List<LocatedBlock> blkList = util.writeFile(TEST_FILE, FILE_SIZE_K);
+    util.writeFile(TEST_FILE, FILE_SIZE_K);
+    List<LocatedBlock> blkList = util.getFileBlocks(TEST_FILE, FILE_SIZE_K);
     testBlock = blkList.get(0);     // Use the first block to test
   }
 
   /**
-   * Verify that if we read an entire block, we send checksumOk
+   * Verify that if we read an entire block, we send CHECKSUM_OK
    */
   @Test
   public void testBlockVerification() throws Exception {
     BlockReader reader = spy(util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true);
-    verify(reader).checksumOk(reader.dnSock);
+    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
     reader.close();
   }
 
   /**
-   * Test that if we do an incomplete read, we don't call checksumOk
+   * Test that if we do an incomplete read, we don't send CHECKSUM_OK
    */
   @Test
   public void testIncompleteRead() throws Exception {
@@ -65,14 +67,14 @@ public class TestClientBlockVerification
     util.readAndCheckEOS(reader, FILE_SIZE_K / 2 * 1024, false);
 
     // We asked the blockreader for the whole file, and only read
-    // half of it, so no checksumOk
-    verify(reader, never()).checksumOk(reader.dnSock);
+    // half of it, so no CHECKSUM_OK
+    verify(reader, never()).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
     reader.close();
   }
 
   /**
    * Test that if we ask for a half block, and read it all, we *do*
-   * call checksumOk. The DN takes care of knowing whether it was
+   * send CHECKSUM_OK. The DN takes care of knowing whether it was
    * the whole block or not.
    */
   @Test
@@ -81,7 +83,7 @@ public class TestClientBlockVerification
     BlockReader reader = spy(util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
     // And read half the file
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
-    verify(reader).checksumOk(reader.dnSock);
+    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
     reader.close();
   }
 
@@ -99,7 +101,7 @@ public class TestClientBlockVerification
                            " len=" + length);
         BlockReader reader = spy(util.getBlockReader(testBlock, startOffset, length));
         util.readAndCheckEOS(reader, length, true);
-        verify(reader).checksumOk(reader.dnSock);
+        verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
         reader.close();
       }
     }

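For readers unfamiliar with the idiom these tests use: Mockito's spy() wraps the real BlockReader so reads execute normally while interactions are recorded, and verify() then checks whether the read-result status was sent. A condensed sketch, with the length value illustrative:

    int len = FILE_SIZE_K * 1024;                        // whole block, as above
    BlockReader reader = spy(util.getBlockReader(testBlock, 0, len));
    util.readAndCheckEOS(reader, len, true);             // drives the real read
    // passes only if the reader reported CHECKSUM_OK on its datanode socket
    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
    reader.close();
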
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Jun 10 00:11:32 2011
@@ -384,6 +384,8 @@ public class TestDFSClientRetries extend
     conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 
                 retries);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
+    // Disable keepalive
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicationFactor).build();
     cluster.waitActive();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jun 10 00:11:32 2011
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -27,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
@@ -95,7 +99,147 @@ public class TestLeaseRecovery2 {
     IOUtils.closeStream(dfs);
     if (cluster != null) {cluster.shutdown();}
   }
-  
+
+  /**
+   * Test that the NameNode can revoke the lease of the current lease holder.
+   * @throws Exception
+   */
+  @Test
+  public void testImmediateRecoveryOfLease() throws Exception {
+    //create a file
+    // write bytes into the file.
+    byte [] actual = new byte[FILE_SIZE];
+    int size = AppendTestUtil.nextInt(FILE_SIZE);
+    Path filepath = createFile("/immediateRecoverLease-shortlease", size, true);
+    // set the soft limit to be 1 second so that the
+    // namenode triggers lease recovery on next attempt to write-for-open.
+    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+
+    recoverLeaseUsingCreate(filepath);
+    verifyFile(dfs, filepath, actual, size);
+
+    //test recoverLease
+    // set the soft limit to be 1 hour but recoverLease should
+    // close the file immediately
+    cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    size = AppendTestUtil.nextInt(FILE_SIZE);
+    filepath = createFile("/immediateRecoverLease-longlease", size, false);
+
+    // test recoverLease from a different client
+    recoverLease(filepath, null);
+    verifyFile(dfs, filepath, actual, size);
+
+    // test recoverLease from the same client
+    size = AppendTestUtil.nextInt(FILE_SIZE);
+    filepath = createFile("/immediateRecoverLease-sameclient", size, false);
+
+    // create another file using the same client
+    Path filepath1 = new Path(filepath.toString() + AppendTestUtil.nextInt());
+    FSDataOutputStream stm = dfs.create(filepath1, true, BUF_SIZE,
+      REPLICATION_NUM, BLOCK_SIZE);
+
+    // recover the first file
+    recoverLease(filepath, dfs);
+    verifyFile(dfs, filepath, actual, size);
+
+    // continue to write to the second file
+    stm.write(buffer, 0, size);
+    stm.close();
+    verifyFile(dfs, filepath1, actual, size);
+  }
+
+  private Path createFile(final String filestr, final int size,
+      final boolean triggerLeaseRenewerInterrupt)
+  throws IOException, InterruptedException {
+    AppendTestUtil.LOG.info("filestr=" + filestr);
+    Path filepath = new Path(filestr);
+    FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
+      REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfs.dfs.exists(filestr));
+
+    AppendTestUtil.LOG.info("size=" + size);
+    stm.write(buffer, 0, size);
+
+    // hflush file
+    AppendTestUtil.LOG.info("hflush");
+    stm.hflush();
+
+    if (triggerLeaseRenewerInterrupt) {
+      AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
+      dfs.dfs.leaserenewer.interruptAndJoin();
+    }
+    return filepath;
+  }
+
+  private void recoverLease(Path filepath, DistributedFileSystem dfs)
+  throws Exception {
+    if (dfs == null) {
+      dfs = (DistributedFileSystem)getFSAsAnotherUser(conf);
+    }
+
+    while (!dfs.recoverLease(filepath)) {
+      AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
+      Thread.sleep(5000);
+    }
+  }
+
+  private FileSystem getFSAsAnotherUser(final Configuration c)
+  throws IOException, InterruptedException {
+    return FileSystem.get(FileSystem.getDefaultUri(c), c,
+      UserGroupInformation.createUserForTesting(fakeUsername, 
+          new String [] {fakeGroup}).getUserName());
+  }
+
+  private void recoverLeaseUsingCreate(Path filepath)
+  throws IOException, InterruptedException {
+    FileSystem dfs2 = getFSAsAnotherUser(conf);
+
+    boolean done = false;
+    for(int i = 0; i < 10 && !done; i++) {
+      AppendTestUtil.LOG.info("i=" + i);
+      try {
+        dfs2.create(filepath, false, BUF_SIZE, (short)1, BLOCK_SIZE);
+        fail("Creation of an existing file should never succeed.");
+      } catch (IOException ioe) {
+        final String message = ioe.getMessage();
+        if (message.contains("file exists")) {
+          AppendTestUtil.LOG.info("done", ioe);
+          done = true;
+        }
+        else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
+          AppendTestUtil.LOG.info("GOOD! got " + message);
+        }
+        else {
+          AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
+        }
+      }
+
+      if (!done) {
+        AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
+        try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      }
+    }
+    assertTrue(done);
+  }
+
+  private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
+      int size) throws IOException {
+    AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
+        + "Validating its contents now...");
+
+    // verify that file-size matches
+    assertTrue("File should be " + size + " bytes, but is actually " +
+               " found to be " + dfs.getFileStatus(filepath).getLen() +
+               " bytes",
+               dfs.getFileStatus(filepath).getLen() == size);
+
+    // verify that there is enough data to read.
+    System.out.println("File size is good. Now validating sizes from datanodes...");
+    FSDataInputStream stmin = dfs.open(filepath);
+    stmin.readFully(0, actual, 0, size);
+    stmin.close();
+  }
+
   /**
    * This test makes the client not renew its lease and also
    * sets the hard lease expiration period to a short 1s, thus triggering

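The polling loop in recoverLease() above generalizes to any client that needs to force-close a file whose writer went away; a minimal sketch, with a hypothetical namenode URI and path:

    Configuration conf = new HdfsConfiguration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://localhost:8020"), conf);      // hypothetical NN address
    Path abandoned = new Path("/tmp/abandoned-file");    // hypothetical path
    // recoverLease() returns false until block recovery completes, so poll
    while (!fs.recoverLease(abandoned)) {
      Thread.sleep(1000);
    }
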
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Fri Jun 10 00:11:32 2011
@@ -24,8 +24,11 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -77,4 +80,50 @@ public class TestFSEditLogLoader {
           e.getMessage().matches(expectedErrorMessage));
     }
   }
+  
+  /**
+   * Test that, if the NN restarts with a new minimum replication,
+   * any files created with the old replication count will get
+   * automatically bumped up to the new minimum upon restart.
+   */
+  @Test
+  public void testReplicationAdjusted() throws IOException {
+    // start a cluster 
+    Configuration conf = new HdfsConfiguration();
+    // Replicate and heartbeat fast to shave a few seconds off test
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+          .build();
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+  
+      // Create a file with replication count 1
+      Path p = new Path("/testfile");
+      DFSTestUtil.createFile(fs, p, 10, /*repl*/ (short)1, 1);
+      DFSTestUtil.waitReplication(fs, p, (short)1);
+  
+      // Shut down and restart cluster with new minimum replication of 2
+      cluster.shutdown();
+      cluster = null;
+      
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
+  
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .format(false).build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      
+      // The file should get adjusted to replication 2 when
+      // the edit log is replayed.
+      DFSTestUtil.waitReplication(fs, p, (short)2);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1134137&r1=1134136&r2=1134137&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Jun 10 00:11:32 2011
@@ -23,6 +23,7 @@ import java.util.Random;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,6 +38,11 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.log4j.Level;
+
+import org.apache.commons.logging.LogFactory;
+
 import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
@@ -59,6 +65,9 @@ public class TestNameNodeMetrics extends
         DFS_REPLICATION_INTERVAL);
     CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
         DFS_REPLICATION_INTERVAL);
+
+    ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
+      .getLogger().setLevel(Level.DEBUG);
   }
   
   private MiniDFSCluster cluster;
@@ -255,9 +264,5 @@ public class TestNameNodeMetrics extends
     readFile(fs, file1_Path);
     updateMetrics();
     assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
-  
-    // Verify total load metrics, total load = Data Node started.
-    updateMetrics();
-    assertGauge("TotalLoad" ,DATANODE_COUNT, getMetrics(NS_METRICS));
   }
 }

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jun 10 00:11:32 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1132839
+/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1134136

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jun 10 00:11:32 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1132839
+/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1134136

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jun 10 00:11:32 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1132839
+/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1134136