Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2013/06/21 08:37:39 UTC

svn commit: r1495297 [33/46] - in /hadoop/common/branches/branch-1-win: ./ bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/ src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java Fri Jun 21 06:37:27 2013
@@ -165,7 +165,7 @@ public class AppendTestUtil {
     LOG.info("leasechecker.interruptAndJoin()");
     // lose the lease on the client
     DistributedFileSystem dfs = (DistributedFileSystem)whichfs;
-    dfs.dfs.leasechecker.interruptAndJoin();
+    dfs.dfs.getLeaseRenewer().interruptAndJoin();
   }
   
   public static void recoverFile(MiniDFSCluster cluster, FileSystem fs,

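The hunk above replaces direct access to DFSClient's package-private leasechecker field with the getLeaseRenewer() accessor. A minimal sketch of the resulting idiom for deliberately losing a lease in a test (assuming branch-1 internals, where the DFSClient instance is reachable through the dfs field):

    // Stop the client's lease-renewal thread so its leases can expire;
    // the cast assumes whichfs is backed by HDFS.
    DistributedFileSystem dfs = (DistributedFileSystem) whichfs;
    dfs.dfs.getLeaseRenewer().interruptAndJoin();
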
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Jun 21 06:37:27 2013
@@ -34,6 +34,7 @@ import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -136,27 +137,39 @@ public class DFSTestUtil {
   
   public static void createFile(FileSystem fs, Path fileName, long fileLen, 
       short replFactor, long seed) throws IOException {
+    createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
+        replFactor, seed);
+  }
+  
+  public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+      long fileLen, long blockSize, short replFactor, long seed)
+      throws IOException {
+    assert bufferLen > 0;
     if (!fs.mkdirs(fileName.getParent())) {
       throw new IOException("Mkdirs failed to create " + 
                             fileName.getParent().toString());
     }
     FSDataOutputStream out = null;
     try {
-      out = fs.create(fileName, replFactor);
-      byte[] toWrite = new byte[1024];
-      Random rb = new Random(seed);
-      long bytesToWrite = fileLen;
-      while (bytesToWrite>0) {
-        rb.nextBytes(toWrite);
-        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
-
-        out.write(toWrite, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
+      out = fs.create(fileName, true,
+        fs.getConf().getInt("io.file.buffer.size", 4096),replFactor, blockSize);
+      if (fileLen > 0) {
+        byte[] toWrite = new byte[bufferLen];
+        Random rb = new Random(seed);
+        long bytesToWrite = fileLen;
+        while (bytesToWrite>0) {
+          rb.nextBytes(toWrite);
+          int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
+              : (int) bytesToWrite;
+  
+          out.write(toWrite, 0, bytesToWriteNext);
+          bytesToWrite -= bytesToWriteNext;
+        }
       }
-      out.close();
-      out = null;
     } finally {
-      IOUtils.closeStream(out);
+      if (out != null) {
+        out.close();
+      }
     }
   }
   

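The new createFile overload exposes the write-buffer length and block size that the old version hard-coded (a 1024-byte buffer and the filesystem's default block size). A usage sketch with illustrative values:

    // 1 MB file written through a 4 KB buffer onto 64 KB blocks,
    // replication factor 2, deterministic bytes from a fixed seed.
    DFSTestUtil.createFile(fs, new Path("/test/f"), 4096, 1L << 20,
        64 * 1024L, (short) 2, 0xBEEFL);
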
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Jun 21 06:37:27 2013
@@ -253,7 +253,8 @@ public class MiniDFSCluster {
     
     int replication = conf.getInt("dfs.replication", 3);
     conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
-    conf.setInt("dfs.safemode.extension", 0);
+    int safemodeExtension = conf.getInt("dfs.safemode.extension.testing", 0);
+    conf.setInt("dfs.safemode.extension", safemodeExtension);
     conf.setInt("dfs.namenode.decommission.interval", 3); // 3 seconds
 
     // Set a small delay on blockReceived in the minicluster to approximate
@@ -277,6 +278,9 @@ public class MiniDFSCluster {
                    StaticMapping.class, DNSToSwitchMapping.class);
     nameNode = NameNode.createNameNode(args, conf);
     
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
                     operation, racks, hosts, simulatedCapacities);
@@ -589,27 +593,35 @@ public class MiniDFSCluster {
     }
   }
 
-  /**
-   * Restart namenode. Waits for exit from safemode.
-   */
-  public synchronized void restartNameNode()
-      throws IOException {
-    restartNameNode(true);
+  /** Same as restartNameNode(true, true). */
+  public synchronized void restartNameNode() throws IOException {
+    restartNameNode(true, true);
   }
   
+  /** Same as restartNameNode(waitSafemodeExit, true). */
+  public synchronized void restartNameNode(boolean waitSafemodeExit
+      ) throws IOException {
+    restartNameNode(waitSafemodeExit, true);
+  }
+
   /**
    * Restart namenode.
+   * 
+   * @param waitSafemodeExit Should it wait for safe mode to turn off?
+   * @param waitClusterActive Should it wait for cluster to be active?
+   * @throws IOException
    */
-  public synchronized void restartNameNode(boolean waitSafemodeExit)
-      throws IOException {
+  public synchronized void restartNameNode(boolean waitSafemodeExit,
+      boolean waitClusterActive) throws IOException {
     shutdownNameNode();
     nameNode = NameNode.createNameNode(new String[] {}, conf);
     if (waitSafemodeExit) {
       waitClusterUp();
     }
     System.out.println("Restarted the namenode");
+
     int failedCount = 0;
-    while (true) {
+    while(waitClusterActive) {
       try {
         waitActive();
         break;
@@ -624,7 +636,6 @@ public class MiniDFSCluster {
         }
       }
     }
-    System.out.println("Cluster is active");
   }
 
   /*
@@ -866,6 +877,7 @@ public class MiniDFSCluster {
     }
 
     client.close();
+    System.out.println("Cluster is active");
   }
 
   private synchronized boolean shouldWait(DatanodeInfo[] dnInfo) {
@@ -988,8 +1000,8 @@ public class MiniDFSCluster {
    * Set the softLimit and hardLimit of client lease periods
    */
   void setLeasePeriod(long soft, long hard) {
-    nameNode.namesystem.leaseManager.setLeasePeriod(soft, hard);
-    nameNode.namesystem.lmthread.interrupt();
+    nameNode.getNamesystem().leaseManager.setLeasePeriod(soft, hard);
+    nameNode.getNamesystem().lmthread.interrupt();
   }
 
   /**
@@ -1010,4 +1022,9 @@ public class MiniDFSCluster {
   public String getDataDirectory() {
     return data_dir.getAbsolutePath();
   }
+
+  public static File getBaseDir() {
+    return new File(System.getProperty(
+      "test.build.data", "build/test/data"), "dfs/");
+  }
 }

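The new two-argument restartNameNode lets a caller bring the namenode back without blocking on safe-mode exit or on the cluster becoming active, which the namenode-restart retry test later in this commit depends on. A sketch of that usage:

    // Restart asynchronously with respect to cluster health; callers that
    // need a usable cluster wait for it explicitly afterwards.
    cluster.restartNameNode(false, false);
    cluster.waitActive();
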
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Fri Jun 21 06:37:27 2013
@@ -52,7 +52,7 @@ public class TestBlocksScheduledCounter 
     ((DFSOutputStream)(out.getWrappedStream())).sync();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
-    cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
+    cluster.getNameNode().getNamesystem().DFSNodesStatus(dnList, dnList);
     DatanodeDescriptor dn = dnList.get(0);
     
     assertEquals(1, dn.getBlocksScheduled());

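Like the MiniDFSCluster hunk above, this and several later hunks replace reads of the formerly public namesystem field with the getNamesystem() accessor; the call sites are otherwise unchanged. The recurring shape:

    // Before: cluster.getNameNode().namesystem.leaseManager.countLease()
    int leases = cluster.getNameNode().getNamesystem().leaseManager.countLease();
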
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Fri Jun 21 06:37:27 2013
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.junit.Test;
 
 public class TestByteRangeInputStream {
@@ -76,6 +77,11 @@ public static class MockHttpURLConnectio
   public void setResponseCode(int resCode) {
     responseCode = resCode;
   }
+  
+  @Override
+  public String getHeaderField(String field) {
+      return (field.equalsIgnoreCase(StreamFile.CONTENT_LENGTH)) ? "65535" : null;
+  }
 }
 
   @Test

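With getHeaderField overridden, any code under test that asks the mock connection for a content length now gets a deterministic answer. A sketch of the consuming side (the parsing shown is an assumption for illustration, not the byte-range stream's actual code; connection stands for the MockHttpURLConnection above):

    // StreamFile.CONTENT_LENGTH names the Content-Length header in branch-1.
    String header = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
    long length = (header == null) ? -1L : Long.parseLong(header); // 65535 here
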
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Jun 21 06:37:27 2013
@@ -17,50 +17,73 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.net.SocketTimeoutException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
-import java.net.SocketTimeoutException;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSInputStream;
-import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-
-import org.apache.hadoop.hdfs.server.common.*;
-import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.*;
+import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.net.NetUtils;
-
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-
-import junit.framework.TestCase;
-import static org.mockito.Mockito.*;
-import org.mockito.stubbing.Answer;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
 import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
@@ -202,6 +225,10 @@ public class TestDFSClientRetries extend
       return versionID;
     }
 
+    public boolean isFileClosed(String src) throws IOException {
+      return true;
+    }
+
     public LocatedBlock addBlock(String src, String clientName)
     throws IOException
     {
@@ -247,6 +274,8 @@ public class TestDFSClientRetries extend
     public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {}
 
     public boolean rename(String src, String dst) throws IOException { return false; }
+    
+    public void concat(String trg, String[] srcs)  throws IOException {}
 
     public boolean delete(String src) throws IOException { return false; }
 
@@ -466,7 +495,7 @@ public class TestDFSClientRetries extend
 
     try {
       proxy = DFSClient.createClientDatanodeProtocolProxy(dnInfo, conf,
-          fakeBlock.getBlock(), fakeBlock.getBlockToken(), 500);
+          fakeBlock.getBlock(), fakeBlock.getBlockToken(), 500, false);
 
       fail ("Did not get expected exception: SocketTimeoutException");
     } catch (SocketTimeoutException e) {
@@ -509,4 +538,253 @@ public class TestDFSClientRetries extend
       cluster.shutdown();
     }
   }
+
+  /** Test client retry with namenode restarting. */
+  public void testNamenodeRestart() throws Exception {
+    namenodeRestartTest(new Configuration(), false);
+  }
+
+  public static void namenodeRestartTest(final Configuration conf,
+      final boolean isWebHDFS) throws Exception {
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+
+    final List<Exception> exceptions = new ArrayList<Exception>();
+
+    final Path dir = new Path("/testNamenodeRestart");
+
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+    conf.setInt("dfs.safemode.extension.testing", 5000);
+
+    final short numDatanodes = 3;
+    final MiniDFSCluster cluster = new MiniDFSCluster(
+        conf, numDatanodes, true, null);
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      final FileSystem fs = isWebHDFS?
+          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final URI uri = dfs.getUri();
+      assertTrue(DistributedFileSystem.isHealthy(uri));
+
+      //create a file
+      final long length = 1L << 20;
+      final Path file1 = new Path(dir, "foo"); 
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
+
+      //get file status
+      final FileStatus s1 = fs.getFileStatus(file1);
+      assertEquals(length, s1.getLen());
+
+      //create file4, write some data but not close
+      final Path file4 = new Path(dir, "file4"); 
+      final FSDataOutputStream out4 = fs.create(file4, false, 4096,
+          fs.getDefaultReplication(file4), 1024L, null);
+      final byte[] bytes = new byte[1000];
+      new Random().nextBytes(bytes);
+      out4.write(bytes);
+      out4.write(bytes);
+      out4.sync();
+
+      //shutdown namenode
+      assertTrue(DistributedFileSystem.isHealthy(uri));
+      cluster.shutdownNameNode();
+      assertFalse(DistributedFileSystem.isHealthy(uri));
+
+      //namenode is down, continue writing file4 in a thread
+      final Thread file4thread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //write some more data and then close the file
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.close();
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      file4thread.start();
+
+      //namenode is down, read the file in a thread
+      final Thread reader = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //it should retry till namenode is up.
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
+            final FSDataInputStream in = fs.open(file1);
+            int count = 0;
+            for(; in.read() != -1; count++);
+            in.close();
+            assertEquals(s1.getLen(), count);
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      reader.start();
+
+      //namenode is down, create another file in a thread
+      final Path file3 = new Path(dir, "file"); 
+      final Thread thread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //it should retry till namenode is up.
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
+            DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      thread.start();
+ 
+      //restart namenode in a new thread
+      new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //sleep, restart, and then wait active
+            TimeUnit.SECONDS.sleep(30);
+            assertFalse(DistributedFileSystem.isHealthy(uri));
+            cluster.restartNameNode(false, false);
+            cluster.waitActive();
+            assertTrue(DistributedFileSystem.isHealthy(uri));
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      }).start();
+
+      //namenode is down, it should retry until namenode is up again. 
+      final FileStatus s2 = fs.getFileStatus(file1);
+      assertEquals(s1, s2);
+
+      //check file1 and file3
+      thread.join();
+      assertEmpty(exceptions);
+      assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
+
+      reader.join();
+      assertEmpty(exceptions);
+
+      //check file4
+      file4thread.join();
+      assertEmpty(exceptions);
+      {
+        final FSDataInputStream in = fs.open(file4);
+        int count = 0;
+        for(int r; (r = in.read()) != -1; count++) {
+          Assert.assertEquals(String.format("count=%d", count),
+              bytes[count % bytes.length], (byte)r);
+        }
+        Assert.assertEquals(5 * bytes.length, count);
+        in.close();
+      }
+
+      //enter safe mode
+      assertTrue(DistributedFileSystem.isHealthy(uri));
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      assertFalse(DistributedFileSystem.isHealthy(uri));
+      
+      //leave safe mode in a new thread
+      new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //sleep and then leave safe mode
+            TimeUnit.SECONDS.sleep(30);
+            assertFalse(DistributedFileSystem.isHealthy(uri));
+            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+            assertTrue(DistributedFileSystem.isHealthy(uri));
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      }).start();
+
+      //namenode is in safe mode, create should retry until it leaves safe mode.
+      final Path file2 = new Path(dir, "bar");
+      DFSTestUtil.createFile(fs, file2, length, numDatanodes, 20120406L);
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file2));
+
+      assertTrue(DistributedFileSystem.isHealthy(uri));
+      
+      //make sure it won't retry on exceptions like FileNotFoundException
+      final Path nonExisting = new Path(dir, "nonExisting");
+      LOG.info("setPermission: " + nonExisting);
+      try {
+        fs.setPermission(nonExisting, new FsPermission((short)0));
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        LOG.info("GOOD!", fnfe);
+      }
+
+      assertEmpty(exceptions);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  static void assertEmpty(final List<Exception> exceptions) {
+    if (!exceptions.isEmpty()) {
+      final StringBuilder b = new StringBuilder("There are ")
+        .append(exceptions.size())
+        .append(" exception(s):");
+      for(int i = 0; i < exceptions.size(); i++) {
+        b.append("\n  Exception ")
+         .append(i)
+         .append(": ")
+         .append(StringUtils.stringifyException(exceptions.get(i)));
+      }
+      fail(b.toString());
+    }
+  }
+
+  private static FileSystem createFsWithDifferentUsername(
+      final Configuration conf, final boolean isWebHDFS
+      ) throws IOException, InterruptedException {
+    String username = UserGroupInformation.getCurrentUser().getShortUserName()+"_XXX";
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
+    
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+        : DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  public void testMultipleLinearRandomRetry() {
+    parseMultipleLinearRandomRetry(null, "");
+    parseMultipleLinearRandomRetry(null, "11");
+    parseMultipleLinearRandomRetry(null, "11,22,33");
+    parseMultipleLinearRandomRetry(null, "11,22,33,44,55");
+    parseMultipleLinearRandomRetry(null, "AA");
+    parseMultipleLinearRandomRetry(null, "11,AA");
+    parseMultipleLinearRandomRetry(null, "11,22,33,FF");
+    parseMultipleLinearRandomRetry(null, "11,-22");
+    parseMultipleLinearRandomRetry(null, "-11,22");
+
+    parseMultipleLinearRandomRetry("[22x11ms]",
+        "11,22");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms]",
+        "11,22,33,44");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+        "11,22,33,44,55,66");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+        "   11,   22, 33,  44, 55,  66   ");
+  }
+  
+  static void parseMultipleLinearRandomRetry(String expected, String s) {
+    final MultipleLinearRandomRetry r = MultipleLinearRandomRetry.parseCommaSeparatedString(s);
+    LOG.info("input=" + s + ", parsed=" + r + ", expected=" + expected);
+    if (r == null) {
+      Assert.assertEquals(expected, null);
+    } else {
+      Assert.assertEquals("MultipleLinearRandomRetry" + expected, r.toString());
+    }
+  }
 }

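testMultipleLinearRandomRetry pins down the retry-spec format: a comma-separated list of sleepTime,numRetries pairs, so "11,22" parses to one group of 22 retries sleeping about 11 ms each. A worked configuration sketch (the spec property name follows the trunk change this appears to backport and should be treated as an assumption):

    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
    // Six retries ~10 s apart, then ten retries ~60 s apart; the parsed
    // policy prints as MultipleLinearRandomRetry[6x10000ms, 10x60000ms].
    conf.set("dfs.client.retry.policy.spec", "10000,6,60000,10");
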
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java Fri Jun 21 06:37:27 2013
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.dat
 
 public class TestDFSRemove extends junit.framework.TestCase {
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNameNode().namesystem.leaseManager.countLease();
+    return cluster.getNameNode().getNamesystem().leaseManager.countLease();
   }
   
   final Path dir = new Path("/test/remove/");

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSRename.java Fri Jun 21 06:37:27 2013
@@ -30,7 +30,7 @@ public class TestDFSRename extends junit
   static Configuration CONF = new Configuration();
   static MiniDFSCluster cluster = null;
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNameNode().namesystem.leaseManager.countLease();
+    return cluster.getNameNode().getNamesystem().leaseManager.countLease();
   }
   
   final Path dir = new Path("/test/rename/");

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java Fri Jun 21 06:37:27 2013
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.shell.Count;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell;
@@ -1298,6 +1299,108 @@ public class TestDFSShell extends TestCa
       cluster.shutdown();
     }
   }
+  
+  /**
+   * run copyFromLocal when home dir does not exist and use . as the destination
+   */
+  public void testCopyFromLocal1() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    try {
+      String fname = "testCopyFromLocal1.txt";
+      File f = createLocalFile(new File(TEST_ROOT_DIR, fname));
+
+      // copy the file to hdfs when home dir does not exist
+      int exitCode = runCmd(shell, "-copyFromLocal", f.getAbsolutePath(), ".");
+
+      assertEquals("cmd exit code is not 0", 0, exitCode);
+
+      // make sure the file exists on hdfs
+      assertTrue("File " + fname + " does not exist on hdfs.",
+          dfs.exists(new Path(fname)));
+
+      // copy the file to hdfs again and it should fail
+      exitCode = runCmd(shell, "-copyFromLocal", f.getAbsolutePath(), ".");
+      assertTrue("cmd exit code is 0 when it should not be -> " + exitCode,
+          exitCode != 0);
+
+      // copy another file onto hdfs when the home dir exists and make sure it
+      // works
+      String fname2 = "testCopyFromLocal1.2.txt";
+      File f2 = createLocalFile(new File(TEST_ROOT_DIR, fname2));
+
+      // copy the file to hdfs
+      exitCode = runCmd(shell, "-copyFromLocal", f2.getAbsolutePath(), ".");
+
+      assertEquals("cmd exit code is not 0", 0, exitCode);
+
+      // make sure the file exists on hdfs
+      assertTrue("File " + fname2 + " does not exist on hdfs.",
+          dfs.exists(new Path(fname2)));
+
+      // copy another file onto hdfs when the home dir exists and make sure it
+      // works when we use file name as the destination
+      String fname3 = "testCopyFromLocal1.3.txt";
+      File f3 = createLocalFile(new File(TEST_ROOT_DIR, fname3));
+
+      // copy the file to hdfs
+      exitCode = runCmd(shell, "-copyFromLocal", f3.getAbsolutePath(), fname3);
+
+      assertEquals("cmd exit code is not 0", 0, exitCode);
+
+      // make sure the file exists on hdfs
+      assertTrue("File " + fname3 + " does not exist on hdfs.",
+          dfs.exists(new Path(fname3)));
+    } finally {
+      try {
+        dfs.close();
+      } catch (Exception e) {
+      }
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * run copyFromLocal when home dir does not exist and use the file name as the
+   * destination
+   */
+  public void testCopyFromLocal2() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    try {
+      String fname = "testCopyFromLocal2.txt";
+      File f = createLocalFile(new File(TEST_ROOT_DIR, fname));
+
+      // copy the file to hdfs
+      int exitCode = runCmd(shell, "-copyFromLocal", f.getAbsolutePath(), fname);
+
+      assertEquals("cmd exit code is not 0", 0, exitCode);
+
+      // make sure the file exists on hdfs
+      assertTrue("File " + fname + " does not exist on hdfs.",
+          dfs.exists(new Path(fname)));
+
+      // copy the file to hdfs again and make sure it fails.
+      exitCode = runCmd(shell, "-copyFromLocal", f.getAbsolutePath(), fname);
+
+      assertTrue("cmd exit code is 0 when it should not be -> " + exitCode,
+          exitCode != 0);
+
+    } finally {
+      try {
+        dfs.close();
+      } catch (Exception e) {
+      }
+      cluster.shutdown();
+    }
+  }
+  
   private static String runLsr(final FsShell shell, String root, int returnvalue
       ) throws Exception {
     System.out.println("root=" + root + ", returnvalue=" + returnvalue);
@@ -1319,4 +1422,19 @@ public class TestDFSShell extends TestCa
     System.out.println("results:\n" + results);
     return results;
   }
+  
+    
+  /**
+   * The default filesystem is file://, which is not a DFS, so DFSAdmin should
+   * throw and catch InvalidArgumentException and return a -1 exit code.
+   * 
+   * @throws Exception
+   */
+  public void testInvalidShell() throws Exception {
+    Configuration conf = new Configuration(); // default FS (non-DFS)
+    DFSAdmin admin = new DFSAdmin();
+    admin.setConf(conf);
+    int res = admin.run(new String[] { "-refreshNodes" });
+    assertEquals("expected to fail -1", -1, res);
+  }
 }

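Both copyFromLocal tests drive the shell through the test's runCmd helper; since FsShell implements Tool, the equivalent direct call is:

    FsShell shell = new FsShell();
    shell.setConf(conf);
    // Returns 0 on success and non-zero when the destination already exists
    // (Tool.run declares throws Exception).
    int rc = shell.run(new String[] {"-copyFromLocal", "/tmp/local.txt", "."});
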
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Fri Jun 21 06:37:27 2013
@@ -326,7 +326,7 @@ public class TestDatanodeBlockScanner ex
      cluster.restartDataNode(corruptReplicasDNIDs[i]);
 
     // Loop until all corrupt replicas are reported
-    int corruptReplicaSize = cluster.getNameNode().namesystem.
+    int corruptReplicaSize = cluster.getNameNode().getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != numCorruptReplicas) {
       try {
@@ -340,7 +340,7 @@ public class TestDatanodeBlockScanner ex
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNameNode().getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     }
     
@@ -361,7 +361,7 @@ public class TestDatanodeBlockScanner ex
 
     // Make sure the corrupt replica is invalidated and removed from
     // corruptReplicasMap
-    corruptReplicaSize = cluster.getNameNode().namesystem.
+    corruptReplicaSize = cluster.getNameNode().getNamesystem().
                           corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
       try {
@@ -369,7 +369,7 @@ public class TestDatanodeBlockScanner ex
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNameNode().getNamesystem().
                             corruptReplicas.numCorruptReplicas(blk);
       blocks = dfsClient.namenode.
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDecommission.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDecommission.java Fri Jun 21 06:37:27 2013
@@ -183,7 +183,7 @@ public class TestDecommission extends Te
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namenode.namesystem.refreshNodes(conf);
+    namenode.getNamesystem().refreshNodes(conf);
     return nodename;
   }
 

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Fri Jun 21 06:37:27 2013
@@ -46,6 +46,10 @@ import org.junit.Test;
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();
 
+  {
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+  }
+
   private boolean dualPortTesting = false;
   
   private Configuration getTestConfiguration() {
@@ -102,40 +106,108 @@ public class TestDistributedFileSystem {
   @Test
   public void testDFSClient() throws Exception {
     Configuration conf = getTestConfiguration();
+    final long grace = 1000L;
     MiniDFSCluster cluster = null;
 
     try {
       cluster = new MiniDFSCluster(conf, 2, true, null);
-      final Path filepath = new Path("/test/LeaseChecker/foo");
+      final String filepathstring = "/test/LeaseChecker/foo";
+      final Path[] filepaths = new Path[4];
+      for(int i = 0; i < filepaths.length; i++) {
+        filepaths[i] = new Path(filepathstring + i);
+      }
       final long millis = System.currentTimeMillis();
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
   
-        //create a file
-        FSDataOutputStream out = dfs.create(filepath);
-        assertTrue(dfs.dfs.isLeaseCheckerStarted());
-  
-        //write something and close
-        out.writeLong(millis);
-        assertTrue(dfs.dfs.isLeaseCheckerStarted());
-        out.close();
-        assertTrue(dfs.dfs.isLeaseCheckerStarted());
+        {
+          //create a file
+          final FSDataOutputStream out = dfs.create(filepaths[0]);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //write something
+          out.writeLong(millis);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //close
+          out.close();
+          Thread.sleep(grace/4*3);
+          //within grace period
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          for(int i = 0; i < 3; i++) {
+            if (dfs.dfs.getLeaseRenewer().isRunning()) {
+              Thread.sleep(grace/2);
+            }
+          }
+          //passed grace period
+          assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
+        }
+
+        {
+          //create file1
+          final FSDataOutputStream out1 = dfs.create(filepaths[1]);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //create file2
+          final FSDataOutputStream out2 = dfs.create(filepaths[2]);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+
+          //write something to file1
+          out1.writeLong(millis);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //close file1
+          out1.close();
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+
+          //write something to file2
+          out2.writeLong(millis);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //close file2
+          out2.close();
+          Thread.sleep(grace/4*3);
+          //within grace period
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+        }
+
+        {
+          //create file3
+          final FSDataOutputStream out3 = dfs.create(filepaths[3]);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          Thread.sleep(grace/4*3);
+          //passed previous grace period, should still be running
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //write something to file3
+          out3.writeLong(millis);
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          //close file3
+          out3.close();
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          Thread.sleep(grace/4*3);
+          //within grace period
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
+          for(int i = 0; i < 3; i++) {
+            if (dfs.dfs.getLeaseRenewer().isRunning()) {
+              Thread.sleep(grace/2);
+            }
+          }
+          //passed grace period
+          assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
+        }
+
         dfs.close();
       }
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
 
         //open and check the file
-        FSDataInputStream in = dfs.open(filepath);
-        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        FSDataInputStream in = dfs.open(filepaths[0]);
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         assertEquals(millis, in.readLong());
-        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         in.close();
-        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         dfs.close();
       }
     }
@@ -400,4 +472,26 @@ public class TestDistributedFileSystem {
     testDFSClient();
     testFileChecksum();
   }
+
+  @Test(timeout=60000)
+  public void testFileCloseStatus() throws IOException {
+    Configuration conf = getTestConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    try {
+      // create a new file.
+      Path file = new Path("/simpleFlush.dat");
+      FSDataOutputStream output = fs.create(file);
+      // write to file
+      output.writeBytes("Some test data");
+      output.flush();
+      assertFalse("File status should be open", fs.isFileClosed(file));
+      output.close();
+      assertTrue("File status should be closed", fs.isFileClosed(file));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

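The rewritten testDFSClient shortens the renewer's grace period so the daemon's lifecycle can be observed quickly, and testFileCloseStatus exercises the new isFileClosed call. A condensed sketch of the lifecycle being asserted:

    dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(1000L); // 1 s grace, test only
    FSDataOutputStream out = dfs.create(new Path("/lease-demo"));
    // getLeaseRenewer().isRunning() is true while the stream is open...
    out.close();
    Thread.sleep(3000L); // well past the grace period
    // ...and false once the last lease is released and the grace period elapses.
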
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Fri Jun 21 06:37:27 2013
@@ -108,7 +108,7 @@ public class TestFileAppend2 extends Tes
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     conf.setInt("dfs.datanode.handler.count", 50);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     initBuffer(fileSize);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
@@ -359,7 +359,7 @@ public class TestFileAppend2 extends Tes
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setInt("dfs.datanode.artificialBlockReceivedDelay",
                 artificialBlockReceivedDelay);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                 true, null);

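The same rename from dfs.support.append to dfs.support.broken.append recurs in the append tests below; the way the flag is set is unchanged:

    Configuration conf = new Configuration();
    // branch-1 gates its legacy append implementation behind this renamed key.
    conf.setBoolean("dfs.support.broken.append", true);
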
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Jun 21 06:37:27 2013
@@ -52,7 +52,7 @@ public class TestFileAppend3 extends jun
         AppendTestUtil.LOG.info("setUp()");
         conf = new Configuration();
         conf.setInt("io.bytes.per.checksum", 512);
-        conf.setBoolean("dfs.support.append", true);
+        conf.setBoolean("dfs.support.broken.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
         fs = (DistributedFileSystem)cluster.getFileSystem();

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java Fri Jun 21 06:37:27 2013
@@ -97,7 +97,7 @@ public class TestFileAppend4 extends Tes
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt("heartbeat.recheck.interval", 1000);
@@ -656,7 +656,7 @@ public class TestFileAppend4 extends Tes
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leasechecker.interruptAndJoin();
+      client.getLeaseRenewer().interruptAndJoin();
 
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
@@ -726,7 +726,7 @@ public class TestFileAppend4 extends Tes
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leasechecker.interruptAndJoin();
+      client.getLeaseRenewer().interruptAndJoin();
 
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
@@ -780,9 +780,9 @@ public class TestFileAppend4 extends Tes
 
       // Make the NN fail to commitBlockSynchronization one time
       NameNode nn = cluster.getNameNode();
-      nn.namesystem = spy(nn.namesystem);
+      nn.setNamesystem(spy(nn.getNamesystem()));
       doAnswer(new ThrowNTimesAnswer(IOException.class, 1)).
-        when(nn.namesystem).
+        when(nn.getNamesystem()).
         commitBlockSynchronization((Block)anyObject(), anyInt(), anyInt(),
                                    anyBoolean(), anyBoolean(),
                                    (DatanodeID[])anyObject());
@@ -890,9 +890,9 @@ public class TestFileAppend4 extends Tes
     // Allow us to delay commitBlockSynchronization
     DelayAnswer delayer = new DelayAnswer();
     NameNode nn = cluster.getNameNode();
-    nn.namesystem = spy(nn.namesystem);
+    nn.setNamesystem(spy(nn.getNamesystem()));
     doAnswer(delayer).
-      when(nn.namesystem).
+      when(nn.getNamesystem()).
       commitBlockSynchronization((Block) anyObject(), anyInt(), anyInt(),
         anyBoolean(), anyBoolean(),
         (DatanodeID[]) anyObject());
@@ -1235,7 +1235,7 @@ public class TestFileAppend4 extends Tes
       LOG.info("======== Writing");
       AppendTestUtil.write(stm, 0, halfBlock/2);
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(), "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1286,7 +1286,8 @@ public class TestFileAppend4 extends Tes
       AppendTestUtil.write(stm, 0, halfBlock/4);
 
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1320,7 +1321,8 @@ public class TestFileAppend4 extends Tes
       waitForBlockReplication(fs1, "/delayedReceiveBlock", 0, 3000);
 
       LOG.info("======== Checking not complete");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
 
       // Stop one of the DNs, don't restart
       MiniDFSCluster.DataNodeProperties dnprops = cluster.stopDataNode(0);
@@ -1330,7 +1332,8 @@ public class TestFileAppend4 extends Tes
 
       // Make sure we don't see the file as complete
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1361,9 +1364,9 @@ public class TestFileAppend4 extends Tes
       DelayAnswer delayer = new DelayAnswer(false);
 
       NameNode nn = cluster.getNameNode();
-      nn.namesystem = spy(nn.namesystem);
+      nn.setNamesystem(spy(nn.getNamesystem()));
       NameNodeAdapter.callNextGenerationStampForBlock(
-        doAnswer(delayer).when(nn.namesystem),
+        doAnswer(delayer).when(nn.getNamesystem()),
         (Block)anyObject(), anyBoolean());
 
       final AtomicReference<Throwable> err = new AtomicReference<Throwable>();

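With the namesystem field now accessed through a getter and setter, the fault-injection tests install a Mockito spy via setNamesystem before stubbing. The pattern from the hunks above, gathered in one place (FSNamesystem is the branch-1 type of the field; ThrowNTimesAnswer is a helper local to this test):

    FSNamesystem spied = spy(nn.getNamesystem());
    nn.setNamesystem(spied);
    // Fail commitBlockSynchronization once, then fall through to the real call.
    doAnswer(new ThrowNTimesAnswer(IOException.class, 1)).when(spied)
        .commitBlockSynchronization((Block) anyObject(), anyInt(), anyInt(),
            anyBoolean(), anyBoolean(), (DatanodeID[]) anyObject());
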
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java Fri Jun 21 06:37:27 2013
@@ -231,7 +231,7 @@ public class TestFileConcurrentReader ex
     Configuration conf
   ) throws IOException {
     try {
-      conf.setBoolean("dfs.support.append", syncType == SyncType.APPEND);
+      conf.setBoolean("dfs.support.broken.append", syncType == SyncType.APPEND);
       conf.setBoolean("dfs.datanode.transferTo.allowed", transferToAllowed);
       init(conf);
 

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Jun 21 06:37:27 2013
@@ -134,7 +134,7 @@ public class TestFileCorruption extends 
       DataNode dataNode = datanodes.get(2);
       
       // report corrupted block by the third datanode
-      cluster.getNameNode().namesystem.markBlockAsCorrupt(blk, 
+      cluster.getNameNode().getNamesystem().markBlockAsCorrupt(blk, 
           new DatanodeInfo(dataNode.dnRegistration ));
       
       // open the file

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Fri Jun 21 06:37:27 2013
@@ -23,6 +23,7 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -46,13 +47,16 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
+import static org.junit.Assume.assumeTrue;
+import static org.junit.Assert.*;
 
 /**
  * This class tests that a file need not be closed before its
  * data can be read by another client.
  */
-public class TestFileCreation extends junit.framework.TestCase {
+public class TestFileCreation {
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
 
   {
@@ -170,11 +174,54 @@ public class TestFileCreation extends ju
     stm.close();
   }
 
+  @Test
+  public void testFileCreation() throws IOException {
+    checkFileCreation(null, null);
+  }
+
+  /** Same test but the client should use DN hostname instead of IPs */
+  @Test
+  public void testFileCreationByHostname() throws IOException {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+
+    // Since the mini cluster only listens on the loopback we have to
+    // ensure the hostname used to access DNs maps to the loopback. We
+    // do this by telling the DN to advertise localhost as its hostname
+    // instead of the default hostname.
+    checkFileCreation("localhost", null);
+  }
+
+  /** Same test but the client should bind to a local interface */
+  @Test
+  public void testFileCreationSetLocalInterface() throws IOException {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+
+    // The mini cluster listens on the loopback so we can use it here
+    checkFileCreation(null, "lo");
+
+    try {
+      checkFileCreation(null, "bogus-interface");
+      fail("Able to specify a bogus interface");
+    } catch (UnknownHostException e) {
+      assertEquals("Unknown interface bogus-interface", e.getMessage());
+    }
+  }
+
   /**
    * Test that file data becomes available before file is closed.
+   * @param hostname the hostname, if any, clients should use to access DNs
+   * @param netIf the local interface, if any, clients should use to access DNs
    */
-  public void testFileCreation() throws IOException {
+  public void checkFileCreation(String hostname, String netIf) throws IOException {
     Configuration conf = new Configuration();
+
+    if (hostname != null) {
+      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, true);
+      conf.set("slave.host.name", hostname);
+    }
+    if (netIf != null) {
+      conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
+    }
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -203,7 +250,7 @@ public class TestFileCreation extends ju
         fs.close();
         assertTrue("Did not prevent directory from being overwritten.", false);
       } catch (IOException ie) {
-        if (!ie.getMessage().contains("already exists as a directory."))
+        if (!ie.getMessage().contains("already exists as a directory"))
           throw ie;
       }
       
@@ -253,6 +300,7 @@ public class TestFileCreation extends ju
   /**
    * Test deleteOnExit
    */
+  @Test
   public void testDeleteOnExit() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
@@ -315,6 +363,7 @@ public class TestFileCreation extends ju
   /**
    * Test file creation using createNonRecursive().
    */
+  @Test
   public void testFileCreationNonRecursive() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
@@ -407,6 +456,7 @@ public class TestFileCreation extends ju
   /**
    * Test that file data does not become corrupted even in the face of errors.
    */
+  @Test
   public void testFileCreationError1() throws IOException {
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 1000);
@@ -479,6 +529,7 @@ public class TestFileCreation extends ju
    * Test that the filesystem removes the last block from a file if its
    * lease expires.
    */
+  @Test
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
     System.out.println("testFileCreationError2 start");
@@ -548,6 +599,7 @@ public class TestFileCreation extends ju
    * This test is currently not triggered because more HDFS work
    * is needed to handle persistent leases.
    */
+  //@Test
   public void xxxtestFileCreationNamenodeRestart() throws IOException {
     Configuration conf = new Configuration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -689,6 +741,7 @@ public class TestFileCreation extends ju
   /**
    * Test that all open files are closed when client dies abnormally.
    */
+  @Test
   public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new Configuration();
     System.out.println("Testing adbornal client death.");
@@ -725,6 +778,7 @@ public class TestFileCreation extends ju
   /**
    * Test that file data becomes available before file is closed (simulated).
    */
+  @Test
   public void testFileCreationSimulated() throws IOException {
     simulatedStorage = true;
     testFileCreation();
@@ -734,6 +788,7 @@ public class TestFileCreation extends ju
   /**
    * Test creating two files at the same time. 
    */
+  @Test
   public void testConcurrentFileCreation() throws IOException {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
@@ -764,10 +819,45 @@ public class TestFileCreation extends ju
   }
 
   /**
+   * Test creating a file whose data gets synced when closed
+   */
+  @Test
+  public void testFileCreationSyncOnClose() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      
+      Path[] p = {new Path("/foo"), new Path("/bar")};
+      
+      //write 2 files at the same time
+      FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
+      int i = 0;
+      for(; i < 100; i++) {
+        out[0].write(i);
+        out[1].write(i);
+      }
+      out[0].close();
+      for(; i < 200; i++) {out[1].write(i);}
+      out[1].close();
+
+      //verify
+      FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};  
+      for(i = 0; i < 100; i++) {assertEquals(i, in[0].read());}
+      for(i = 0; i < 200; i++) {assertEquals(i, in[1].read());}
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+  
+  /**
    * Create a file, write something, fsync but not close.
    * Then change lease period and wait for lease recovery.
    * Finally, read the block directly from each Datanode and verify the content.
    */
+  @Test
   public void testLeaseExpireHardLimit() throws Exception {
     System.out.println("testLeaseExpireHardLimit start");
     final long leasePeriod = 1000;
@@ -830,6 +920,7 @@ public class TestFileCreation extends ju
   }
 
   // test closing file system before all file handles are closed.
+  @Test
   public void testFsClose() throws Exception {
     System.out.println("test file system close start");
     final int DATANODE_NUM = 3;
@@ -856,4 +947,51 @@ public class TestFileCreation extends ju
       cluster.shutdown();
     }
   }
+
+  // test closing a file after the cluster is shut down
+  @Test
+  public void testFsCloseAfterClusterShutdown() throws IOException {
+    System.out.println("test testFsCloseAfterClusterShutdown start");
+    final int DATANODE_NUM = 3;
+
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.replication.min", 3);
+    conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
+    conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+      // create a new file.
+      final String f = DIR + "dhrubashutdown";
+      final Path fpath = new Path(f);
+      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      out.write("something_dhruba".getBytes());
+      out.sync();    // ensure that block is allocated
+
+      // shutdown last datanode in pipeline.
+      cluster.stopDataNode(2);
+
+      // close file. Since we have set the min replication to 3 but have killed one
+      // of the three datanodes, the close call will loop until the hdfsTimeout is
+      // encountered.
+      boolean hasException = false;
+      try {
+        out.close();
+        System.out.println("testFsCloseAfterClusterShutdown: Error here");
+      } catch (IOException e) {
+        hasException = true;
+      }
+      assertTrue("Failed to close file after cluster shutdown", hasException);
+    } finally {
+      System.out.println("testFsCloseAfterClusterShutdown successful");
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
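
A note on the timeout trick in testFsCloseAfterClusterShutdown above: with
"ipc.client.ping" disabled, the ping interval doubles as a hard RPC timeout,
which is what lets close() fail instead of retrying forever. A minimal sketch
of that configuration, using the property names from the hunk above (the
helper class itself is illustrative only):

    import org.apache.hadoop.conf.Configuration;

    public class FastFailConf {
      // Build a conf whose client RPCs give up after roughly 10 seconds.
      public static Configuration create() {
        Configuration conf = new Configuration();
        conf.setBoolean("ipc.client.ping", false); // disable keepalive pings...
        conf.setInt("ipc.ping.interval", 10000);   // ...so this acts as a hard timeout
        return conf;
      }
    }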

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java Fri Jun 21 06:37:27 2013
@@ -42,7 +42,6 @@ public class TestFileCreationDelete exte
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
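
The "dfs.support.append" removal above pairs with the rename visible in the
TestLeaseRecovery hunk further down: on branch-1 the legacy append path now
sits behind "dfs.support.broken.append". A hedged sketch for tests that still
need that behavior:

    Configuration conf = new Configuration();
    // Legacy append is off by default; the renamed key (see the
    // TestLeaseRecovery change in this commit) turns it back on for tests.
    conf.setBoolean("dfs.support.broken.append", true);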

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Test the fileLength on cluster restarts */
+public class TestFileLengthOnClusterRestart {
+  /**
+   * Tests the file length when we sync the file, restart the cluster, and
+   * the Datanodes have not yet reported to the Namenode.
+   */
+  @Test(timeout = 60000)
+  public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
+      throws Exception {
+    final Configuration conf = new Configuration();
+    // create cluster
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
+
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DFSDataInputStream in = null;
+    FSDataOutputStream out = null;
+    DistributedFileSystem dfs = null;
+    try {
+      Path path = new Path(MiniDFSCluster.getBaseDir().getPath(), "test");
+      dfs = (DistributedFileSystem) cluster.getFileSystem();
+      out = dfs.create(path);
+      int fileLength = 1030;
+      out.write(new byte[fileLength]);
+      out.sync();
+      cluster.restartNameNode();
+      cluster.waitActive();
+      in = (DFSDataInputStream) dfs.open(path, 1024);
+      // Verify the length when we just restart NN. DNs will register
+      // immediately.
+      Assert.assertEquals(fileLength, in.getVisibleLength());
+      cluster.shutdownDataNodes();
+      cluster.restartNameNode(false);
+      // This just ensures that the NN has started.
+      verifyNNIsInSafeMode(dfs);
+
+      try {
+        in = (DFSDataInputStream) dfs.open(path);
+        Assert.fail("Expected IOException");
+      } catch (IOException e) {
+        Assert.assertTrue(e.getLocalizedMessage().indexOf(
+            "Name node is in safe mode") >= 0);
+      }
+
+    } finally {
+      if (null != in) {
+        in.close();
+      }
+      if (null != dfs) {
+        dfs.dfs.clientRunning = false;
+      }
+      cluster.shutdown();
+    }
+  }
+
+  private void verifyNNIsInSafeMode(DistributedFileSystem dfs)
+      throws IOException {
+    while (true) {
+      boolean inSafeMode = false;
+      try {
+        inSafeMode = dfs.dfs.namenode.setSafeMode(SafeModeAction.SAFEMODE_GET);
+      } catch (IOException e) {
+        continue; // NN might not have started completely; retry
+      }
+      if (!inSafeMode) {
+        throw new IOException("Expected the NN to be in safe mode");
+      }
+      return;
+    }
+  }
+}
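
The safe-mode probe in the new test goes through the internal dfs.dfs.namenode
handle; client code can do the same check via the public DistributedFileSystem
API. A minimal sketch, assuming branch-1's FSConstants.SafeModeAction, where
SAFEMODE_GET only queries the state and never toggles it:

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;

    static void waitUntilOutOfSafeMode(DistributedFileSystem dfs)
        throws Exception {
      // Poll the read-only safe-mode flag until the NN reports it is off.
      while (dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)) {
        Thread.sleep(1000);
      }
    }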

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java Fri Jun 21 06:37:27 2013
@@ -19,29 +19,114 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.*;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-
-import junit.framework.TestCase;
 /**
  * This class tests if block replacement request to data nodes work correctly.
  */
 public class TestGetBlocks extends TestCase {
+  
+  private static final int blockSize = 8192;
+  private static final String[] racks = new String[] { "/d1/r1", "/d1/r1",
+      "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
+  private static final int numDatanodes = racks.length;
+
+  /**
+   * Test that the datanodes returned by
+   * {@link ClientProtocol#getBlockLocations(String, long, long)} are correct
+   * when 1) stale node checking is enabled, 2) a write is in progress,
+   * and 3) a datanode becomes stale, all at the same time.
+   * 
+   * @throws Exception
+   */
+  public void testReadSelectNonStaleDatanode() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
+    // DataNodes will send out a heartbeat every 15 minutes.
+    // This way, once we have marked a datanode as stale,
+    // no heartbeat will arrive to refresh its state.
+    long heartbeatInterval = 15 * 60;
+    long staleInterval = 3 * heartbeatInterval * 1000;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+        staleInterval);
+    conf.setLong("dfs.heartbeat.interval", heartbeatInterval);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true,
+        racks);
+
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+        cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
+        .getNamesystem().getDatanodeListForReport(DatanodeReportType.LIVE);
+    assertEquals("Unexpected number of datanodes", numDatanodes,
+        nodeInfoList.size());
+    FileSystem fileSys = cluster.getFileSystem();
+    FSDataOutputStream stm = null;
+    try {
+      // do the writing but do not close the FSDataOutputStream,
+      // in order to mimic an ongoing write
+      final Path fileName = new Path("/file1");
+      stm = fileSys.create(fileName, true,
+          fileSys.getConf().getInt("io.file.buffer.size", 4096), (short) 3,
+          blockSize);
+      stm.write(new byte[(blockSize * 3) / 2]);
+      // We do not close the stream so that
+      // the writing seems to be still ongoing
+      stm.sync();
+
+      LocatedBlocks blocks = cluster.getNameNode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodes = blocks.get(0).getLocations();
+      assertEquals(3, nodes.length);
+      for (DataNode dn : cluster.getDataNodes()) {
+        if (dn.getHostName().equals(nodes[0].getHostName())) {
+          // set the first node as stale
+          DatanodeDescriptor staleNodeInfo = cluster.getNameNode()
+              .getNamesystem().getDatanode(dn.dnRegistration);
+          staleNodeInfo.setLastUpdate(System.currentTimeMillis()
+              - staleInterval - 1);
+        }
+      }
+
+      LocatedBlocks blocksAfterStale = cluster.getNameNode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
+      assertEquals(3, nodesAfterStale.length);
+      assertEquals(nodes[0].getHostName(), nodesAfterStale[2].getHostName());
+    } finally {
+      if (stm != null) {
+        stm.close();
+      }
+      cluster.shutdown();
+    }
+  }
+  
   /** test getBlocks */
   public void testGetBlocks() throws Exception {
     final Configuration CONF = new Configuration();
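
For reference, the two knobs testReadSelectNonStaleDatanode exercises are
independent: the interval decides when a datanode counts as stale, and the
boolean decides whether getBlockLocations() sorts stale replicas last. A
minimal sketch using the DFSConfigKeys names from the hunk above (the 30s
interval is an arbitrary example value):

    Configuration conf = new Configuration();
    // Sort replicas on stale datanodes to the end of located-block lists.
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    // A datanode is stale once its last heartbeat is older than this (ms).
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 30 * 1000L);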

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java Fri Jun 21 06:37:27 2013
@@ -52,15 +52,15 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getDefaultPort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fsUri.getPort());
     
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
         fs.getCanonicalServiceName()
     );
   }
@@ -75,14 +75,13 @@ public class TestHftpFileSystem {
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(123, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
     assertEquals(123, fsUri.getPort());
     
     assertEquals(
-        "127.0.0.1:456",
+        "127.0.0.1:123",
         fs.getCanonicalServiceName()
     );
   }
@@ -94,15 +93,13 @@ public class TestHftpFileSystem {
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
     assertEquals(uri.getPort(), fsUri.getPort());
     
-    assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
-        fs.getCanonicalServiceName()
+    assertEquals("127.0.0.1:123",
+                 fs.getCanonicalServiceName()
     );
   }
 
@@ -116,15 +113,13 @@ public class TestHftpFileSystem {
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(123, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
     assertEquals(789, fsUri.getPort());
     
-    assertEquals(
-        "127.0.0.1:456",
-        fs.getCanonicalServiceName()
+    assertEquals("127.0.0.1:789",
+                 fs.getCanonicalServiceName()
     );
   }
 
@@ -137,7 +132,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
@@ -159,7 +153,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(456, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
@@ -178,7 +171,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
@@ -200,7 +192,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(456, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
@@ -281,4 +272,4 @@ public class TestHftpFileSystem {
     assertNotNull(gotToken);
     assertEquals(gotToken, hftpToken);
   }
-}
\ No newline at end of file
+}
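
The TestHftpFileSystem changes reflect that the delegation-token service
string now follows the port the hftp URI actually uses (the explicit URI
port, else the HTTP default) rather than a separate HTTPS port. A sketch of
the resulting behavior, mirroring the assertions above:

    URI uri = URI.create("hftp://127.0.0.1:123");
    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, new Configuration());
    // The service name tracks the URI's own port after this change.
    assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());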

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLease.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLease.java Fri Jun 21 06:37:27 2013
@@ -17,25 +17,35 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
 
-public class TestLease extends junit.framework.TestCase {
+public class TestLease {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
-    return cluster.getNameNode().namesystem.leaseManager.getLeaseByPath(src.toString()) != null;
+    return cluster.getNameNode().getNamesystem().leaseManager
+        .getLeaseByPath(src.toString()) != null;
   }
   
   final Path dir = new Path("/test/lease/");
 
+  @Test
   public void testLease() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     try {
       FileSystem fs = cluster.getFileSystem();
-      assertTrue(fs.mkdirs(dir));
+      Assert.assertTrue(fs.mkdirs(dir));
       
       Path a = new Path(dir, "a");
       Path b = new Path(dir, "b");
@@ -43,24 +53,66 @@ public class TestLease extends junit.fra
       DataOutputStream a_out = fs.create(a);
       a_out.writeBytes("something");
 
-      assertTrue(hasLease(cluster, a));
-      assertTrue(!hasLease(cluster, b));
+      Assert.assertTrue(hasLease(cluster, a));
+      Assert.assertTrue(!hasLease(cluster, b));
       
       DataOutputStream b_out = fs.create(b);
       b_out.writeBytes("something");
 
-      assertTrue(hasLease(cluster, a));
-      assertTrue(hasLease(cluster, b));
+      Assert.assertTrue(hasLease(cluster, a));
+      Assert.assertTrue(hasLease(cluster, b));
 
       a_out.close();
       b_out.close();
 
-      assertTrue(!hasLease(cluster, a));
-      assertTrue(!hasLease(cluster, b));
+      Assert.assertTrue(!hasLease(cluster, a));
+      Assert.assertTrue(!hasLease(cluster, b));
       
       fs.delete(dir, true);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+
+  @Test
+  public void testFactory() throws Exception {
+    final String[] groups = new String[]{"supergroup"};
+    final UserGroupInformation[] ugi = new UserGroupInformation[3];
+    for(int i = 0; i < ugi.length; i++) {
+      ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
+    }
+
+    final Configuration conf = new Configuration();
+    final DFSClient c1 = createDFSClientAs(ugi[0], conf);
+    FSDataOutputStream out1 = createFsOut(c1, "/out1");
+    final DFSClient c2 = createDFSClientAs(ugi[0], conf);
+    FSDataOutputStream out2 = createFsOut(c2, "/out2");
+    Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
+    final DFSClient c3 = createDFSClientAs(ugi[1], conf);
+    FSDataOutputStream out3 = createFsOut(c3, "/out3");
+    Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
+    final DFSClient c4 = createDFSClientAs(ugi[1], conf);
+    FSDataOutputStream out4 = createFsOut(c4, "/out4");
+    Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
+    final DFSClient c5 = createDFSClientAs(ugi[2], conf);
+    FSDataOutputStream out5 = createFsOut(c5, "/out5");
+    Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
+    Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
+  }
+
+  private FSDataOutputStream createFsOut(DFSClient dfs, String path)
+      throws IOException {
+    return new FSDataOutputStream(dfs.create(path, true), null);
+  }
+  
+  static final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
+  static public DFSClient createDFSClientAs(UserGroupInformation ugi, 
+      final Configuration conf) throws Exception {
+    return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
+      @Override
+      public DFSClient run() throws Exception {
+        return new DFSClient(null, mcp, conf, null);
+      }
+    });
+  }
 }
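
The invariant testFactory pins down is that lease renewers are pooled per
user: two DFSClient instances created under the same UGI share one renewer,
while a different UGI gets its own. A minimal sketch of that check, reusing
the test's createDFSClientAs helper:

    DFSClient sameUserA = createDFSClientAs(ugi[0], conf);
    DFSClient sameUserB = createDFSClientAs(ugi[0], conf);
    DFSClient otherUser = createDFSClientAs(ugi[1], conf);
    // Same user => shared renewer; different user => a distinct one.
    Assert.assertSame(sameUserA.getLeaseRenewer(), sameUserB.getLeaseRenewer());
    Assert.assertNotSame(sameUserA.getLeaseRenewer(), otherUser.getLeaseRenewer());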

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java Fri Jun 21 06:37:27 2013
@@ -70,7 +70,7 @@ public class TestLeaseRecovery extends j
     final int ORG_FILE_SIZE = 3000; 
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     MiniDFSCluster cluster = null;
 
     try {
@@ -138,7 +138,7 @@ public class TestLeaseRecovery extends j
 
       BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
       int minsize = min(newblocksizes);
-      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
+      long currentGS = cluster.getNameNode().getNamesystem().getGenerationStamp();
       lastblock.setGenerationStamp(currentGS);
       for(int i = 0; i < REPLICATION_NUM; i++) {
         updatedmetainfo[i] = datanodes[i].getBlockMetaDataInfo(lastblock);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jun 21 06:37:27 2013
@@ -157,7 +157,7 @@ public class TestLeaseRecovery2 extends 
     stm.sync();
     if (triggerSoftLease) {
       AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-      dfs.dfs.leasechecker.interruptAndJoin();
+      dfs.dfs.getLeaseRenewer().interruptAndJoin();
     }
     return filepath;
   }