Posted to hdfs-commits@hadoop.apache.org by vi...@apache.org on 2013/11/10 21:09:14 UTC

svn commit: r1540535 [2/2] - in /hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/ap...

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Sun Nov 10 20:09:09 2013
@@ -203,6 +203,9 @@ public class SnapshotFSImageFormat {
       // useful, but set the parent here to be consistent with the original 
       // fsdir tree.
       deleted.setParent(parent);
+      if (deleted.isFile()) {
+        loader.updateBlocksMap(deleted.asFile());
+      }
     }
     return deletedList;
   }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java Sun Nov 10 20:09:09 2013
@@ -149,8 +149,8 @@ public class StartupProgress {
    * @return Counter associated with phase and step
    */
   public Counter getCounter(Phase phase, Step step) {
-    final StepTracking tracking = lazyInitStep(phase, step);
     if (!isComplete()) {
+      final StepTracking tracking = lazyInitStep(phase, step);
       return new Counter() {
         @Override
         public void increment() {
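
The getCounter() change above defers lazyInitStep() until after the isComplete() check, so requesting a counter once startup has finished no longer creates tracking state for a step that never ran (the new assertion in TestStartupProgress further down checks that no such step shows up in the view). A minimal, self-contained sketch of that pattern; the class, field, and method names here are illustrative, not the Hadoop ones:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class LazyCounters {
      public interface Counter { void increment(); }

      private final Map<String, AtomicLong> tracking = new ConcurrentHashMap<>();
      private volatile boolean complete = false;

      public void markComplete() { complete = true; }

      public Counter getCounter(String step) {
        if (!complete) {
          // Tracking state is only allocated while progress is still running.
          final AtomicLong count =
              tracking.computeIfAbsent(step, k -> new AtomicLong());
          return new Counter() {
            @Override
            public void increment() { count.incrementAndGet(); }
          };
        }
        // After completion, hand back a no-op so no new tracking entries appear.
        return new Counter() {
          @Override
          public void increment() { }
        };
      }

      public long get(String step) {
        AtomicLong c = tracking.get(step);
        return c == null ? 0 : c.get();
      }

      public static void main(String[] args) {
        LazyCounters p = new LazyCounters();
        p.getCounter("LOADING_FSIMAGE").increment();
        p.markComplete();
        p.getCounter("LOADING_EDITS").increment();  // no-op, nothing recorded
        // Prints "1 0": the post-completion step left no tracking behind.
        System.out.println(p.get("LOADING_FSIMAGE") + " " + p.get("LOADING_EDITS"));
      }
    }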

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java Sun Nov 10 20:09:09 2013
@@ -21,9 +21,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * A tool used to list all snapshottable directories that are owned by the 
@@ -31,23 +34,23 @@ import org.apache.hadoop.hdfs.protocol.S
  * is a super user.
  */
 @InterfaceAudience.Private
-public class LsSnapshottableDir {
-  public static void main(String[] argv) throws IOException {
+public class LsSnapshottableDir extends Configured implements Tool {
+  @Override
+  public int run(String[] argv) throws Exception {
     String description = "LsSnapshottableDir: \n" +
         "\tGet the list of snapshottable directories that are owned by the current user.\n" +
         "\tReturn all the snapshottable directories if the current user is a super user.\n";
 
     if(argv.length != 0) {
       System.err.println("Usage: \n" + description);
-      System.exit(1);
+      return 1;
     }
     
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(getConf());
     if (! (fs instanceof DistributedFileSystem)) {
       System.err.println(
           "LsSnapshottableDir can only be used in DistributedFileSystem");
-      System.exit(1);
+      return 1;
     }
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
     
@@ -57,7 +60,12 @@ public class LsSnapshottableDir {
     } catch (IOException e) {
       String[] content = e.getLocalizedMessage().split("\n");
       System.err.println("lsSnapshottableDir: " + content[0]);
+      return 1;
     }
+    return 0;
+  }
+  public static void main(String[] argv) throws Exception {
+    int rc = ToolRunner.run(new LsSnapshottableDir(), argv);
+    System.exit(rc);
   }
-
 }
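
The LsSnapshottableDir change above converts a bare main() that called System.exit() into a Tool driven by ToolRunner: the Configuration now comes from getConf(), failures are reported through the return value of run(), and the command picks up Hadoop's generic options (-conf, -D, -fs, and so on). A minimal sketch of the same pattern for a hypothetical tool:

    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class MyHdfsTool extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        // getConf() returns the Configuration that ToolRunner populated from
        // the generic options before calling run().
        FileSystem fs = FileSystem.get(getConf());
        System.out.println("Default FS: " + fs.getUri());
        return 0;  // a non-zero return becomes the process exit code below
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new MyHdfsTool(), args));
      }
    }

Run as, for example, "hadoop jar mytool.jar MyHdfsTool -D fs.defaultFS=hdfs://nn:8020"; ToolRunner strips the generic options before handing the remaining arguments to run().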

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Sun Nov 10 20:09:09 2013
@@ -684,19 +684,6 @@ public class WebHdfsFileSystem extends F
     }
   }
 
-  @VisibleForTesting
-  final class ConnRunner extends AbstractRunner {
-    protected ConnRunner(final HttpOpParam.Op op, HttpURLConnection conn) {
-      super(op, false);
-      this.conn = conn;
-    }
-
-    @Override
-    protected URL getUrl() {
-      return null;
-    }
-  }
-
   private FsPermission applyUMask(FsPermission permission) {
     if (permission == null) {
       permission = FsPermission.getDefault();

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sun Nov 10 20:09:09 2013
@@ -1459,4 +1459,29 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name>
+  <value>2.0</value>
+  <description>
+    Determines when an active namenode will roll its own edit log.
+    The actual threshold (in number of edits) is determined by multiplying
+    this value by dfs.namenode.checkpoint.txns.
+
+    This prevents extremely large edit files from accumulating on the active
+    namenode, which can cause timeouts during namenode startup and pose an
+    administrative hassle. This behavior is intended as a failsafe for when
+    the standby or secondary namenode fails to roll the edit log by the normal
+    checkpoint threshold.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edit.log.autoroll.check.interval.ms</name>
+  <value>300000</value>
+  <description>
+    How often an active namenode will check if it needs to roll its edit log,
+    in milliseconds.
+  </description>
+</property>
+
 </configuration>
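
The two properties added above work together with dfs.namenode.checkpoint.txns: the active namenode rolls its own edit log once the edit count exceeds the multiplier times dfs.namenode.checkpoint.txns, re-evaluating that condition at the configured check interval. A small illustrative calculation of the effective threshold; the 1,000,000 fallback used for dfs.namenode.checkpoint.txns below is only an assumption for this sketch (it matches the usual stock default):

    import org.apache.hadoop.conf.Configuration;

    public class AutorollThreshold {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        float multiplier = conf.getFloat(
            "dfs.namenode.edit.log.autoroll.multiplier.threshold", 2.0f);
        long checkpointTxns = conf.getLong("dfs.namenode.checkpoint.txns", 1000000L);
        long checkIntervalMs = conf.getLong(
            "dfs.namenode.edit.log.autoroll.check.interval.ms", 300000L);
        // With the defaults shown above: 2.0 * 1,000,000 = 2,000,000 edits,
        // re-checked every 300,000 ms (5 minutes).
        long rollThreshold = (long) (multiplier * checkpointTxns);
        System.out.println(rollThreshold + " edits, checked every "
            + checkIntervalMs + " ms");
      }
    }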

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Sun Nov 10 20:09:09 2013
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.util.ThreadUtil;
 
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 
@@ -41,10 +43,25 @@ import org.junit.Test;
  */
 public class TestDFSClientExcludedNodes {
 
-  @Test(timeout=10000)
+  private MiniDFSCluster cluster;
+  private Configuration conf;
+
+  @Before
+  public void setUp() {
+    cluster = null;
+    conf = new HdfsConfiguration();
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout=60000)
   public void testExcludedNodes() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     Path filePath = new Path("/testExcludedNodes");
 
@@ -67,17 +84,16 @@ public class TestDFSClientExcludedNodes 
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testExcludedNodesForgiveness() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    // Forgive nodes in under 1s for this test case.
+    // Forgive nodes in under 2.5s for this test case.
     conf.setLong(
         DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
-        1000);
+        2500);
     // We'll be using a 512 bytes block size just for tests
     // so making sure the checksum bytes too match it.
     conf.setInt("io.bytes.per.checksum", 512);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     List<DataNodeProperties> props = cluster.dataNodes;
     FileSystem fs = cluster.getFileSystem();
     Path filePath = new Path("/testForgivingExcludedNodes");
@@ -112,11 +128,11 @@ public class TestDFSClientExcludedNodes 
     Assert.assertEquals(true, cluster.restartDataNode(two, true));
     cluster.waitActive();
 
-    // Sleep for 2s, to let the excluded nodes be expired
+    // Sleep for 5s, to let the excluded nodes be expired
     // from the excludes list (i.e. forgiven after the configured wait period).
-    // [Sleeping just in case the restart of the DNs completed < 2s cause
+    // [Sleeping just in case the restart of the DNs completed in < 5s, because
     // otherwise, we'll end up quickly excluding those again.]
-    ThreadUtil.sleepAtLeastIgnoreInterrupts(2000);
+    ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
 
     // Terminate the last good DN, to assert that there's no
     // single-DN-available scenario, caused by not forgiving the other

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Sun Nov 10 20:09:09 2013
@@ -23,20 +23,14 @@ package org.apache.hadoop.hdfs.security;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.net.HttpURLConnection;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
-import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Enumeration;
-import java.util.Map;
-
-import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -47,23 +41,17 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.apache.hadoop.hdfs.web.resources.DoAsParam;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
-import org.apache.hadoop.hdfs.web.resources.GetOpParam;
-import org.apache.hadoop.hdfs.web.resources.PostOpParam;
-import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
-import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestDelegationTokenForProxyUser {
   private static MiniDFSCluster cluster;
@@ -155,56 +143,26 @@ public class TestDelegationTokenForProxy
     }
   }
   
-  @Test(timeout=20000)
+  @Test(timeout=5000)
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
-    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
     final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
     
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
 
-    {
-      //test GETHOMEDIRECTORY with doAs
-      final URL url = WebHdfsTestUtil.toUrl(webhdfs,
-          GetOpParam.Op.GETHOMEDIRECTORY,  root, new DoAsParam(PROXY_USER));
-      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(conn, HttpServletResponse.SC_OK);
-      conn.disconnect();
-  
-      final Object responsePath = m.get(Path.class.getSimpleName());
-      WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
-      Assert.assertEquals("/user/" + PROXY_USER, responsePath);
-    }
+    Whitebox.setInternalState(webhdfs, "ugi", proxyUgi);
 
     {
-      //test GETHOMEDIRECTORY with DOas
-      final URL url = WebHdfsTestUtil.toUrl(webhdfs,
-          GetOpParam.Op.GETHOMEDIRECTORY,  root, new DoAsParam(PROXY_USER) {
-            @Override
-            public String getName() {
-              return "DOas";
-            }
-      });
-      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(conn, HttpServletResponse.SC_OK);
-      conn.disconnect();
-  
-      final Object responsePath = m.get(Path.class.getSimpleName());
+      Path responsePath = webhdfs.getHomeDirectory();
       WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
-      Assert.assertEquals("/user/" + PROXY_USER, responsePath);
+      Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
     }
 
     final Path f = new Path("/testWebHdfsDoAs/a.txt");
     {
-      //test create file with doAs
-      final PutOpParam.Op op = PutOpParam.Op.CREATE;
-      final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
-      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
-      final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
+      FSDataOutputStream out = webhdfs.create(f);
       out.write("Hello, webhdfs user!".getBytes());
       out.close();
   
@@ -214,12 +172,7 @@ public class TestDelegationTokenForProxy
     }
 
     {
-      //test append file with doAs
-      final PostOpParam.Op op = PostOpParam.Op.APPEND;
-      final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
-      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
-      final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
+      final FSDataOutputStream out = webhdfs.append(f);
       out.write("\nHello again!".getBytes());
       out.close();
   

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Sun Nov 10 20:09:09 2013
@@ -2243,4 +2243,50 @@ public class TestRenameWithSnapshots {
     
     restartClusterAndCheckImage(true);
   }
+  
+  /**
+   * Make sure we clean the whole subtree under a DstReference node after 
+   * deleting a snapshot.
+   * see HDFS-5476.
+   */
+  @Test
+  public void testCleanDstReference() throws Exception {
+    final Path test = new Path("/test");
+    final Path foo = new Path(test, "foo");
+    final Path bar = new Path(foo, "bar");
+    hdfs.mkdirs(bar);
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    
+    // create file after s0 so that the file should not be included in s0
+    final Path fileInBar = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
+    // rename foo --> foo2
+    final Path foo2 = new Path(test, "foo2");
+    hdfs.rename(foo, foo2);
+    // create snapshot s1, note the file is included in s1
+    hdfs.createSnapshot(test, "s1");
+    // delete bar and foo2
+    hdfs.delete(new Path(foo2, "bar"), true);
+    hdfs.delete(foo2, true);
+    
+    final Path sfileInBar = SnapshotTestHelper.getSnapshotPath(test, "s1",
+        "foo2/bar/file");
+    assertTrue(hdfs.exists(sfileInBar));
+    
+    hdfs.deleteSnapshot(test, "s1");
+    assertFalse(hdfs.exists(sfileInBar));
+    
+    restartClusterAndCheckImage(true);
+    // make sure the file under bar is deleted 
+    final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "foo/bar");
+    INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
+        .getINode(barInS0.toString());
+    assertEquals(0, barNode.getChildrenList(null).size());
+    List<DirectoryDiff> diffList = barNode.getDiffs().asList();
+    assertEquals(1, diffList.size());
+    DirectoryDiff diff = diffList.get(0);
+    assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
+    assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
+  }
 }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Sun Nov 10 20:09:09 2013
@@ -21,6 +21,7 @@ import static org.apache.hadoop.test.Gen
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -31,11 +32,14 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -205,4 +209,187 @@ public class TestSnapshotBlocksMap {
       assertExceptionContains("File does not exist: " + s1f0, e);
     }
   }
+
+  /*
+   * Try to read files that exist in a snapshot but were deleted from their
+   * original location, after a checkpoint and a NameNode restart. See HDFS-5427.
+   */
+  @Test(timeout = 30000)
+  public void testReadSnapshotFileWithCheckpoint() throws Exception {
+    Path foo = new Path("/foo");
+    hdfs.mkdirs(foo);
+    hdfs.allowSnapshot(foo);
+    Path bar = new Path("/foo/bar");
+    DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
+    hdfs.createSnapshot(foo, "s1");
+    assertTrue(hdfs.delete(bar, true));
+
+    // checkpoint
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+
+    // restart namenode to load snapshot files from fsimage
+    cluster.restartNameNode(true);
+    String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
+    DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
+  }
+
+  /*
+   * Try to read files that exist in a snapshot but were renamed away and then
+   * deleted, after a checkpoint and a NameNode restart. See HDFS-5427.
+   */
+  @Test(timeout = 30000)
+  public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path foo2 = new Path("/foo2");
+    hdfs.mkdirs(foo);
+    hdfs.mkdirs(foo2);
+
+    hdfs.allowSnapshot(foo);
+    hdfs.allowSnapshot(foo2);
+    final Path bar = new Path(foo, "bar");
+    final Path bar2 = new Path(foo2, "bar");
+    DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
+    hdfs.createSnapshot(foo, "s1");
+    // rename to another snapshottable directory and take snapshot
+    assertTrue(hdfs.rename(bar, bar2));
+    hdfs.createSnapshot(foo2, "s2");
+    // delete the original renamed file to make sure blocks are not updated by
+    // the original file
+    assertTrue(hdfs.delete(bar2, true));
+
+    // checkpoint
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+    // restart namenode to load snapshot files from fsimage
+    cluster.restartNameNode(true);
+    // file in first snapshot
+    String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
+    DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
+    // file in second snapshot after rename+delete
+    String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
+        "s2/bar");
+    DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
+  }
+
+  /**
+   * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(bar, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
+        bar.getName());
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
+
+  /** Make sure we delete 0-sized block when deleting an INodeFileUC */
+  @Test
+  public void testDeletionWithZeroSizeBlock2() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(subDir, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
+  
+  /**
+   * 1. Rename an under-construction file with a 0-sized block after snapshot.
+   * 2. Delete the directory containing the renamed file.
+   * Make sure the 0-sized block is deleted.
+   * See HDFS-5476.
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock3() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    // rename bar
+    final Path bar2 = new Path(subDir, "bar2");
+    hdfs.rename(bar, bar2);
+    
+    INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
+    blks = bar2Node.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    // delete subDir
+    hdfs.delete(subDir, true);
+    
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
 }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java Sun Nov 10 20:09:09 2013
@@ -179,6 +179,14 @@ public class TestStartupProgress {
     startupProgress.endStep(LOADING_FSIMAGE, step);
     startupProgress.endPhase(LOADING_FSIMAGE);
 
+    // Also attempt a whole new step that wasn't used last time.
+    startupProgress.beginPhase(LOADING_EDITS);
+    Step newStep = new Step("file1");
+    startupProgress.beginStep(LOADING_EDITS, newStep);
+    incrementCounter(startupProgress, LOADING_EDITS, newStep, 100L);
+    startupProgress.endStep(LOADING_EDITS, newStep);
+    startupProgress.endPhase(LOADING_EDITS);
+
     StartupProgressView after = startupProgress.createView();
 
     // Expect that data was frozen after completion of entire startup process, so
@@ -200,6 +208,7 @@ public class TestStartupProgress {
       after.getTotal(LOADING_FSIMAGE));
     assertEquals(before.getTotal(LOADING_FSIMAGE, step),
       after.getTotal(LOADING_FSIMAGE, step));
+    assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
   }
 
   @Test(timeout=10000)

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Sun Nov 10 20:09:09 2013
@@ -78,11 +78,6 @@ public class WebHdfsTestUtil {
     Assert.assertEquals(expectedResponseCode, conn.getResponseCode());
     return WebHdfsFileSystem.jsonParse(conn, false);
   }
-  
-  public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
-      final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
-    return webhdfs.new ConnRunner(op, conn).twoStepWrite();
-  }
 
   public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,
       final HttpOpParam.Op op, final HttpURLConnection conn,