Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/08/18 20:41:37 UTC

svn commit: r1618700 [2/2] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ hadoop-hdfs-httpfs/src/main/ja...

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1618700&r1=1618699&r2=1618700&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Mon Aug 18 18:41:31 2014
@@ -50,6 +50,17 @@ public class TestCommitBlockSynchronizat
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
     namesystem.setImageLoaded(true);
+
+    // set the file's parent as the root and add the file to the inodeMap, so
+    // FSNamesystem's isFileDeleted() method will return false for this file
+    if (file.getParent() == null) {
+      INodeDirectory parent = mock(INodeDirectory.class);
+      parent.setLocalName(new byte[0]);
+      parent.addChild(file);
+      file.setParent(parent);
+    }
+    namesystem.dir.getINodeMap().put(file);
+
     FSNamesystem namesystemSpy = spy(namesystem);
     BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
         block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
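
The comment in this hunk leans on FSNamesystem#isFileDeleted(): a file whose
parent chain is broken, or which is no longer present in the inode map, is
treated as deleted, so commitBlockSynchronization would bail out on it. The
following is an illustrative sketch only (the helper name isFileDeletedSketch
is hypothetical, not the actual Hadoop implementation) of the kind of check
the mock parent and the inodeMap.put() call above are arranging to pass:

    // Illustrative sketch only; not the real FSNamesystem code. It shows
    // why the test gives the file a parent and registers it in the inode
    // map before commitBlockSynchronization runs.
    private boolean isFileDeletedSketch(FSDirectory dir, INodeFile file) {
      // A file that is unreachable from any parent directory counts as deleted.
      if (file.getParent() == null) {
        return true;
      }
      // A file whose id has been dropped from the inode map counts as deleted.
      if (dir.getINodeMap().get(file.getId()) == null) {
        return true;
      }
      return false;
    }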

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java?rev=1618700&r1=1618699&r2=1618700&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java Mon Aug 18 18:41:31 2014
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.FileNotFoundException;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -27,19 +29,30 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
 
 
@@ -49,6 +62,7 @@ import org.mockito.internal.util.reflect
  * whole duration.
  */
 public class TestDeleteRace {
+  private static final int BLOCK_SIZE = 4096;
   private static final Log LOG = LogFactory.getLog(TestDeleteRace.class);
   private static final Configuration conf = new HdfsConfiguration();
   private MiniDFSCluster cluster;
@@ -201,7 +215,126 @@ public class TestDeleteRace {
         cluster.shutdown();
       }
     }
+  }
+
+  /**
+   * Test the race between a delete operation and the
+   * commitBlockSynchronization method. See HDFS-6825.
+   * @param hasSnapshot whether to snapshot the root directory before the delete
+   * @throws Exception
+   */
+  private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
+      throws Exception {
+    LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
+    final String testPaths[] = {
+        "/test-file",
+        "/testdir/testdir1/test-file"
+    };
+    final Path rootPath = new Path("/");
+    final Configuration conf = new Configuration();
+    // Disable permissions so that another user can recover the lease.
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    FSDataOutputStream stm = null;
+    Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
+        new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(3)
+          .build();
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      int stId = 0;
+      for (String testPath : testPaths) {
+        LOG.info("test on " + testPath + " snapshot: " + hasSnapshot);
+        Path fPath = new Path(testPath);
+        // find the topmost non-root ancestor of the file
+        Path grandestNonRootParent = fPath;
+        while (!grandestNonRootParent.getParent().equals(rootPath)) {
+          grandestNonRootParent = grandestNonRootParent.getParent();
+        }
+        stm = fs.create(fPath);
+        LOG.info("test on " + testPath + " created " + fPath);
+
+        // write a half block
+        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
+        stm.hflush();
+
+        if (hasSnapshot) {
+          SnapshotTestHelper.createSnapshot(fs, rootPath,
+              "st" + String.valueOf(stId));
+          ++stId;
+        }
+
+        // Look into the block manager on the active node for the block
+        // under construction.
+        NameNode nn = cluster.getNameNode();
+        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
+        DatanodeDescriptor expectedPrimary =
+            DFSTestUtil.getExpectedPrimaryNode(nn, blk);
+        LOG.info("Expecting block recovery to be triggered on DN " +
+            expectedPrimary);
+
+        // Find the corresponding DN daemon, and spy on its connection to the
+        // active.
+        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
+        DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
+        if (nnSpy == null) {
+          nnSpy = DataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
+          dnMap.put(primaryDN, nnSpy);
+        }
+
+        // Delay the commitBlockSynchronization call
+        DelayAnswer delayer = new DelayAnswer(LOG);
+        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
+            Mockito.eq(blk),
+            Mockito.anyInt(),  // new genstamp
+            Mockito.anyLong(), // new length
+            Mockito.eq(true),  // close file
+            Mockito.eq(false), // delete block
+            (DatanodeID[]) Mockito.anyObject(), // new targets
+            (String[]) Mockito.anyObject());    // new target storages
+
+        fs.recoverLease(fPath);
+
+        LOG.info("Waiting for commitBlockSynchronization call from primary");
+        delayer.waitForCall();
+
+        LOG.info("Deleting recursively " + grandestNonRootParent);
+        fs.delete(grandestNonRootParent, true);
+
+        delayer.proceed();
+        LOG.info("Now wait for result");
+        delayer.waitForResult();
+        Throwable t = delayer.getThrown();
+        if (t != null) {
+          LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
+        }
+      } // end of loop over test paths
+      LOG.info("Now check we can restart");
+      cluster.restartNameNodes();
+      LOG.info("Restart finished");
+    } finally {
+      if (stm != null) {
+        IOUtils.closeStream(stm);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
+  @Test(timeout=600000)
+  public void testDeleteAndCommitBlockSynchronizationRaceNoSnapshot()
+      throws Exception {
+    testDeleteAndCommitBlockSynchronizationRace(false);
+  }
 
+  @Test(timeout=600000)
+  public void testDeleteAndCommitBlockSynchronizationRaceHasSnapshot()
+      throws Exception {
+    testDeleteAndCommitBlockSynchronizationRace(true);
   }
 }
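
The race window in the new test is opened with GenericTestUtils.DelayAnswer,
which stalls the primary datanode's commitBlockSynchronization RPC while the
test deletes the file. As a minimal sketch of that pattern, assuming only
plain Mockito and java.util.concurrent (DelayAnswerSketch is a hypothetical
stand-in for the real helper), the blocking Answer can be built from three
latches:

    import java.util.concurrent.CountDownLatch;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    // Hedged sketch of the DelayAnswer pattern used by the test above.
    class DelayAnswerSketch implements Answer<Object> {
      private final CountDownLatch fireLatch = new CountDownLatch(1);   // RPC arrived
      private final CountDownLatch waitLatch = new CountDownLatch(1);   // test says go
      private final CountDownLatch resultLatch = new CountDownLatch(1); // RPC finished
      private volatile Throwable thrown;

      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        fireLatch.countDown();                 // tell the test the call was issued
        waitLatch.await();                     // hold the call until proceed()
        try {
          return invocation.callRealMethod();  // forward to the spied proxy
        } catch (Throwable t) {
          thrown = t;                          // keep the failure for getThrown()
          throw t;
        } finally {
          resultLatch.countDown();
        }
      }

      void waitForCall() throws InterruptedException { fireLatch.await(); }
      void proceed() { waitLatch.countDown(); }
      void waitForResult() throws InterruptedException { resultLatch.await(); }
      Throwable getThrown() { return thrown; }
    }

With the RPC parked on waitLatch, the test deletes the file's topmost
ancestor, then releases the latch so commitBlockSynchronization runs against
an already-deleted file, which is exactly the HDFS-6825 race.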

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java?rev=1618700&r1=1618699&r2=1618700&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java Mon Aug 18 18:41:31 2014
@@ -356,7 +356,8 @@ public class TestPipelinesFailover {
       
       NameNode nn0 = cluster.getNameNode(0);
       ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
-      DatanodeDescriptor expectedPrimary = getExpectedPrimaryNode(nn0, blk);
+      DatanodeDescriptor expectedPrimary =
+          DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
       LOG.info("Expecting block recovery to be triggered on DN " +
           expectedPrimary);
       
@@ -506,37 +507,6 @@ public class TestPipelinesFailover {
     }
   }
 
-
-
-  /**
-   * @return the node which is expected to run the recovery of the
-   * given block, which is known to be under construction inside the
-   * given NameNOde.
-   */
-  private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
-      ExtendedBlock blk) {
-    BlockManager bm0 = nn.getNamesystem().getBlockManager();
-    BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
-    assertTrue("Block " + blk + " should be under construction, " +
-        "got: " + storedBlock,
-        storedBlock instanceof BlockInfoUnderConstruction);
-    BlockInfoUnderConstruction ucBlock =
-      (BlockInfoUnderConstruction)storedBlock;
-    // We expect that the replica with the most recent heart beat will be
-    // the one to be in charge of the synchronization / recovery protocol.
-    final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
-    DatanodeStorageInfo expectedPrimary = storages[0];
-    long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
-    for (int i = 1; i < storages.length; i++) {
-      final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
-      if (lastUpdate > mostRecentLastUpdate) {
-        expectedPrimary = storages[i];
-        mostRecentLastUpdate = lastUpdate;
-      }
-    }
-    return expectedPrimary.getDatanodeDescriptor();
-  }
-
   private DistributedFileSystem createFsAsOtherUser(
       final MiniDFSCluster cluster, final Configuration conf)
       throws IOException, InterruptedException {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1618700&r1=1618699&r2=1618700&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Mon Aug 18 18:41:31 2014
@@ -8655,6 +8655,50 @@
       </comparators>
     </test>
 
+    <test> <!-- TESTED -->
+      <description>count: file using -h option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes file1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data1k file2</command>
+        <command>-fs NAMENODE -count -h file1 file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1 file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*15 file1</expected-output>
+        </comparator>
+      </comparators>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*1\.0 K file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: directory using -q and -h options</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir1 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q -h /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1 M( |\t)*1 M( |\t)*1( |\t)*0( |\t)*0 /dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <!-- Tests for chmod -->
     <test> <!-- TESTED -->
       <description>chmod: change permission(octal mode) of file in absolute path</description>
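
The two new count tests above exercise "hdfs dfs -count" with the
human-readable -h flag: the size column is rendered as "15" for the 15-byte
file but "1.0 K" for the 1 KB file, and with -q the quota columns (quota,
remaining quota, space quota, remaining space quota) precede the
directory/file/size columns. As an illustrative, hypothetical sketch of the
programmatic counterpart (CountSketch is not part of Hadoop), the three
numeric columns of plain -count come from FileSystem#getContentSummary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical sketch: the programmatic counterpart of "hdfs dfs -count".
    public class CountSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        for (String p : args) {
          ContentSummary cs = fs.getContentSummary(new Path(p));
          // DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME: the same columns
          // the RegexpComparators above match against.
          System.out.printf("%12d %12d %12d %s%n",
              cs.getDirectoryCount(), cs.getFileCount(), cs.getLength(), p);
        }
      }
    }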