You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/03/31 19:46:42 UTC
hadoop git commit: HDFS-7617. Add unit tests for editlog transactions
for EC. Contributed by Hui Zheng.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7285 86a9b65df -> b3e2fc1ed
HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui Zheng.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3e2fc1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3e2fc1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3e2fc1e
Branch: refs/heads/HDFS-7285
Commit: b3e2fc1edae6f4794d9c34e1dd001861af8f283f
Parents: 86a9b65
Author: Zhe Zhang <zh...@apache.org>
Authored: Tue Mar 31 10:46:04 2015 -0700
Committer: Zhe Zhang <zh...@apache.org>
Committed: Tue Mar 31 10:46:04 2015 -0700
----------------------------------------------------------------------
.../server/namenode/TestFSEditLogLoader.java | 157 +++++++++++++++++++
1 file changed, 157 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e2fc1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..d3cb749 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -39,14 +39,18 @@ import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
@@ -414,4 +418,157 @@ public class TestFSEditLogLoader {
fromByte(code), FSEditLogOpCodes.fromByte(code));
}
}
+
+  @Test
+  public void testAddNewStripedBlock() throws IOException {
+    // Bring up a mini-cluster with enough datanodes for one striped group.
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      FSNamesystem namesystem = cluster.getNamesystem();
+
+      String dir = "/ec";
+      String fileName = "testfile_001";
+      String filePath = dir + "/" + fileName;
+      String client = "testUser1";
+      String machine = "testMachine1";
+      long blockId = 1;
+      long blockBytes = 1024;
+      long genStamp = 1426222918;
+      short dataBlockNum = HdfsConstants.NUM_DATA_BLOCKS;
+      short parityBlockNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+      // Mark the directory as erasure-coded through its storage policy.
+      dfs.mkdir(new Path(dir), new FsPermission("755"));
+      dfs.setStoragePolicy(new Path(dir),
+          HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+      // Create an empty file that will carry the striped block.
+      Path p = new Path(filePath);
+      DFSTestUtil.createFile(dfs, p, 0, (short) 1, 1);
+
+      // Checkpoint now, so the restart below replays only the edits that
+      // follow (i.e. the striped-block addition).
+      namesystem.enterSafeMode(false);
+      namesystem.saveNamespace(0, 0);
+      namesystem.leaveSafeMode();
+
+      // Attach one striped block to the file and log the add-block op.
+      BlockInfoStriped stripedBlock = new BlockInfoStriped(
+          new Block(blockId, blockBytes, genStamp), dataBlockNum,
+          parityBlockNum);
+      INodeFile inode = (INodeFile) namesystem.getFSDirectory()
+          .getINode(filePath);
+      inode.toUnderConstruction(client, machine);
+      inode.getStripedBlocksFeature().addBlock(stripedBlock);
+      namesystem.getEditLog().logAddBlock(filePath, inode);
+      inode.toCompleteFile(System.currentTimeMillis());
+
+      // Restart the namenode: if the reloaded block matches what was added
+      // above, the edit log transaction was applied to the fsimage
+      // successfully.
+      cluster.restartNameNodes();
+      cluster.waitActive();
+      namesystem = cluster.getNamesystem();
+
+      INodeFile reloaded = (INodeFile) namesystem.getFSDirectory()
+          .getINode(filePath);
+
+      assertTrue(reloaded.isWithStripedBlocks());
+
+      BlockInfoStriped[] reloadedBlocks =
+          (BlockInfoStriped[]) reloaded.getBlocks();
+      assertEquals(1, reloadedBlocks.length);
+      assertEquals(blockId, reloadedBlocks[0].getBlockId());
+      assertEquals(blockBytes, reloadedBlocks[0].getNumBytes());
+      assertEquals(genStamp, reloadedBlocks[0].getGenerationStamp());
+      assertEquals(dataBlockNum, reloadedBlocks[0].getDataBlockNum());
+      assertEquals(parityBlockNum, reloadedBlocks[0].getParityBlockNum());
+
+      cluster.shutdown();
+      cluster = null;
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
+  public void testUpdateStripedBlocks() throws IOException {
+    // Bring up a mini-cluster with enough datanodes for one striped group.
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      FSNamesystem namesystem = cluster.getNamesystem();
+
+      String dir = "/ec";
+      String fileName = "testfile_002";
+      String filePath = dir + "/" + fileName;
+      String client = "testUser2";
+      String machine = "testMachine2";
+      long blockId = 1;
+      long blockBytes = 1024;
+      long genStamp = 1426222918;
+      short dataBlockNum = HdfsConstants.NUM_DATA_BLOCKS;
+      short parityBlockNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+      // Mark the directory as erasure-coded through its storage policy.
+      dfs.mkdir(new Path(dir), new FsPermission("755"));
+      dfs.setStoragePolicy(new Path(dir),
+          HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+      // Create a file and attach one striped block to it.
+      Path p = new Path(filePath);
+      DFSTestUtil.createFile(dfs, p, 0, (short) 1, 1);
+      BlockInfoStriped stripedBlock = new BlockInfoStriped(
+          new Block(blockId, blockBytes, genStamp), dataBlockNum,
+          parityBlockNum);
+      INodeFile inode = (INodeFile) namesystem.getFSDirectory()
+          .getINode(filePath);
+      inode.toUnderConstruction(client, machine);
+      inode.getStripedBlocksFeature().addBlock(stripedBlock);
+      namesystem.getEditLog().logAddBlock(filePath, inode);
+      inode.toCompleteFile(System.currentTimeMillis());
+      // Checkpoint after the add, so the restart below replays only the
+      // update-blocks transaction that follows.
+      namesystem.enterSafeMode(false);
+      namesystem.saveNamespace(0, 0);
+      namesystem.leaveSafeMode();
+
+      // Mutate the last block's size and generation stamp, then log the
+      // update-blocks op.
+      long updatedBlockBytes = 1024 * 8;
+      long updatedGenStamp = 1426222918 + 3600;
+      inode.toUnderConstruction(client, machine);
+      inode.getLastBlock().setNumBytes(updatedBlockBytes);
+      inode.getLastBlock().setGenerationStamp(updatedGenStamp);
+      namesystem.getEditLog().logUpdateBlocks(filePath, inode, true);
+      inode.toCompleteFile(System.currentTimeMillis());
+
+      // Restart the namenode: if the reloaded block carries the new size
+      // and generation stamp, the edit log transaction was applied to the
+      // fsimage successfully.
+      cluster.restartNameNodes();
+      cluster.waitActive();
+      namesystem = cluster.getNamesystem();
+
+      INodeFile reloaded = (INodeFile) namesystem.getFSDirectory()
+          .getINode(filePath);
+
+      assertTrue(reloaded.isWithStripedBlocks());
+
+      BlockInfoStriped[] reloadedBlocks =
+          (BlockInfoStriped[]) reloaded.getBlocks();
+      assertEquals(1, reloadedBlocks.length);
+      assertEquals(blockId, reloadedBlocks[0].getBlockId());
+      assertEquals(updatedBlockBytes, reloadedBlocks[0].getNumBytes());
+      assertEquals(updatedGenStamp, reloadedBlocks[0].getGenerationStamp());
+      assertEquals(dataBlockNum, reloadedBlocks[0].getDataBlockNum());
+      assertEquals(parityBlockNum, reloadedBlocks[0].getParityBlockNum());
+
+      cluster.shutdown();
+      cluster = null;
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
}