Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2016/02/11 19:14:20 UTC
hadoop git commit: HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping related tests [Part 2]. Contributed by Rui Li.
Repository: hadoop
Updated Branches:
refs/heads/trunk fa00d3e20 -> 0aa8c8289
HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping related tests [Part 2]. Contributed by Rui Li.
Change-Id: I2100bc27ad484f83c9cb2d2e5bb232f4f74fd286
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0aa8c828
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0aa8c828
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0aa8c828
Branch: refs/heads/trunk
Commit: 0aa8c828943e184f72699378c67873a406d457cc
Parents: fa00d3e
Author: Zhe Zhang <zh...@apache.org>
Authored: Thu Feb 11 10:14:09 2016 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Thu Feb 11 10:14:09 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../hdfs/TestReadStripedFileWithDecoding.java | 24 ++++++---
.../TestReadStripedFileWithMissingBlocks.java | 50 +++++++-----------
.../hadoop/hdfs/TestReconstructStripedFile.java | 53 +++++++++++---------
.../hdfs/TestSafeModeWithStripedFile.java | 4 +-
.../hdfs/TestWriteStripedFileWithFailure.java | 7 +--
6 files changed, 72 insertions(+), 69 deletions(-)
----------------------------------------------------------------------
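
The common thread in the hunks below: these striping tests used to hard-code numbers that only hold for the default RS-6-3 policy (loop bounds of 4, file lengths of 10 blocks, dnFailureNums of {1, 2, 3}). Each file now derives those values from the policy under test, so the same suite can run against any configured EC policy. A minimal sketch of the pattern, using the StripedFileTestUtil constants that appear in the diffs (the loop body is a placeholder):

    // Geometry comes from the policy under test instead of literals.
    short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;     // 6 under RS-6-3
    short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS; // 3 under RS-6-3
    // An RS(k, p) block group survives at most p lost internal blocks,
    // so every "how many failures" bound becomes parityBlocks:
    for (int dataDel = 1; dataDel <= parityBlocks; dataDel++) {
      for (int parityDel = 0; dataDel + parityDel <= parityBlocks; parityDel++) {
        // exercise one (dataDel, parityDel) failure combination
      }
    }
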
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17a05d1..612451c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -927,6 +927,9 @@ Trunk (Unreleased)
HDFS-9775. Erasure Coding : Rename BlockRecoveryWork to
BlockReconstructionWork. (Rakesh R via zhz)
+ HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping
+ related tests [Part 2]. (Rui Li via zhz)
+
Release 2.9.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index e1232d0..6d2227f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -68,13 +68,21 @@ public class TestReadStripedFileWithDecoding {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
- private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
- private final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+ private static final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+ private static final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private final int smallFileLength = blockSize * dataBlocks - 123;
private final int largeFileLength = blockSize * dataBlocks + 123;
private final int[] fileLengths = {smallFileLength, largeFileLength};
- private final int[] dnFailureNums = {1, 2, 3};
+ private static final int[] dnFailureNums = getDnFailureNums();
+
+ private static int[] getDnFailureNums() {
+ int[] dnFailureNums = new int[parityBlocks];
+ for (int i = 0; i < dnFailureNums.length; i++) {
+ dnFailureNums[i] = i + 1;
+ }
+ return dnFailureNums;
+ }
@Rule
public Timeout globalTimeout = new Timeout(300000);
@@ -132,8 +140,9 @@ public class TestReadStripedFileWithDecoding {
@Test(timeout=300000)
public void testReadCorruptedData() throws IOException {
for (int fileLength : fileLengths) {
- for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
- for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+ for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+ for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+ parityDelNum++) {
String src = "/corrupted_" + dataDelNum + "_" + parityDelNum;
testReadWithBlockCorrupted(src, fileLength,
dataDelNum, parityDelNum, false);
@@ -149,8 +158,9 @@ public class TestReadStripedFileWithDecoding {
@Test(timeout=300000)
public void testReadCorruptedDataByDeleting() throws IOException {
for (int fileLength : fileLengths) {
- for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
- for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+ for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+ for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+ parityDelNum++) {
String src = "/deleted_" + dataDelNum + "_" + parityDelNum;
testReadWithBlockCorrupted(src, fileLength,
dataDelNum, parityDelNum, true);
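
With the bounds tied to parityBlocks, the nested loops above enumerate every failure combination a group can still decode: dataDelNum from 1 to p and parityDelNum up to p - dataDelNum, i.e. p(p+1)/2 pairs in total; dnFailureNums likewise grows from the literal {1, 2, 3} to 1..parityBlocks. A self-contained way to see the count (illustrative helper, not part of the test class):

    // Combinations covered by the rewritten loops for a parity count p.
    static int failureComboCount(int parityBlocks) {
      int count = 0;
      for (int data = 1; data <= parityBlocks; data++) {
        for (int parity = 0; data + parity <= parityBlocks; parity++) {
          count++;                  // one (dataDelNum, parityDelNum) pair
        }
      }
      return count;                 // parityBlocks * (parityBlocks + 1) / 2
    }

failureComboCount(3) == 6, matching the coverage of the old hard-coded "< 4" bounds under RS-6-3; an RS(k, 2) policy yields 3.
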
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index 77bb61e..fe89401 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -21,18 +21,17 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.After;
import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
import org.junit.Rule;
import org.junit.rules.Timeout;
import java.io.IOException;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.TEST_EC_POLICY;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
@@ -53,16 +52,15 @@ public class TestReadStripedFileWithMissingBlocks {
@Rule
public Timeout globalTimeout = new Timeout(300000);
- @Before
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
- cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+ cluster.getFileSystem().getClient().setErasureCodingPolicy(
+ "/", TEST_EC_POLICY);
fs = cluster.getFileSystem();
}
- @After
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -71,33 +69,19 @@ public class TestReadStripedFileWithMissingBlocks {
}
@Test
- public void testReadFileWithMissingBlocks1() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 0);
- }
-
- @Test
- public void testReadFileWithMissingBlocks2() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 1);
- }
-
- @Test
- public void testReadFileWithMissingBlocks3() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 2);
- }
-
- @Test
- public void testReadFileWithMissingBlocks4() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 0);
- }
-
- @Test
- public void testReadFileWithMissingBlocks5() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 1);
- }
-
- @Test
- public void testReadFileWithMissingBlocks6() throws Exception {
- readFileWithMissingBlocks(new Path("/foo"), fileLength, 3, 0);
+ public void testReadFileWithMissingBlocks() throws Exception {
+ for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
+ for (int missingParity = 0; missingParity <=
+ NUM_PARITY_BLOCKS - missingData; missingParity++) {
+ try {
+ setup();
+ readFileWithMissingBlocks(new Path("/foo"), fileLength,
+ missingData, missingParity);
+ } finally {
+ tearDown();
+ }
+ }
+ }
}
private void readFileWithMissingBlocks(Path srcPath, int fileLength,
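
Collapsing the six test methods into one loop is also why @Before and @After disappear above: each (missingData, missingParity) pair needs a pristine MiniDFSCluster, and JUnit runs annotated setup only once per test method, so setup() and tearDown() are now called by hand per iteration, with try/finally guaranteeing shutdown even when an assertion fails mid-loop. The lifecycle, distilled (the ClusterTask interface is hypothetical, defined only for this sketch):

    // One fresh cluster per failure combination, always torn down.
    interface ClusterTask { void run() throws Exception; }

    private void withFreshCluster(ClusterTask body) throws Exception {
      try {
        setup();      // new MiniDFSCluster, no failures carried over
        body.run();
      } finally {
        tearDown();   // shut down even if an assertion fired
      }
    }

An iteration then reads withFreshCluster(() -> readFileWithMissingBlocks(new Path("/foo"), fileLength, missingData, missingParity)).
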
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 97edaf1..8241882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -113,14 +113,14 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverOneParityBlock() throws Exception {
- int fileLen = 10 * blockSize + blockSize/10;
+ int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock", fileLen,
ReconstructionType.ParityOnly, 1);
}
@Test(timeout = 120000)
public void testRecoverOneParityBlock1() throws Exception {
- int fileLen = cellSize + cellSize/10;
+ int fileLen = cellSize + cellSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock1", fileLen,
ReconstructionType.ParityOnly, 1);
}
@@ -134,35 +134,35 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverOneParityBlock3() throws Exception {
- int fileLen = 3 * blockSize + blockSize/10;
+ int fileLen = (dataBlkNum - 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock3", fileLen,
ReconstructionType.ParityOnly, 1);
}
@Test(timeout = 120000)
- public void testRecoverThreeParityBlocks() throws Exception {
- int fileLen = 10 * blockSize + blockSize/10;
- assertFileBlocksReconstruction("/testRecoverThreeParityBlocks", fileLen,
- ReconstructionType.ParityOnly, 3);
+ public void testRecoverAllParityBlocks() throws Exception {
+ int fileLen = dataBlkNum * blockSize + blockSize / 10;
+ assertFileBlocksReconstruction("/testRecoverAllParityBlocks", fileLen,
+ ReconstructionType.ParityOnly, parityBlkNum);
}
@Test(timeout = 120000)
- public void testRecoverThreeDataBlocks() throws Exception {
- int fileLen = 10 * blockSize + blockSize/10;
- assertFileBlocksReconstruction("/testRecoverThreeDataBlocks", fileLen,
- ReconstructionType.DataOnly, 3);
+ public void testRecoverAllDataBlocks() throws Exception {
+ int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
+ assertFileBlocksReconstruction("/testRecoverAllDataBlocks", fileLen,
+ ReconstructionType.DataOnly, parityBlkNum);
}
@Test(timeout = 120000)
- public void testRecoverThreeDataBlocks1() throws Exception {
- int fileLen = 3 * blockSize + blockSize/10;
- assertFileBlocksReconstruction("/testRecoverThreeDataBlocks1", fileLen,
- ReconstructionType.DataOnly, 3);
+ public void testRecoverAllDataBlocks1() throws Exception {
+ int fileLen = parityBlkNum * blockSize + blockSize / 10;
+ assertFileBlocksReconstruction("/testRecoverAllDataBlocks1", fileLen,
+ ReconstructionType.DataOnly, parityBlkNum);
}
@Test(timeout = 120000)
public void testRecoverOneDataBlock() throws Exception {
- int fileLen = 10 * blockSize + blockSize/10;
+ int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneDataBlock", fileLen,
ReconstructionType.DataOnly, 1);
}
@@ -183,16 +183,16 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverAnyBlocks() throws Exception {
- int fileLen = 3 * blockSize + blockSize/10;
+ int fileLen = parityBlkNum * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAnyBlocks", fileLen,
- ReconstructionType.Any, 2);
+ ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
}
@Test(timeout = 120000)
public void testRecoverAnyBlocks1() throws Exception {
- int fileLen = 10 * blockSize + blockSize/10;
+ int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAnyBlocks1", fileLen,
- ReconstructionType.Any, 3);
+ ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
}
private int[] generateDeadDnIndices(ReconstructionType type, int deadNum,
@@ -259,6 +259,7 @@ public class TestReconstructStripedFile {
if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
}
+ assertTrue("File length must be positive.", fileLen > 0);
Path file = new Path(fileName);
@@ -289,6 +290,7 @@ public class TestReconstructStripedFile {
int[] deadDnIndices = new int[toRecoverBlockNum];
ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
File[] replicas = new File[toRecoverBlockNum];
+ long[] replicaLengths = new long[toRecoverBlockNum];
File[] metadatas = new File[toRecoverBlockNum];
byte[][] replicaContents = new byte[toRecoverBlockNum][];
Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
@@ -301,9 +303,10 @@ public class TestReconstructStripedFile {
lastBlock.getBlock(), cellSize, dataBlkNum, indices[dead[i]]);
errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
+ replicaLengths[i] = replicas[i].length();
metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
// the block replica on the datanode should be the same as expected
- assertEquals(replicas[i].length(),
+ assertEquals(replicaLengths[i],
StripedBlockUtil.getInternalBlockLength(
lastBlock.getBlockSize(), cellSize, dataBlkNum, indices[dead[i]]));
assertTrue(metadatas[i].getName().
@@ -312,8 +315,10 @@ public class TestReconstructStripedFile {
replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
}
- int cellsNum = (fileLen - 1) / cellSize + 1;
- int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;
+ int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
+ int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum :
+ Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
+ int groupSize = lastGroupNumBlk + parityBlkNum;
// shutdown datanodes or generate corruption
int stoppedDN = generateErrors(errorMap, type);
@@ -342,7 +347,7 @@ public class TestReconstructStripedFile {
LOG.info("replica after reconstruction " + replicaAfterReconstruction);
File metadataAfterReconstruction =
cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
- assertEquals(replicaAfterReconstruction.length(), replicas[i].length());
+ assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
LOG.info("replica before " + replicas[i]);
assertTrue(metadataAfterReconstruction.getName().
endsWith(blocks[i].getGenerationStamp() + ".meta"));
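
The new lastGroupDataLen arithmetic fixes a subtle sizing bug: the old cellsNum counted cells across the whole file, so any file longer than dataBlkNum cells looked like a full group even when its last block group was partial. The replacement sizes the last group by its own data length, capped at dataBlkNum because cells stripe round-robin across the internal blocks. A worked example (every concrete size here is illustrative, not the test's actual configuration):

    public class GroupSizeExample {
      public static void main(String[] args) {
        int cellSize = 64 * 1024;              // hypothetical cell size
        int blockSize = 4 * cellSize;          // hypothetical block size
        int dataBlkNum = 6, parityBlkNum = 3;  // RS-6-3
        int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;

        int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
        // k cells of tail data touch min(dataBlkNum, k) internal blocks.
        int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum
            : Math.min(dataBlkNum, (lastGroupDataLen - 1) / cellSize + 1);
        int groupSize = lastGroupNumBlk + parityBlkNum;
        // 7.1 blocks of data -> tail group holds ~1.1 blocks = ~4.4 cells
        // -> 5 data blocks + 3 parity blocks:
        System.out.println(groupSize);         // prints 8
      }
    }
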
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 146abe7..2b6b65a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -79,7 +78,8 @@ public class TestSafeModeWithStripedFile {
@Test
public void testStripedFile1() throws IOException {
- doTest(cellSize * 5, 5);
+ int numCell = DATA_BLK_NUM - 1;
+ doTest(cellSize * numCell, numCell);
}
/**
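
The literal 5 in testStripedFile1 only made sense under RS-6-3, where 5 = DATA_BLK_NUM - 1 keeps the file one cell short of a full stripe; deriving numCell preserves that partial-stripe shape under any policy. Illustrative arithmetic:

    // A file of (DATA_BLK_NUM - 1) cells puts one cell on each of
    // DATA_BLK_NUM - 1 internal data blocks and never completes a stripe.
    int dataBlkNum = 6;               // hypothetical RS-6-3
    int cellSize = 64 * 1024;         // hypothetical cell size
    int numCell = dataBlkNum - 1;     // 5
    int fileLen = cellSize * numCell; // 5 one-cell data blocks, 0 full stripes
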
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aa8c828/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 6dcff69..fad2dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -72,8 +72,9 @@ public class TestWriteStripedFileWithFailure {
@Test(timeout = 300000)
public void testWriteStripedFileWithDNFailure() throws IOException {
for (int fileLength : fileLengths) {
- for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
- for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+ for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+ for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+ parityDelNum++) {
try {
// setup a new cluster with no dead datanode
setup();
@@ -82,7 +83,7 @@ public class TestWriteStripedFileWithFailure {
String fileType = fileLength < (blockSize * dataBlocks) ?
"smallFile" : "largeFile";
LOG.error("Failed to write file with DN failure:"
- + " fileType = "+ fileType
+ + " fileType = " + fileType
+ ", dataDelNum = " + dataDelNum
+ ", parityDelNum = " + parityDelNum);
throw ioe;
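
The write-path test gets the same parityBlocks-bounded loops, again with a fresh cluster per combination as in TestReadStripedFileWithMissingBlocks above. The smallFile/largeFile label in the error message turns on whether the file fills a complete block group; restated with illustrative numbers:

    // blockSize * dataBlocks bytes of user data fill one block group
    // exactly, so that product is the small/large threshold (sizes are
    // illustrative, not the test's actual configuration).
    int blockSize = 256 * 1024;                 // hypothetical
    int dataBlocks = 6;                         // hypothetical RS-6-3
    int groupDataSize = blockSize * dataBlocks;
    int smallFileLength = groupDataSize - 123;  // partial last group
    int largeFileLength = groupDataSize + 123;  // spills into a 2nd group
    // fileLength < groupDataSize -> "smallFile", otherwise "largeFile"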