You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2017/05/31 17:30:17 UTC
hadoop git commit: HDFS-11791. [READ] Test for increasing replication
of provided files.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-9806 5d021f38e -> 1fd812443
HDFS-11791. [READ] Test for increasing replication of provided files.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fd81244
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fd81244
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fd81244
Branch: refs/heads/HDFS-9806
Commit: 1fd812443b8ae2e8b8dbeb5c10b8e81d03e2c71c
Parents: 5d021f3
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Wed May 31 10:29:53 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Wed May 31 10:29:53 2017 -0700
----------------------------------------------------------------------
.../TestNameNodeProvidedImplementation.java | 55 ++++++++++++++++++++
1 file changed, 55 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd81244/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 5062439..e171557 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
+import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
@@ -34,10 +35,15 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockProvider;
import org.apache.hadoop.hdfs.server.common.BlockFormat;
@@ -378,4 +384,53 @@ public class TestNameNodeProvidedImplementation {
assertEquals(1, locations.length);
assertEquals(2, locations[0].getHosts().length);
}
+
+ /**
+  * Fetches the block locations of {@code filename} from the namenode and
+  * verifies the expected replica count.
+  *
+  * The request covers offset 0 through {@code baseFileLen}, so every test
+  * file created by this suite is expected to map to exactly one
+  * {@link LocatedBlock}.
+  *
+  * @param client DFS client used to query the namenode.
+  * @param filename absolute path of the file whose locations are checked.
+  * @param expectedLocations expected number of replica locations for the
+  *          file's single block.
+  * @return the {@link DatanodeInfo} locations of that single block.
+  * @throws IOException if the namenode RPC fails.
+  */
+ private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
+ String filename, int expectedLocations) throws IOException {
+ LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+ filename, 0, baseFileLen);
+ //given the start and length in the above call,
+ //only one LocatedBlock in LocatedBlocks
+ assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+ LocatedBlock locatedBlock = locatedBlocks.getLocatedBlocks().get(0);
+ assertEquals(expectedLocations, locatedBlock.getLocations().length);
+ return locatedBlock.getLocations();
+ }
+
+ /**
+  * Tests changing the replication factor of a PROVIDED file.
+  *
+  * Raises replication from 1 to 2 and verifies that a second replica
+  * appears, then lowers it back to 1 and verifies that the surviving
+  * replica is the one on the PROVIDED datanode.
+  *
+  * @throws Exception on cluster start-up, replication wait, or RPC failure.
+  */
+ @Test
+ public void testSetReplicationForProvidedFiles() throws Exception {
+ createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+ FixedBlockResolver.class);
+ // Two datanodes: the first is configured with PROVIDED storage, the
+ // second with DISK; the extra replica must land on the DISK node.
+ startCluster(NNDIRPATH, 2, null,
+ new StorageType[][] {
+ {StorageType.PROVIDED},
+ {StorageType.DISK}},
+ false);
+
+ String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
+ Path file = new Path(filename);
+ FileSystem fs = cluster.getFileSystem();
+
+ //set the replication to 2, and test that the file has
+ //the required replication.
+ fs.setReplication(file, (short) 2);
+ // waitForReplication polls until the replica count is reached or the
+ // 10s timeout elapses.
+ DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+ file, (short) 2, 10000);
+ // NOTE(review): this DFSClient is never closed — consider
+ // try-with-resources so the test does not leak the client connection.
+ DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+ cluster.getNameNodePort()), cluster.getConfiguration(0));
+ getAndCheckBlockLocations(client, filename, 2);
+
+ //set the replication back to 1
+ fs.setReplication(file, (short) 1);
+ DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+ file, (short) 1, 10000);
+ //the only replica left should be the PROVIDED datanode
+ // assumes datanode index 0 is the PROVIDED node, matching the first
+ // entry of the StorageType array passed to startCluster above.
+ DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
+ assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
+ infos[0].getDatanodeUuid());
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org