Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2013/12/13 18:28:18 UTC
svn commit: r1550774 [7/10] - in
/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./
src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apach...
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Dec 13 17:28:14 2013
@@ -56,6 +56,7 @@ import java.security.PrivilegedException
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import org.apache.commons.logging.Log;
@@ -88,6 +89,7 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
@@ -125,6 +127,9 @@ public class MiniDFSCluster {
public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
= DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
+ // Changing this value may break some tests that assume it is 2.
+ public static final int DIRS_PER_DATANODE = 2;
+
static { DefaultMetricsSystem.setMiniClusterMode(true); }
/**
@@ -329,9 +334,10 @@ public class MiniDFSCluster {
builder.nameNodePort, builder.nameNodeHttpPort);
}
- LOG.info("starting cluster with " +
- builder.nnTopology.countNameNodes() + " namenodes.");
- nameNodes = new NameNodeInfo[builder.nnTopology.countNameNodes()];
+ final int numNameNodes = builder.nnTopology.countNameNodes();
+ LOG.info("starting cluster: numNameNodes=" + numNameNodes
+ + ", numDataNodes=" + builder.numDataNodes);
+ nameNodes = new NameNodeInfo[numNameNodes];
initMiniDFSCluster(builder.conf,
builder.numDataNodes,
@@ -1148,15 +1154,16 @@ public class MiniDFSCluster {
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
- File dir1 = getInstanceStorageDir(i, 0);
- File dir2 = getInstanceStorageDir(i, 1);
- dir1.mkdirs();
- dir2.mkdirs();
- if (!dir1.isDirectory() || !dir2.isDirectory()) {
- throw new IOException("Mkdirs failed to create directory for DataNode "
- + i + ": " + dir1 + " or " + dir2);
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
+ File dir = getInstanceStorageDir(i, j);
+ dir.mkdirs();
+ if (!dir.isDirectory()) {
+ throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
+ }
+ sb.append((j > 0 ? "," : "") + fileAsURI(dir));
}
- String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
+ String dirs = sb.toString();
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
}
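For reference, a minimal standalone sketch of the directory-list construction introduced above. DIRS_PER_DATANODE mirrors the new constant; the base directory, class name, and the use of File.toURI() in place of Hadoop's fileAsURI() helper are illustrative assumptions, not part of this commit:

    import java.io.File;
    import java.io.IOException;

    public class DataDirListSketch {
      static final int DIRS_PER_DATANODE = 2; // mirrors MiniDFSCluster.DIRS_PER_DATANODE

      // Builds the comma-separated URI list assigned to dfs.datanode.data.dir,
      // e.g. "file:/base/data0,file:/base/data1".
      static String buildDataDirs(File base) throws IOException {
        StringBuilder sb = new StringBuilder();
        for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
          File dir = new File(base, "data" + j); // illustrative layout
          dir.mkdirs();
          if (!dir.isDirectory()) {
            throw new IOException("Mkdirs failed to create " + dir);
          }
          sb.append(j > 0 ? "," : "").append(dir.toURI());
        }
        return sb.toString();
      }
    }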
@@ -1487,8 +1494,9 @@ public class MiniDFSCluster {
*/
public synchronized void restartNameNodes() throws IOException {
for (int i = 0; i < nameNodes.length; i++) {
- restartNameNode(i);
+ restartNameNode(i, false);
}
+ waitActive();
}
/**
@@ -1925,12 +1933,14 @@ public class MiniDFSCluster {
// Wait for expected number of datanodes to start
if (dnInfo.length != numDataNodes) {
+ LOG.info("dnInfo.length != numDataNodes");
return true;
}
// if one of the data nodes is not fully started, continue to wait
for (DataNodeProperties dn : dataNodes) {
if (!dn.datanode.isDatanodeFullyStarted()) {
+ LOG.info("!dn.datanode.isDatanodeFullyStarted()");
return true;
}
}
@@ -1939,6 +1949,7 @@ public class MiniDFSCluster {
// using (capacity == 0) as proxy.
for (DatanodeInfo dn : dnInfo) {
if (dn.getCapacity() == 0) {
+ LOG.info("dn.getCapacity() == 0");
return true;
}
}
@@ -1946,6 +1957,7 @@ public class MiniDFSCluster {
// If datanode dataset is not initialized then wait
for (DataNodeProperties dn : dataNodes) {
if (DataNodeTestUtils.getFSDataset(dn.datanode) == null) {
+ LOG.info("DataNodeTestUtils.getFSDataset(dn.datanode) == null");
return true;
}
}
@@ -1965,12 +1977,12 @@ public class MiniDFSCluster {
* @param dataNodeIndex - data node whose block report is desired - the index is the same as for getDataNodes()
* @return the block report for the specified data node
*/
- public Iterable<Block> getBlockReport(String bpid, int dataNodeIndex) {
+ public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
- return DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid);
+ return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
@@ -1979,11 +1991,12 @@ public class MiniDFSCluster {
* @return block reports from all data nodes
* BlockListAsLongs is indexed in the same order as the list of datanodes returned by getDataNodes()
*/
- public Iterable<Block>[] getAllBlockReports(String bpid) {
+ public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
int numDataNodes = dataNodes.size();
- Iterable<Block>[] result = new BlockListAsLongs[numDataNodes];
+ final List<Map<DatanodeStorage, BlockListAsLongs>> result
+ = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
for (int i = 0; i < numDataNodes; ++i) {
- result[i] = getBlockReport(bpid, i);
+ result.add(getBlockReport(bpid, i));
}
return result;
}
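The TestDFSShell and TestInjectionForSimulatedStorage hunks below adopt this new per-storage shape; for reference, a minimal sketch of walking the reports (class and method names are illustrative):

    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

    class BlockReportSketch {
      // One map per datanode; within each map, one block list per storage volume.
      static void dump(List<Map<DatanodeStorage, BlockListAsLongs>> reports) {
        for (Map<DatanodeStorage, BlockListAsLongs> perStorage : reports) {
          for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : perStorage.entrySet()) {
            for (Block b : e.getValue()) {
              System.out.println(e.getKey().getStorageID() + " -> " + b.getBlockId());
            }
          }
        }
      }
    }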
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Fri Dec 13 17:28:14 2013
@@ -23,6 +23,7 @@ import java.security.PrivilegedException
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicInteger;
@@ -35,8 +36,10 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
@@ -1392,11 +1395,14 @@ public class TestDFSShell {
List<File> files = new ArrayList<File>();
List<DataNode> datanodes = cluster.getDataNodes();
String poolId = cluster.getNamesystem().getBlockPoolId();
- Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
- for(int i = 0; i < blocks.length; i++) {
+ List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
+ for(int i = 0; i < blocks.size(); i++) {
DataNode dn = datanodes.get(i);
- for(Block b : blocks[i]) {
- files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
+ Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
+ for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
+ for(Block b : e.getValue()) {
+ files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
+ }
}
}
return files;
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Fri Dec 13 17:28:14 2013
@@ -237,7 +237,7 @@ public class TestDFSStartupVersions {
* this iterations version 3-tuple
* </pre>
*/
- @Test
+ @Test (timeout=300000)
public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Fri Dec 13 17:28:14 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
@@ -82,7 +83,8 @@ public class TestDatanodeConfig {
DataNode dn = null;
try {
dn = DataNode.createDataNode(new String[]{}, conf);
- } catch(IOException e) {
+ fail();
+ } catch(Exception e) {
// expecting exception here
}
if(dn != null)
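The change above adopts the fail-inside-try idiom, so a createDataNode() call that unexpectedly succeeds now fails the test instead of passing silently. A generic sketch of the pattern, with an illustrative helper name:

    import static org.junit.Assert.fail;

    class ExpectedExceptionSketch {
      static void mustThrow(Runnable action) {
        try {
          action.run();
          fail(); // reached only if action completed without throwing
        } catch (Exception e) {
          // expected path; fail()'s AssertionError is an Error, so it is
          // not swallowed by this catch and still fails the test
        }
      }
    }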
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Fri Dec 13 17:28:14 2013
@@ -173,7 +173,7 @@ public class TestDatanodeRegistration {
// register a datanode
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
- "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
+ "fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
DN_IPC_PORT);
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
.getCTime();
@@ -190,7 +190,7 @@ public class TestDatanodeRegistration {
// register the same datanode again with a different storage ID
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
- "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT,
+ "changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT,
DN_INFO_SECURE_PORT, DN_IPC_PORT);
dnReg = new DatanodeRegistration(dnId,
mockStorageInfo, null, VersionInfo.getVersion());
@@ -226,7 +226,7 @@ public class TestDatanodeRegistration {
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
doReturn(123).when(mockDnReg).getXferPort();
- doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same.
@@ -273,7 +273,7 @@ public class TestDatanodeRegistration {
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
- doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same and CTimes are the
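The renames above track the split between a datanode's identity (one UUID per datanode) and its per-volume storage IDs (one per DatanodeStorage). A minimal sketch; the class name is assumed and the constructor arguments are illustrative values taken from these tests:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

    class IdentitySketch {
      static void sketch() {
        // One UUID identifies the datanode itself...
        DatanodeID dnId = new DatanodeID("127.0.0.1", "fakehostname",
            "fake-datanode-id", 100, 101, 102, 103);
        // ...while each storage volume carries its own generated storage ID.
        DatanodeStorage volume = new DatanodeStorage(DatanodeStorage.generateUuid());
        System.out.println(dnId.getDatanodeUuid() + " / " + volume.getStorageID());
      }
    }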
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Dec 13 17:28:14 2013
@@ -158,7 +158,7 @@ public class TestFileCorruption {
ns.writeLock();
try {
cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
- blk, new DatanodeInfo(dnR), "TEST");
+ blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
} finally {
ns.writeUnlock();
}
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Fri Dec 13 17:28:14 2013
@@ -22,20 +22,21 @@ import static org.junit.Assert.assertEqu
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
import org.junit.Test;
@@ -136,7 +137,7 @@ public class TestInjectionForSimulatedSt
DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
filesize, blockSize, (short) numDataNodes, 0L);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
- Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
+ List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
cluster.shutdown();
cluster = null;
@@ -157,9 +158,11 @@ public class TestInjectionForSimulatedSt
.build();
cluster.waitActive();
Set<Block> uniqueBlocks = new HashSet<Block>();
- for (int i=0; i<blocksList.length; ++i) {
- for (Block b : blocksList[i]) {
- uniqueBlocks.add(new Block(b));
+ for(Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
+ for(BlockListAsLongs blockList : map.values()) {
+ for(Block b : blockList) {
+ uniqueBlocks.add(new Block(b));
+ }
}
}
// Insert all the blocks in the first data node
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java Fri Dec 13 17:28:14 2013
@@ -151,7 +151,7 @@ public class TestPeerCache {
public void testAddAndRetrieve() throws Exception {
PeerCache cache = new PeerCache(3, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
FakePeer peer = new FakePeer(dnId, false);
cache.put(dnId, peer);
@@ -171,7 +171,7 @@ public class TestPeerCache {
FakePeer peers[] = new FakePeer[CAPACITY];
for (int i = 0; i < CAPACITY; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
- "fakehostname_" + i, "fake_storage_id",
+ "fakehostname_" + i, "fake_datanode_id",
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
@@ -202,7 +202,7 @@ public class TestPeerCache {
FakePeer peers[] = new FakePeer[CAPACITY + 1];
for (int i = 0; i < dnIds.length; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
- "fakehostname_" + i, "fake_storage_id_" + i,
+ "fakehostname_" + i, "fake_datanode_id_" + i,
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
@@ -233,7 +233,7 @@ public class TestPeerCache {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
@@ -258,7 +258,7 @@ public class TestPeerCache {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Dec 13 17:28:14 2013
@@ -453,12 +453,14 @@ public class UpgradeUtilities {
*/
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException {
- DataStorage storage = new DataStorage(version, "doNotCare");
+ DataStorage storage = new DataStorage(version);
+ storage.setDatanodeUuid("FixedDatanodeUuid");
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
File versionFile = new File(parent[i], "VERSION");
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+ storage.createStorageID(sd);
storage.writeProperties(versionFile, sd);
versionFiles[i] = versionFile;
File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Fri Dec 13 17:28:14 2013
@@ -17,7 +17,9 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
+import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
@@ -29,6 +31,7 @@ import org.apache.hadoop.fs.permission.A
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -40,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
@@ -62,17 +66,9 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
-import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
@@ -155,12 +151,18 @@ public class TestPBHelper {
void compare(DatanodeID dn, DatanodeID dn2) {
assertEquals(dn.getIpAddr(), dn2.getIpAddr());
assertEquals(dn.getHostName(), dn2.getHostName());
- assertEquals(dn.getStorageID(), dn2.getStorageID());
+ assertEquals(dn.getDatanodeUuid(), dn2.getDatanodeUuid());
assertEquals(dn.getXferPort(), dn2.getXferPort());
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
assertEquals(dn.getIpcPort(), dn2.getIpcPort());
}
+ void compare(DatanodeStorage dns1, DatanodeStorage dns2) {
+ assertThat(dns2.getStorageID(), is(dns1.getStorageID()));
+ assertThat(dns2.getState(), is(dns1.getState()));
+ assertThat(dns2.getStorageType(), is(dns1.getStorageType()));
+ }
+
@Test
public void testConvertBlock() {
Block b = new Block(1, 100, 3);
@@ -170,8 +172,10 @@ public class TestPBHelper {
}
private static BlockWithLocations getBlockWithLocations(int bid) {
- return new BlockWithLocations(new Block(bid, 0, 1), new String[] { "dn1",
- "dn2", "dn3" });
+ final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
+ final String[] storageIDs = {"s1", "s2", "s3"};
+ return new BlockWithLocations(new Block(bid, 0, 1),
+ datanodeUuids, storageIDs);
}
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
@@ -434,6 +438,30 @@ public class TestPBHelper {
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
AdminStates.NORMAL)
};
+ String[] storageIDs = {"s1", "s2", "s3"};
+ StorageType[] media = {
+ StorageType.DISK,
+ StorageType.SSD,
+ StorageType.DISK
+ };
+ LocatedBlock lb = new LocatedBlock(
+ new ExtendedBlock("bp12", 12345, 10, 53),
+ dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
+ lb.setBlockToken(new Token<BlockTokenIdentifier>(
+ "identifier".getBytes(), "password".getBytes(), new Text("kind"),
+ new Text("service")));
+ return lb;
+ }
+
+ private LocatedBlock createLocatedBlockNoStorageMedia() {
+ DatanodeInfo[] dnInfos = {
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
+ AdminStates.DECOMMISSION_INPROGRESS),
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
+ AdminStates.DECOMMISSIONED),
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
+ AdminStates.NORMAL)
+ };
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
lb.setBlockToken(new Token<BlockTokenIdentifier>(
@@ -451,6 +479,14 @@ public class TestPBHelper {
}
@Test
+ public void testConvertLocatedBlockNoStorageMedia() {
+ LocatedBlock lb = createLocatedBlockNoStorageMedia();
+ LocatedBlockProto lbProto = PBHelper.convert(lb);
+ LocatedBlock lb2 = PBHelper.convert(lbProto);
+ compare(lb,lb2);
+ }
+
+ @Test
public void testConvertLocatedBlockList() {
ArrayList<LocatedBlock> lbl = new ArrayList<LocatedBlock>();
for (int i=0;i<3;i++) {
@@ -493,6 +529,16 @@ public class TestPBHelper {
compare(reg, reg2);
assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
+
+ @Test
+ public void TestConvertDatanodeStorage() {
+ DatanodeStorage dns1 = new DatanodeStorage(
+ "id1", DatanodeStorage.State.NORMAL, StorageType.SSD);
+
+ DatanodeStorageProto proto = PBHelper.convert(dns1);
+ DatanodeStorage dns2 = PBHelper.convert(proto);
+ compare(dns1, dns2);
+ }
@Test
public void testConvertBlockCommand() {
@@ -502,8 +548,9 @@ public class TestPBHelper {
dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
+ String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
- blocks, dnInfos);
+ blocks, dnInfos, storageIDs);
BlockCommandProto bcProto = PBHelper.convert(bc);
BlockCommand bc2 = PBHelper.convert(bcProto);
assertEquals(bc.getAction(), bc2.getAction());
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java Fri Dec 13 17:28:14 2013
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.qjournal;
import static org.junit.Assert.*;
-import static org.junit.Assume.*;
import java.io.File;
import java.io.IOException;
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java Fri Dec 13 17:28:14 2013
@@ -916,7 +916,7 @@ public class TestQuorumJournalManager {
NNStorage.getFinalizedEditsFileName(41, 50));
ArrayList<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
- qjm.selectInputStreams(streams, 25, false, false);
+ qjm.selectInputStreams(streams, 25, false);
verifyEdits(streams, 25, 50);
}
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Fri Dec 13 17:28:14 2013
@@ -31,6 +31,7 @@ import java.util.concurrent.TimeoutExcep
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.server.bal
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
+import org.apache.log4j.Level;
import org.junit.Test;
/**
@@ -58,7 +60,10 @@ import org.junit.Test;
public class TestBalancer {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestBalancer");
-
+ static {
+ ((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL);
+ }
+
final static long CAPACITY = 500L;
final static String RACK0 = "/rack0";
final static String RACK1 = "/rack1";
@@ -292,6 +297,16 @@ public class TestBalancer {
} while (!balanced);
}
+ String long2String(long[] array) {
+ if (array.length == 0) {
+ return "<empty>";
+ }
+ StringBuilder b = new StringBuilder("[").append(array[0]);
+ for(int i = 1; i < array.length; i++) {
+ b.append(", ").append(array[i]);
+ }
+ return b.append("]").toString();
+ }
/** This test starts a cluster with a specified number of nodes,
* and fills it to be 30% full (with a single file replicated identically
* to all datanodes);
@@ -308,6 +323,11 @@ public class TestBalancer {
*/
private void doTest(Configuration conf, long[] capacities, String[] racks,
long newCapacity, String newRack, boolean useTool) throws Exception {
+ LOG.info("capacities = " + long2String(capacities));
+ LOG.info("racks = " + Arrays.asList(racks));
+ LOG.info("newCapacity= " + newCapacity);
+ LOG.info("newRack = " + newRack);
+ LOG.info("useTool = " + useTool);
assertEquals(capacities.length, racks.length);
int numOfDatanodes = capacities.length;
cluster = new MiniDFSCluster.Builder(conf)
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Fri Dec 13 17:28:14 2013
@@ -18,15 +18,18 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.Set;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.util.Daemon;
import org.junit.Assert;
@@ -83,9 +86,8 @@ public class BlockManagerTestUtil {
final Set<String> rackSet = new HashSet<String>(0);
final Collection<DatanodeDescriptor> corruptNodes =
getCorruptReplicas(blockManager).getNodes(b);
- for (Iterator<DatanodeDescriptor> it = blockManager.blocksMap.nodeIterator(b);
- it.hasNext();) {
- DatanodeDescriptor cur = it.next();
+ for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
+ final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
if ((corruptNodes == null ) || !corruptNodes.contains(cur)) {
String rackName = cur.getNetworkLocation();
@@ -215,4 +217,52 @@ public class BlockManagerTestUtil {
public static void checkHeartbeat(BlockManager bm) {
bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
+
+ public static DatanodeStorageInfo updateStorage(DatanodeDescriptor dn,
+ DatanodeStorage s) {
+ return dn.updateStorage(s);
+ }
+
+ public static DatanodeDescriptor getLocalDatanodeDescriptor(
+ boolean initializeStorage) {
+ DatanodeDescriptor dn = new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
+ if (initializeStorage) {
+ dn.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
+ }
+ return dn;
+ }
+
+ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+ String rackLocation, boolean initializeStorage) {
+ return getDatanodeDescriptor(ipAddr, rackLocation,
+ initializeStorage? new DatanodeStorage(DatanodeStorage.generateUuid()): null);
+ }
+
+ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+ String rackLocation, DatanodeStorage storage) {
+ DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
+ DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation);
+ if (storage != null) {
+ dn.updateStorage(storage);
+ }
+ return dn;
+ }
+
+ public static DatanodeStorageInfo newDatanodeStorageInfo(
+ DatanodeDescriptor dn, DatanodeStorage s) {
+ return new DatanodeStorageInfo(dn, s);
+ }
+
+ public static StorageReport[] getStorageReportsForDatanode(
+ DatanodeDescriptor dnd) {
+ ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
+ for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
+ StorageReport report = new StorageReport(
+ storage.getStorageID(), false, storage.getCapacity(),
+ storage.getDfsUsed(), storage.getRemaining(),
+ storage.getBlockPoolUsed());
+ reports.add(report);
+ }
+ return reports.toArray(StorageReport.EMPTY_ARRAY);
+ }
}
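A sketch of how these new helpers combine, mirroring the TestBlockManager hunk further below; the capacity figures are illustrative, and it assumes it is placed in the org.apache.hadoop.hdfs.server.blockmanagement package so DatanodeStorageInfo is accessible:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    class HeartbeatSketch {
      static void heartbeat() {
        DatanodeDescriptor dn = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
        // Per-volume utilization now reaches the heartbeat as StorageReport[]
        // (arguments: capacity, dfsUsed, remaining, blockPoolUsed).
        dn.getStorageInfos()[0].setUtilizationForTesting(
            1024L * 1024 * 1024, 0L, 1024L * 1024 * 1024, 0L);
        dn.updateHeartbeat(
            BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0);
      }
    }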
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java Fri Dec 13 17:28:14 2013
@@ -31,18 +31,19 @@ import org.junit.Test;
public class TestBlockInfoUnderConstruction {
@Test
public void testInitializeBlockRecovery() throws Exception {
- DatanodeDescriptor dd1 = DFSTestUtil.getDatanodeDescriptor("10.10.1.1",
- "default");
- DatanodeDescriptor dd2 = DFSTestUtil.getDatanodeDescriptor("10.10.1.2",
- "default");
- DatanodeDescriptor dd3 = DFSTestUtil.getDatanodeDescriptor("10.10.1.3",
- "default");
+ DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
+ DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
+ DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
+ DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
+ DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
+ DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
+
dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
3,
BlockUCState.UNDER_CONSTRUCTION,
- new DatanodeDescriptor[] {dd1, dd2, dd3});
+ new DatanodeStorageInfo[] {s1, s2, s3});
// Recovery attempt #1.
long currentTime = System.currentTimeMillis();
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Fri Dec 13 17:28:14 2013
@@ -22,9 +22,14 @@ import static org.junit.Assert.assertFal
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
@@ -39,11 +44,11 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.net.NetworkTopology;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
-import static org.mockito.Mockito.*;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
@@ -51,6 +56,7 @@ import com.google.common.collect.LinkedL
import com.google.common.collect.Lists;
public class TestBlockManager {
+ private DatanodeStorageInfo[] storages;
private List<DatanodeDescriptor> nodes;
private List<DatanodeDescriptor> rackA;
private List<DatanodeDescriptor> rackB;
@@ -79,28 +85,29 @@ public class TestBlockManager {
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
- nodes = ImmutableList.of(
- DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
- DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
- DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
- );
+ final String[] racks = {
+ "/rackA",
+ "/rackA",
+ "/rackA",
+ "/rackB",
+ "/rackB",
+ "/rackB"};
+ storages = DFSTestUtil.createDatanodeStorageInfos(racks);
+ nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6);
}
-
+
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
for (DatanodeDescriptor dn : nodesToAdd) {
cluster.add(dn);
+ dn.getStorageInfos()[0].setUtilizationForTesting(
+ 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
dn.updateHeartbeat(
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 0L, 0L,
- 0, 0);
+ BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0);
bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
}
}
@@ -125,17 +132,18 @@ public class TestBlockManager {
}
private void doBasicTest(int testIndex) {
- List<DatanodeDescriptor> origNodes = getNodes(0, 1);
- BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
+ List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
+ List<DatanodeDescriptor> origNodes = getNodes(origStorages);
+ BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
- DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
+ DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline.length);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
- origNodes.contains(pipeline[0]));
+ origStorages.contains(pipeline[0]));
assertTrue("Destination of replication should be on the other rack. " +
"Was: " + pipeline[1],
- rackB.contains(pipeline[1]));
+ rackB.contains(pipeline[1].getDatanodeDescriptor()));
}
@@ -156,21 +164,22 @@ public class TestBlockManager {
private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
+ List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission two of the nodes (A1, A2)
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1);
- DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
+ DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
- origNodes.contains(pipeline[0]));
+ origStorages.contains(pipeline[0]));
assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackA = false;
for (int i = 1; i < pipeline.length; i++) {
- DatanodeDescriptor target = pipeline[i];
+ DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackA.contains(target)) {
foundOneOnRackA = true;
}
@@ -199,22 +208,23 @@ public class TestBlockManager {
private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
+ List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 3);
- DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
+ DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
- origNodes.contains(pipeline[0]));
+ origStorages.contains(pipeline[0]));
assertEquals("Should have three targets", 4, pipeline.length);
boolean foundOneOnRackA = false;
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
- DatanodeDescriptor target = pipeline[i];
+ DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackA.contains(target)) {
foundOneOnRackA = true;
} else if (rackB.contains(target)) {
@@ -251,21 +261,22 @@ public class TestBlockManager {
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
+ List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes in rack A
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
- DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
+ DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
- origNodes.contains(pipeline[0]));
+ origStorages.contains(pipeline[0]));
assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
- DatanodeDescriptor target = pipeline[i];
+ DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackB.contains(target)) {
foundOneOnRackB = true;
}
@@ -284,11 +295,12 @@ public class TestBlockManager {
// the third off-rack replica.
DatanodeDescriptor rackCNode =
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
+ rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
addNodes(ImmutableList.of(rackCNode));
try {
- DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
+ DatanodeStorageInfo[] pipeline2 = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline2.length);
- assertEquals(rackCNode, pipeline2[1]);
+ assertEquals(rackCNode, pipeline2[1].getDatanodeDescriptor());
} finally {
removeNode(rackCNode);
}
@@ -309,30 +321,30 @@ public class TestBlockManager {
private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
// Originally on only nodes in rack A.
List<DatanodeDescriptor> origNodes = rackA;
- BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
- DatanodeDescriptor pipeline[] = scheduleSingleReplication(blockInfo);
+ BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
+ DatanodeStorageInfo pipeline[] = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline.length); // single new copy
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
- origNodes.contains(pipeline[0]));
+ origNodes.contains(pipeline[0].getDatanodeDescriptor()));
assertTrue("Destination of replication should be on the other rack. " +
"Was: " + pipeline[1],
- rackB.contains(pipeline[1]));
+ rackB.contains(pipeline[1].getDatanodeDescriptor()));
}
@Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
List<DatanodeDescriptor> nodes = ImmutableList.of(
- DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
- DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
+ BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
+ BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
+ BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
+ BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA", true),
+ BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA", true),
+ BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA", true)
);
addNodes(nodes);
- List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);;
+ List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
}
@@ -342,7 +354,7 @@ public class TestBlockManager {
List<DatanodeDescriptor> origNodes)
throws Exception {
assertEquals(0, bm.numOfUnderReplicatedBlocks());
- addBlockOnNodes((long)testIndex, origNodes);
+ addBlockOnNodes(testIndex, origNodes);
bm.processMisReplicatedBlocks();
assertEquals(0, bm.numOfUnderReplicatedBlocks());
}
@@ -353,9 +365,11 @@ public class TestBlockManager {
* pipeline.
*/
private void fulfillPipeline(BlockInfo blockInfo,
- DatanodeDescriptor[] pipeline) throws IOException {
+ DatanodeStorageInfo[] pipeline) throws IOException {
for (int i = 1; i < pipeline.length; i++) {
- bm.addBlock(pipeline[i], blockInfo, null);
+ DatanodeStorageInfo storage = pipeline[i];
+ bm.addBlock(storage.getDatanodeDescriptor(), storage.getStorageID(), blockInfo, null);
+ blockInfo.addStorage(storage);
}
}
@@ -364,7 +378,9 @@ public class TestBlockManager {
BlockInfo blockInfo = new BlockInfo(block, 3);
for (DatanodeDescriptor dn : nodes) {
- blockInfo.addNode(dn);
+ for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
+ blockInfo.addStorage(storage);
+ }
}
return blockInfo;
}
@@ -376,6 +392,22 @@ public class TestBlockManager {
}
return ret;
}
+
+ private List<DatanodeDescriptor> getNodes(List<DatanodeStorageInfo> storages) {
+ List<DatanodeDescriptor> ret = Lists.newArrayList();
+ for (DatanodeStorageInfo s : storages) {
+ ret.add(s.getDatanodeDescriptor());
+ }
+ return ret;
+ }
+
+ private List<DatanodeStorageInfo> getStorages(int ... indexes) {
+ List<DatanodeStorageInfo> ret = Lists.newArrayList();
+ for (int idx : indexes) {
+ ret.add(storages[idx]);
+ }
+ return ret;
+ }
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
List<DatanodeDescriptor> nodes = getNodes(indexes);
@@ -394,7 +426,7 @@ public class TestBlockManager {
return blockInfo;
}
- private DatanodeDescriptor[] scheduleSingleReplication(Block block) {
+ private DatanodeStorageInfo[] scheduleSingleReplication(Block block) {
// list for priority 1
List<Block> list_p1 = new ArrayList<Block>();
list_p1.add(block);
@@ -412,27 +444,29 @@ public class TestBlockManager {
assertTrue("replication is pending after work is computed",
bm.pendingReplications.getNumReplicas(block) > 0);
- LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
+ LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls = getAllPendingReplications();
assertEquals(1, repls.size());
- Entry<DatanodeDescriptor, BlockTargetPair> repl =
+ Entry<DatanodeStorageInfo, BlockTargetPair> repl =
repls.entries().iterator().next();
- DatanodeDescriptor[] targets = repl.getValue().targets;
+ DatanodeStorageInfo[] targets = repl.getValue().targets;
- DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];
+ DatanodeStorageInfo[] pipeline = new DatanodeStorageInfo[1 + targets.length];
pipeline[0] = repl.getKey();
System.arraycopy(targets, 0, pipeline, 1, targets.length);
return pipeline;
}
- private LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> getAllPendingReplications() {
- LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls =
+ private LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> getAllPendingReplications() {
+ LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls =
LinkedListMultimap.create();
for (DatanodeDescriptor dn : nodes) {
List<BlockTargetPair> thisRepls = dn.getReplicationCommand(10);
if (thisRepls != null) {
- repls.putAll(dn, thisRepls);
+ for(DatanodeStorageInfo storage : dn.getStorageInfos()) {
+ repls.putAll(storage, thisRepls);
+ }
}
}
return repls;
@@ -455,7 +489,7 @@ public class TestBlockManager {
addBlockOnNodes(blockId,origNodes.subList(0,1));
List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
- List<DatanodeDescriptor> liveNodes = new LinkedList<DatanodeDescriptor>();
+ List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
assertNotNull("Chooses source node for a highest-priority replication"
+ " even if all available source nodes have reached their replication"
@@ -478,7 +512,7 @@ public class TestBlockManager {
UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED));
// Increase the replication count to test replication count > hard limit
- DatanodeDescriptor targets[] = { origNodes.get(1) };
+ DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
assertNull("Does not choose a source node for a highest-priority"
@@ -494,7 +528,11 @@ public class TestBlockManager {
@Test
public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
- node.setStorageID("dummy-storage");
+ DatanodeStorageInfo ds = node.getStorageInfos()[0];
+
+ // TODO: Needs to be fixed. DatanodeUuid is not storageID.
+ node.setDatanodeUuidForTesting(ds.getStorageID());
+
node.isAlive = true;
DatanodeRegistration nodeReg =
@@ -507,35 +545,40 @@ public class TestBlockManager {
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node); // swap in spy
assertEquals(node, bm.getDatanodeManager().getDatanode(node));
- assertTrue(node.isFirstBlockReport());
+ assertEquals(0, ds.getBlockReportCount());
// send block report, should be processed
reset(node);
- bm.processReport(node, "pool", new BlockListAsLongs(null, null));
- verify(node).receivedBlockReport();
- assertFalse(node.isFirstBlockReport());
+
+ bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+ new BlockListAsLongs(null, null));
+ assertEquals(1, ds.getBlockReportCount());
// send block report again, should NOT be processed
reset(node);
- bm.processReport(node, "pool", new BlockListAsLongs(null, null));
- verify(node, never()).receivedBlockReport();
- assertFalse(node.isFirstBlockReport());
+ bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+ new BlockListAsLongs(null, null));
+ assertEquals(1, ds.getBlockReportCount());
// re-register as if node restarted, should update existing node
bm.getDatanodeManager().removeDatanode(node);
reset(node);
bm.getDatanodeManager().registerDatanode(nodeReg);
verify(node).updateRegInfo(nodeReg);
- assertTrue(node.isFirstBlockReport()); // ready for report again
+ assertEquals(0, ds.getBlockReportCount()); // ready for report again
// send block report, should be processed after restart
reset(node);
- bm.processReport(node, "pool", new BlockListAsLongs(null, null));
- verify(node).receivedBlockReport();
- assertFalse(node.isFirstBlockReport());
+ bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+ new BlockListAsLongs(null, null));
+ assertEquals(1, ds.getBlockReportCount());
}
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
- node.setStorageID("dummy-storage");
+ DatanodeStorageInfo ds = node.getStorageInfos()[0];
+
+ // TODO: Needs to be fixed. DatanodeUuid is not storageID.
+ node.setDatanodeUuidForTesting(ds.getStorageID());
+
node.isAlive = true;
DatanodeRegistration nodeReg =
@@ -548,12 +591,13 @@ public class TestBlockManager {
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node); // swap in spy
assertEquals(node, bm.getDatanodeManager().getDatanode(node));
- assertTrue(node.isFirstBlockReport());
+ assertEquals(0, ds.getBlockReportCount());
// send block report while pretending to already have blocks
reset(node);
doReturn(1).when(node).numBlocks();
- bm.processReport(node, "pool", new BlockListAsLongs(null, null));
- verify(node).receivedBlockReport();
- assertFalse(node.isFirstBlockReport());
+ bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
+ new BlockListAsLongs(null, null));
+ assertEquals(1, ds.getBlockReportCount());
}
}
+
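For readers tracking the API change: the hunks above replace the boolean
first-block-report flag on DatanodeDescriptor with a per-storage counter on
DatanodeStorageInfo, and processReport now names the reporting storage
explicitly. A minimal sketch of the new pattern, assuming the TestBlockManager
fixture (bm, node) and the branch APIs exactly as they appear above:

    // Sketch only, not part of the commit. Reports are now attributed to a
    // single storage of the datanode rather than to the datanode as a whole.
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    assertEquals(0, ds.getBlockReportCount());   // nothing reported yet

    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
        new BlockListAsLongs(null, null));
    assertEquals(1, ds.getBlockReportCount());   // first report processed

    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
        new BlockListAsLongs(null, null));
    assertEquals(1, ds.getBlockReportCount());   // duplicate report ignored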
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java Fri Dec 13 17:28:14 2013
@@ -55,21 +55,24 @@ public class TestDatanodeDescriptor {
@Test
public void testBlocksCounter() throws Exception {
- DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
+ DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks());
BlockInfo blk = new BlockInfo(new Block(1L), 1);
BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
+ DatanodeStorageInfo[] storages = dd.getStorageInfos();
+ assertTrue(storages.length > 0);
+ final String storageID = storages[0].getStorageID();
// add first block
- assertTrue(dd.addBlock(blk));
+ assertTrue(dd.addBlock(storageID, blk));
assertEquals(1, dd.numBlocks());
// remove a non-existent block
assertFalse(dd.removeBlock(blk1));
assertEquals(1, dd.numBlocks());
// add an existent block
- assertFalse(dd.addBlock(blk));
+ assertFalse(dd.addBlock(storageID, blk));
assertEquals(1, dd.numBlocks());
// add second block
- assertTrue(dd.addBlock(blk1));
+ assertTrue(dd.addBlock(storageID, blk1));
assertEquals(2, dd.numBlocks());
// remove first block
assertTrue(dd.removeBlock(blk));
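The blocks-counter test above illustrates the other half of the change: a
replica is now added to a specific storage, identified by its storage ID,
while removal stays keyed on the block alone. Condensed, using only calls
that appear in this hunk:

    // Sketch only, not part of the commit.
    DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
    String storageID = dd.getStorageInfos()[0].getStorageID();
    BlockInfo blk = new BlockInfo(new Block(1L), 1);

    dd.addBlock(storageID, blk);   // true: new replica on this storage
    dd.addBlock(storageID, blk);   // false: replica already present
    dd.removeBlock(blk);           // true: removal needs no storage ID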
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java Fri Dec 13 17:28:14 2013
@@ -76,7 +76,7 @@ public class TestDatanodeManager {
it.next();
}
DatanodeRegistration toRemove = it.next().getValue();
- Log.info("Removing node " + toRemove.getStorageID() + " ip " +
+ Log.info("Removing node " + toRemove.getDatanodeUuid() + " ip " +
toRemove.getXferAddr() + " version : " + toRemove.getSoftwareVersion());
//Remove that random node
@@ -90,7 +90,7 @@ public class TestDatanodeManager {
String storageID = "someStorageID" + rng.nextInt(5000);
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
- Mockito.when(dr.getStorageID()).thenReturn(storageID);
+ Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
// If this storageID was already registered
if(sIdToDnReg.containsKey(storageID)) {
@@ -110,7 +110,7 @@ public class TestDatanodeManager {
Mockito.when(dr.getSoftwareVersion()).thenReturn(
"version" + rng.nextInt(5));
- Log.info("Registering node storageID: " + dr.getStorageID() +
+ Log.info("Registering node storageID: " + dr.getDatanodeUuid() +
", version: " + dr.getSoftwareVersion() + ", IP address: "
+ dr.getXferAddr());
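Note that only the accessor changed in TestDatanodeManager: the fixture still
fabricates keys named "someStorageID...", but the map is now keyed on the
datanode UUID, the stable per-node identifier once a node can own several
storages. The mock in isolation (rng is the test's Random):

    // Sketch only, not part of the commit.
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn("someStorageID" + rng.nextInt(5000));
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version" + rng.nextInt(5));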
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java Fri Dec 13 17:28:14 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Test;
/**
@@ -63,6 +63,8 @@ public class TestHeartbeatHandling {
final DatanodeRegistration nodeReg =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
+ final String storageID = DatanodeStorage.generateUuid();
+ dd.updateStorage(new DatanodeStorage(storageID));
final int REMAINING_BLOCKS = 1;
final int MAX_REPLICATE_LIMIT =
@@ -70,7 +72,7 @@ public class TestHeartbeatHandling {
final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
- final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];
+ final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
try {
namesystem.writeLock();
@@ -144,12 +146,15 @@ public class TestHeartbeatHandling {
final DatanodeRegistration nodeReg1 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
+ dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
final DatanodeRegistration nodeReg2 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
+ dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
final DatanodeRegistration nodeReg3 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
+ dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
try {
namesystem.writeLock();
@@ -162,10 +167,13 @@ public class TestHeartbeatHandling {
dd1.setLastUpdate(System.currentTimeMillis());
dd2.setLastUpdate(System.currentTimeMillis());
dd3.setLastUpdate(System.currentTimeMillis());
+ final DatanodeStorageInfo[] storages = {
+ dd1.getStorageInfos()[0],
+ dd2.getStorageInfos()[0],
+ dd3.getStorageInfos()[0]};
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
- BlockUCState.UNDER_RECOVERY,
- new DatanodeDescriptor[] {dd1, dd2, dd3});
+ BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
DatanodeCommand[] cmds =
NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
@@ -187,8 +195,7 @@ public class TestHeartbeatHandling {
dd3.setLastUpdate(System.currentTimeMillis());
blockInfo = new BlockInfoUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
- BlockUCState.UNDER_RECOVERY,
- new DatanodeDescriptor[] {dd1, dd2, dd3});
+ BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
assertEquals(1, cmds.length);
@@ -209,8 +216,7 @@ public class TestHeartbeatHandling {
dd3.setLastUpdate(System.currentTimeMillis() - 80 * 1000);
blockInfo = new BlockInfoUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
- BlockUCState.UNDER_RECOVERY,
- new DatanodeDescriptor[] {dd1, dd2, dd3});
+ BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
assertEquals(1, cmds.length);
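The heartbeat tests show the registration-side prerequisite: a descriptor must
first be given a storage (updateStorage with a generated UUID) before it can
appear as a replica location, and BlockInfoUnderConstruction now records its
expected locations as DatanodeStorageInfo[]. The construction pattern, reduced
to two nodes and assuming the signatures exactly as shown above:

    // Sketch only, not part of the commit.
    dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));

    DatanodeStorageInfo[] storages = {
        dd1.getStorageInfos()[0], dd2.getStorageInfos()[0]};

    BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
        new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
        2,                            // expected replication
        BlockUCState.UNDER_RECOVERY,  // state that yields a recovery command
        storages);                    // locations are storages, not datanodes
    dd1.addBlockToBeRecovered(blockInfo);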
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java Fri Dec 13 17:28:14 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.assertTrue;
import java.util.Collection;
-import java.util.Iterator;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
@@ -98,12 +97,10 @@ public class TestNodeCount {
}
// find out a non-excess node
- final Iterator<DatanodeDescriptor> iter = bm.blocksMap
- .nodeIterator(block.getLocalBlock());
DatanodeDescriptor nonExcessDN = null;
- while (iter.hasNext()) {
- DatanodeDescriptor dn = iter.next();
- Collection<Block> blocks = bm.excessReplicateMap.get(dn.getStorageID());
+ for(DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) {
+ final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
+ Collection<Block> blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid());
if (blocks == null || !blocks.contains(block.getLocalBlock()) ) {
nonExcessDN = dn;
break;
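TestNodeCount demonstrates the general replacement for the removed per-block
node iterator: blocksMap.getStorages(block) enumerates DatanodeStorageInfo
objects and the owning node is recovered via getDatanodeDescriptor(), while
excessReplicateMap is keyed on the datanode UUID. The idiom in isolation:

    // Sketch only, not part of the commit.
    for (DatanodeStorageInfo storage : bm.blocksMap.getStorages(block)) {
      DatanodeDescriptor dn = storage.getDatanodeDescriptor();
      Collection<Block> excess = bm.excessReplicateMap.get(dn.getDatanodeUuid());
      // excess == null when this node holds no over-replicated blocks
    }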
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Fri Dec 13 17:28:14 2013
@@ -103,7 +103,10 @@ public class TestOverReplicatedBlocks {
String corruptMachineName = corruptDataNode.getXferAddr();
for (DatanodeDescriptor datanode : hm.getDatanodes()) {
if (!corruptMachineName.equals(datanode.getXferAddr())) {
- datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0L, 0L, 0, 0);
+ datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
+ datanode.updateHeartbeat(
+ BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
+ 0L, 0L, 0, 0);
}
}
@@ -155,7 +158,7 @@ public class TestOverReplicatedBlocks {
DataNode lastDN = cluster.getDataNodes().get(3);
DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
lastDN, namesystem.getBlockPoolId());
- String lastDNid = dnReg.getStorageID();
+ String lastDNid = dnReg.getDatanodeUuid();
final Path fileName = new Path("/foo2");
DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
@@ -220,3 +223,4 @@ public class TestOverReplicatedBlocks {
}
}
}
+
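In TestOverReplicatedBlocks the heartbeat plumbing follows the same split:
capacity figures are set per storage and then folded into the heartbeat as
storage reports. The old eight-argument call suggests the first four values
are capacity, dfsUsed, remaining and blockPoolUsed, though that reading is
inferred, not stated in the diff:

    // Sketch only, not part of the commit. Argument meanings are inferred.
    datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
    datanode.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
        0L, 0L, 0, 0);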
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java Fri Dec 13 17:28:14 2013
@@ -43,8 +43,8 @@ public class TestPendingDataNodeMessages
@Test
public void testQueues() {
DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
- msgs.enqueueReportedBlock(fakeDN, block1Gs1, ReplicaState.FINALIZED);
- msgs.enqueueReportedBlock(fakeDN, block1Gs2, ReplicaState.FINALIZED);
+ msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs1, ReplicaState.FINALIZED);
+ msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs2, ReplicaState.FINALIZED);
assertEquals(2, msgs.count());
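Pending datanode messages gain the same per-storage dimension; the literal
"STORAGE_ID" placeholder suggests the ID is carried through the queue rather
than validated at enqueue time:

    // Sketch only, not part of the commit.
    msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs1, ReplicaState.FINALIZED);
    assertEquals(1, msgs.count());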
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java?rev=1550774&r1=1550773&r2=1550774&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java Fri Dec 13 17:28:14 2013
@@ -43,8 +43,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.junit.Test;
-import com.google.common.base.Preconditions;
-
/**
* This class tests the internals of PendingReplicationBlocks.java,
* as well as how PendingReplicationBlocks acts in BlockManager
@@ -54,22 +52,7 @@ public class TestPendingReplication {
private static final int DFS_REPLICATION_INTERVAL = 1;
// Number of datanodes in the cluster
private static final int DATANODE_COUNT = 5;
-
- private DatanodeDescriptor genDatanodeId(int seed) {
- seed = seed % 256;
- String ip = seed + "." + seed + "." + seed + "." + seed;
- return DFSTestUtil.getDatanodeDescriptor(ip, null);
- }
- private DatanodeDescriptor[] genDatanodes(int number) {
- Preconditions.checkArgument(number >= 0);
- DatanodeDescriptor[] nodes = new DatanodeDescriptor[number];
- for (int i = 0; i < number; i++) {
- nodes[i] = genDatanodeId(i);
- }
- return nodes;
- }
-
@Test
public void testPendingReplication() {
PendingReplicationBlocks pendingReplications;
@@ -79,9 +62,13 @@ public class TestPendingReplication {
//
// Add 10 blocks to pendingReplications.
//
- for (int i = 0; i < 10; i++) {
+ DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
+ for (int i = 0; i < storages.length; i++) {
Block block = new Block(i, i, 0);
- pendingReplications.increment(block, genDatanodes(i));
+ DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
+ System.arraycopy(storages, 0, targets, 0, i);
+ pendingReplications.increment(block,
+ DatanodeStorageInfo.toDatanodeDescriptors(targets));
}
assertEquals("Size of pendingReplications ",
10, pendingReplications.size());
@@ -91,16 +78,18 @@ public class TestPendingReplication {
// remove one item and reinsert it
//
Block blk = new Block(8, 8, 0);
- pendingReplications.decrement(blk, genDatanodeId(7)); // removes one replica
+ pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
assertEquals("pendingReplications.getNumReplicas ",
7, pendingReplications.getNumReplicas(blk));
for (int i = 0; i < 7; i++) {
// removes all replicas
- pendingReplications.decrement(blk, genDatanodeId(i));
+ pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
}
assertTrue(pendingReplications.size() == 9);
- pendingReplications.increment(blk, genDatanodes(8));
+ pendingReplications.increment(blk,
+ DatanodeStorageInfo.toDatanodeDescriptors(
+ DFSTestUtil.createDatanodeStorageInfos(8)));
assertTrue(pendingReplications.size() == 10);
//
@@ -128,7 +117,9 @@ public class TestPendingReplication {
for (int i = 10; i < 15; i++) {
Block block = new Block(i, i, 0);
- pendingReplications.increment(block, genDatanodes(i));
+ pendingReplications.increment(block,
+ DatanodeStorageInfo.toDatanodeDescriptors(
+ DFSTestUtil.createDatanodeStorageInfos(i)));
}
assertTrue(pendingReplications.size() == 15);
@@ -210,7 +201,7 @@ public class TestPendingReplication {
DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
poolId);
StorageReceivedDeletedBlocks[] report = {
- new StorageReceivedDeletedBlocks(dnR.getStorageID(),
+ new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
@@ -227,7 +218,7 @@ public class TestPendingReplication {
DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
poolId);
StorageReceivedDeletedBlocks[] report =
- { new StorageReceivedDeletedBlocks(dnR.getStorageID(),
+ { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
@@ -291,9 +282,9 @@ public class TestPendingReplication {
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
- "TEST");
+ "STORAGE_ID", "TEST");
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
- "TEST");
+ "STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
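Finally, TestPendingReplication swaps its private genDatanodeId/genDatanodes
helpers for shared fixtures: DFSTestUtil.createDatanodeStorageInfos(n)
fabricates n storages, and DatanodeStorageInfo.toDatanodeDescriptors(...)
unwraps them for APIs that still take descriptors. Composed, using only the
calls visible in this file's hunks:

    // Sketch only, not part of the commit.
    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(8);
    pendingReplications.increment(blk,
        DatanodeStorageInfo.toDatanodeDescriptors(storages));
    pendingReplications.decrement(blk, storages[0].getDatanodeDescriptor());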