Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/08/08 12:06:47 UTC
svn commit: r1154899 [2/2] - in /hadoop/common/trunk/hdfs: ./
src/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/java/org/apache/hadoop/hdfs/server/common/
src/java/org/apache/hadoop/hdfs/server/namenode/
src/test/hdfs/org/apache/hadoop/hdfs/ ...
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Mon Aug 8 10:06:45 2011
@@ -42,13 +42,14 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -229,14 +230,10 @@ class NamenodeJspHelper {
void generateHealthReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSNamesystem fsn = nn.getNamesystem();
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- fsn.DFSNodesStatus(live, dead);
- // If a data node has been first included in the include list,
- // then decommissioned, then removed from both include and exclude list.
- // We make the web console to "forget" this node by not displaying it.
- fsn.removeDecomNodeFromList(live);
- fsn.removeDecomNodeFromList(dead);
+ final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, dead, true);
int liveDecommissioned = 0;
for (DatanodeDescriptor d : live) {
@@ -248,8 +245,7 @@ class NamenodeJspHelper {
deadDecommissioned += d.isDecommissioned() ? 1 : 0;
}
- ArrayList<DatanodeDescriptor> decommissioning = fsn
- .getDecommissioningNodes();
+ final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
sorterField = request.getParameter("sorter/field");
sorterOrder = request.getParameter("sorter/order");
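
The hunk above folds FSNamesystem.DFSNodesStatus and the two removeDecomNodeFromList calls into a single DatanodeManager.fetchDatanodes call. A minimal sketch of the new caller-side pattern, assuming an FSNamesystem handle as in the JSP helper (servlet plumbing omitted):

    static int countLiveDecommissioned(FSNamesystem fsn) {
      final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      // true = hide nodes that were decommissioned and then dropped from
      // both the include and exclude lists, i.e. the behavior the deleted
      // removeDecomNodeFromList comment describes.
      dm.fetchDatanodes(live, dead, true);
      int liveDecommissioned = 0;
      for (DatanodeDescriptor d : live) {
        liveDecommissioned += d.isDecommissioned() ? 1 : 0;
      }
      return liveDecommissioned;
    }
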
@@ -370,15 +366,10 @@ class NamenodeJspHelper {
return token == null ? null : token.encodeToUrlString();
}
- /** @return the network topology. */
- static NetworkTopology getNetworkTopology(final NameNode namenode) {
- return namenode.getNamesystem().getBlockManager().getDatanodeManager(
- ).getNetworkTopology();
- }
-
/** @return a randomly chosen datanode. */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
- return (DatanodeDescriptor)getNetworkTopology(namenode).chooseRandom(
+ return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
+ ).getDatanodeManager().getNetworkTopology().chooseRandom(
NodeBase.ROOT);
}
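
With the one-caller getNetworkTopology helper inlined, the random pick reads as a single accessor chain. A hedged usage sketch; the null guard is an assumption (chooseRandom could plausibly find no node), not something the hunk shows:

    static String randomDatanodeName(NameNode namenode) {
      // Walk namesystem -> block manager -> datanode manager -> topology,
      // then ask the topology for any node under the root.
      final DatanodeDescriptor dn = (DatanodeDescriptor) namenode
          .getNamesystem().getBlockManager().getDatanodeManager()
          .getNetworkTopology().chooseRandom(NodeBase.ROOT);
      return dn == null ? null : dn.getHostName();
    }
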
@@ -564,12 +555,14 @@ class NamenodeJspHelper {
void generateNodesList(ServletContext context, JspWriter out,
HttpServletRequest request) throws IOException {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
- nn.getNamesystem().DFSNodesStatus(live, dead);
- nn.getNamesystem().removeDecomNodeFromList(live);
- nn.getNamesystem().removeDecomNodeFromList(dead);
+ final FSNamesystem ns = nn.getNamesystem();
+ final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, dead, true);
+
InetSocketAddress nnSocketAddress = (InetSocketAddress) context
.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
@@ -678,8 +671,7 @@ class NamenodeJspHelper {
}
} else if (whatNodes.equals("DECOMMISSIONING")) {
// Decommissioning Nodes
- ArrayList<DatanodeDescriptor> decommissioning = nn.getNamesystem()
- .getDecommissioningNodes();
+ final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> "
+ " Decommissioning Datanodes : " + decommissioning.size()
+ "</a><br><br>\n");
@@ -715,16 +707,17 @@ class NamenodeJspHelper {
static class XMLBlockInfo {
final Block block;
final INodeFile inode;
- final FSNamesystem fsn;
+ final BlockManager blockManager;
- public XMLBlockInfo(FSNamesystem fsn, Long blockId) {
- this.fsn = fsn;
+ XMLBlockInfo(FSNamesystem fsn, Long blockId) {
+ this.blockManager = fsn.getBlockManager();
+
if (blockId == null) {
this.block = null;
this.inode = null;
} else {
this.block = new Block(blockId);
- this.inode = fsn.getBlockManager().getINode(block);
+ this.inode = blockManager.getINode(block);
}
}
@@ -798,31 +791,25 @@ class NamenodeJspHelper {
}
doc.startTag("replicas");
-
- if (fsn.getBlockManager().blocksMap.contains(block)) {
- Iterator<DatanodeDescriptor> it =
- fsn.getBlockManager().blocksMap.nodeIterator(block);
-
- while (it.hasNext()) {
- doc.startTag("replica");
-
- DatanodeDescriptor dd = it.next();
-
- doc.startTag("host_name");
- doc.pcdata(dd.getHostName());
- doc.endTag();
-
- boolean isCorrupt = fsn.getCorruptReplicaBlockIds(0,
- block.getBlockId()) != null;
-
- doc.startTag("is_corrupt");
- doc.pcdata(""+isCorrupt);
- doc.endTag();
-
- doc.endTag(); // </replica>
- }
+ for(final Iterator<DatanodeDescriptor> it = blockManager.datanodeIterator(block);
+ it.hasNext(); ) {
+ doc.startTag("replica");
- }
+ DatanodeDescriptor dd = it.next();
+
+ doc.startTag("host_name");
+ doc.pcdata(dd.getHostName());
+ doc.endTag();
+
+ boolean isCorrupt = blockManager.getCorruptReplicaBlockIds(0,
+ block.getBlockId()) != null;
+
+ doc.startTag("is_corrupt");
+ doc.pcdata(""+isCorrupt);
+ doc.endTag();
+
+ doc.endTag(); // </replica>
+ }
doc.endTag(); // </replicas>
}
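
The rewrite above drops direct access to BlockManager.blocksMap (contains/nodeIterator) in favor of the new datanodeIterator facade; removing the contains() guard implies the iterator is simply empty for blocks the map does not know about, which this sketch assumes:

    static void printReplicas(BlockManager blockManager, Block block) {
      for (final Iterator<DatanodeDescriptor> it =
          blockManager.datanodeIterator(block); it.hasNext(); ) {
        final DatanodeDescriptor dd = it.next();
        // Same corruption probe as the JSP code above.
        final boolean isCorrupt = blockManager.getCorruptReplicaBlockIds(
            0, block.getBlockId()) != null;
        System.out.println(dd.getHostName() + " corrupt=" + isCorrupt);
      }
    }
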
@@ -834,14 +821,14 @@ class NamenodeJspHelper {
// utility class used in corrupt_replicas_xml.jsp
static class XMLCorruptBlockInfo {
- final FSNamesystem fsn;
final Configuration conf;
final Long startingBlockId;
final int numCorruptBlocks;
+ final BlockManager blockManager;
- public XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
+ XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
int numCorruptBlocks, Long startingBlockId) {
- this.fsn = fsn;
+ this.blockManager = fsn.getBlockManager();
this.conf = conf;
this.numCorruptBlocks = numCorruptBlocks;
this.startingBlockId = startingBlockId;
@@ -864,17 +851,16 @@ class NamenodeJspHelper {
doc.endTag();
doc.startTag("num_missing_blocks");
- doc.pcdata(""+fsn.getMissingBlocksCount());
+ doc.pcdata(""+blockManager.getMissingBlocksCount());
doc.endTag();
doc.startTag("num_corrupt_replica_blocks");
- doc.pcdata(""+fsn.getCorruptReplicaBlocks());
+ doc.pcdata(""+blockManager.getCorruptReplicaBlocksCount());
doc.endTag();
doc.startTag("corrupt_replica_block_ids");
- long[] corruptBlockIds
- = fsn.getCorruptReplicaBlockIds(numCorruptBlocks,
- startingBlockId);
+ final long[] corruptBlockIds = blockManager.getCorruptReplicaBlockIds(
+ numCorruptBlocks, startingBlockId);
if (corruptBlockIds != null) {
for (Long blockId: corruptBlockIds) {
doc.startTag("block_id");
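
XMLCorruptBlockInfo now answers all three corrupt-replica queries from the BlockManager rather than the namesystem. A hedged sketch of the trio, assuming startingBlockId may be passed as null (the JSP hands it a nullable Long):

    static long[] corruptSummary(FSNamesystem fsn, int max, Long startingBlockId) {
      final BlockManager bm = fsn.getBlockManager();
      // Counters moved behind BlockManager in this change set.
      final long missing = bm.getMissingBlocksCount();
      final long corrupt = bm.getCorruptReplicaBlocksCount();
      System.out.println("missing=" + missing + ", corrupt=" + corrupt);
      // Page of corrupt block ids, as rendered into <corrupt_replica_block_ids>.
      return bm.getCorruptReplicaBlockIds(max, startingBlockId);
    }
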
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Aug 8 10:06:45 2011
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
@@ -375,10 +376,9 @@ public class DFSTestUtil {
/*
* Return the total capacity of all live DNs.
*/
- public static long getLiveDatanodeCapacity(FSNamesystem ns) {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- ns.DFSNodesStatus(live, dead);
+ public static long getLiveDatanodeCapacity(DatanodeManager dm) {
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, null, false);
long capacity = 0;
for (final DatanodeDescriptor dn : live) {
capacity += dn.getCapacity();
@@ -389,21 +389,20 @@ public class DFSTestUtil {
/*
* Return the capacity of the given live DN.
*/
- public static long getDatanodeCapacity(FSNamesystem ns, int index) {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- ns.DFSNodesStatus(live, dead);
+ public static long getDatanodeCapacity(DatanodeManager dm, int index) {
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, null, false);
return live.get(index).getCapacity();
}
/*
* Wait for the given # live/dead DNs, total capacity, and # vol failures.
*/
- public static void waitForDatanodeStatus(FSNamesystem ns, int expectedLive,
+ public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
int expectedDead, long expectedVolFails, long expectedTotalCapacity,
long timeout) throws InterruptedException, TimeoutException {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final int ATTEMPTS = 10;
int count = 0;
long currTotalCapacity = 0;
@@ -413,7 +412,7 @@ public class DFSTestUtil {
Thread.sleep(timeout);
live.clear();
dead.clear();
- ns.DFSNodesStatus(live, dead);
+ dm.fetchDatanodes(live, dead, false);
currTotalCapacity = 0;
volFails = 0;
for (final DatanodeDescriptor dd : live) {
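
The DFSTestUtil helpers are retargeted from FSNamesystem to DatanodeManager, and fetchDatanodes accepts null for a list the caller does not need (getLiveDatanodeCapacity passes null for dead). A sketch of a test body using them; cluster is a running MiniDFSCluster and WAIT_FOR_HEARTBEATS the caller's own timeout constant, both assumptions borrowed from the tests further down:

    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    final long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
    // Block until the NN reports 3 live DNs, 0 dead, 0 volume failures,
    // and the original total capacity.
    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0, origCapacity,
        WAIT_FOR_HEARTBEATS);
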
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Mon Aug 8 10:06:45 2011
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -50,7 +51,9 @@ public class TestBlocksScheduledCounter
((DFSOutputStream)(out.getWrappedStream())).hflush();
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
- cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ dm.fetchDatanodes(dnList, dnList, false);
DatanodeDescriptor dn = dnList.get(0);
assertEquals(1, dn.getBlocksScheduled());
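
One detail worth noting in the hunk above: the test hands the same list in for both the live and dead slots. That is safe here only because the single-node cluster is healthy, so the dead pass contributes nothing; a sketch with that assumption spelled out:

    final DatanodeManager dm = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager();
    final ArrayList<DatanodeDescriptor> dnList =
        new ArrayList<DatanodeDescriptor>();
    // Same list for live and dead: acceptable in this test because no
    // datanode is expected to be dead, so only live entries land in it.
    dm.fetchDatanodes(dnList, dnList, false);
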
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Mon Aug 8 10:06:45 2011
@@ -146,8 +146,8 @@ public class TestFileCorruption extends
// report corrupted block by the third datanode
DatanodeRegistration dnR =
DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
- cluster.getNamesystem().markBlockAsCorrupt(blk,
- new DatanodeInfo(dnR));
+ cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
+ blk, new DatanodeInfo(dnR));
// open the file
fs.open(FILE_PATH);
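
markBlockAsCorrupt leaves FSNamesystem, so tests report corrupt replicas straight to the BlockManager. A minimal sketch matching the hunk, assuming blk and dnR are the ExtendedBlock and DatanodeRegistration set up just above:

    // Report one corrupt replica of blk as seen on the registered node;
    // per the method name, the BlockManager finds the stored block and
    // records the mark.
    cluster.getNamesystem().getBlockManager()
        .findAndMarkBlockAsCorrupt(blk, new DatanodeInfo(dnR));
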
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java Mon Aug 8 10:06:45 2011
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -133,7 +134,7 @@ public class TestBlocksWithNotEnoughRack
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
REPLICATION_FACTOR = 2;
- ns.setReplication("/testFile", REPLICATION_FACTOR);
+ NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
@@ -172,7 +173,7 @@ public class TestBlocksWithNotEnoughRack
String newRacks[] = {"/rack2", "/rack2"};
cluster.startDataNodes(conf, 2, true, null, newRacks);
REPLICATION_FACTOR = 5;
- ns.setReplication("/testFile", REPLICATION_FACTOR);
+ NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
@@ -258,7 +259,7 @@ public class TestBlocksWithNotEnoughRack
// was not the one that lived on the rack with only one replica,
// ie we should still have 2 racks after reducing the repl factor.
REPLICATION_FACTOR = 2;
- ns.setReplication("/testFile", REPLICATION_FACTOR);
+ NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Mon Aug 8 10:06:45 2011
@@ -33,10 +33,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
public class TestOverReplicatedBlocks extends TestCase {
/** Test processOverReplicatedBlock can handle corrupt replicas fine.
@@ -100,7 +99,7 @@ public class TestOverReplicatedBlocks ex
}
// decrease the replication factor to 1;
- namesystem.setReplication(fileName.toString(), (short)1);
+ NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short)1);
// corrupt one won't be chosen to be excess one
// without 4910 the number of live replicas would be 0: block gets lost
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java Mon Aug 8 10:06:45 2011
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
@@ -113,10 +113,11 @@ public class TestDataNodeVolumeFailureRe
* heartbeat their capacities.
*/
Thread.sleep(WAIT_FOR_HEARTBEATS);
- FSNamesystem ns = cluster.getNamesystem();
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
- long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
- long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+ final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+ long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
@@ -160,7 +161,7 @@ public class TestDataNodeVolumeFailureRe
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// Eventually the NN should report two volume failures
- DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2,
+ DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
/*
@@ -177,10 +178,10 @@ public class TestDataNodeVolumeFailureRe
ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- ns.DFSNodesStatus(live, dead);
+ dm.fetchDatanodes(live, dead, false);
live.clear();
dead.clear();
- ns.DFSNodesStatus(live, dead);
+ dm.fetchDatanodes(live, dead, false);
assertEquals("DN3 should have 1 failed volume",
1, live.get(2).getVolumeFailures());
@@ -189,8 +190,8 @@ public class TestDataNodeVolumeFailureRe
* total capacity should be down by three volumes (assuming the host
* did not grow or shrink the data volume while the test was running).
*/
- dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
- DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 3,
+ dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
+ DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 3,
origCapacity - (3*dnCapacity), WAIT_FOR_HEARTBEATS);
/*
@@ -212,7 +213,7 @@ public class TestDataNodeVolumeFailureRe
getMetrics(dns.get(2).getMetrics().name()));
// The NN considers the DN dead
- DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 2,
+ DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 2,
origCapacity - (4*dnCapacity), WAIT_FOR_HEARTBEATS);
/*
@@ -236,7 +237,7 @@ public class TestDataNodeVolumeFailureRe
* and that the volume failure count should be reported as zero by
* both the metrics and the NN.
*/
- DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 0, origCapacity,
+ DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0, origCapacity,
WAIT_FOR_HEARTBEATS);
}
@@ -251,9 +252,10 @@ public class TestDataNodeVolumeFailureRe
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
- FSNamesystem ns = cluster.getNamesystem();
- long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
- long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+ long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail the first volume on both datanodes (we have to keep the
// third healthy so one node in the pipeline will not fail).
@@ -267,13 +269,13 @@ public class TestDataNodeVolumeFailureRe
DFSTestUtil.waitReplication(fs, file1, (short)2);
// The NN reports two volumes failures
- DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2,
+ DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
// After restarting the NN it still see the two failures
cluster.restartNameNode(0);
cluster.waitActive();
- DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2,
+ DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
}
}
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Mon Aug 8 10:06:45 2011
@@ -17,29 +17,30 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
import java.io.File;
import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.log4j.Level;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
/**
* Test the ability of a DN to tolerate volume failures.
@@ -154,9 +155,10 @@ public class TestDataNodeVolumeFailureTo
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
- FSNamesystem ns = cluster.getNamesystem();
- long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
- long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+ long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail a volume on the 2nd DN
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
@@ -168,7 +170,7 @@ public class TestDataNodeVolumeFailureTo
DFSTestUtil.waitReplication(fs, file1, (short)2);
// Check that this single failure caused a DN to die.
- DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 0,
+ DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
// If we restore the volume we should still only be able to get
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Mon Aug 8 10:06:45 2011
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -52,6 +51,11 @@ public class NameNodeAdapter {
public static Server getRpcServer(NameNode namenode) {
return namenode.server;
}
+
+ public static boolean setReplication(final FSNamesystem ns,
+ final String src, final short replication) throws IOException {
+ return ns.setReplication(src, replication);
+ }
public static String getLeaseHolderForPath(NameNode namenode, String path) {
return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
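
With FSNamesystem.setReplication no longer called directly from tests (every setReplication caller in this commit switches to the shim), NameNodeAdapter gains a one-line pass-through. A hedged usage sketch; ns is the cluster's FSNamesystem, and the boolean is FSNamesystem's own success flag, passed through unchanged:

    // Drop /testFile to a single replica via the test-only adapter.
    final boolean changed =
        NameNodeAdapter.setReplication(ns, "/testFile", (short) 1);
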
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Mon Aug 8 10:06:45 2011
@@ -24,6 +24,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Iterator;
+import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -199,13 +201,13 @@ public class TestDecommissioningStatus {
Thread.sleep(5000);
FSNamesystem fsn = cluster.getNamesystem();
+ final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
for (int iteration = 0; iteration < numDatanodes; iteration++) {
String downnode = decommissionNode(fsn, conf, client, localFileSys,
iteration);
decommissionedNodes.add(downnode);
Thread.sleep(5000);
- ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
- .getDecommissioningNodes();
+ final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
if (iteration == 0) {
assertEquals(decommissioningNodes.size(), 1);
DatanodeDescriptor decommNode = decommissioningNodes.get(0);
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Mon Aug 8 10:06:45 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.File;
import java.util.ArrayList;
+import java.util.List;
import junit.framework.TestCase;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -59,11 +61,13 @@ public class TestNamenodeCapacityReport
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
// Ensure the data reported for each data node is right
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- namesystem.DFSNodesStatus(live, dead);
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, dead, false);
assertTrue(live.size() == 1);
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Mon Aug 8 10:06:45 2011
@@ -75,6 +75,7 @@ public class TestNameNodeMetrics extends
private DistributedFileSystem fs;
private Random rand = new Random();
private FSNamesystem namesystem;
+ private BlockManager bm;
private static Path getTestPath(String fileName) {
return new Path(TEST_ROOT_DIR_PATH, fileName);
@@ -85,6 +86,7 @@ public class TestNameNodeMetrics extends
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
+ bm = namesystem.getBlockManager();
fs = (DistributedFileSystem) cluster.getFileSystem();
}
@@ -167,7 +169,7 @@ public class TestNameNodeMetrics extends
// Corrupt first replica of the block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
- namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 1L, rb);
@@ -188,7 +190,7 @@ public class TestNameNodeMetrics extends
Path file = getTestPath("testExcessBlocks");
createFile(file, 100, (short)2);
long totalBlocks = 1;
- namesystem.setReplication(file.toString(), (short)1);
+ NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", totalBlocks, rb);
@@ -204,7 +206,7 @@ public class TestNameNodeMetrics extends
// Corrupt the only replica of the block to result in a missing block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
- namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("UnderReplicatedBlocks", 1L, rb);
Modified: hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=1154899&r1=1154898&r2=1154899&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Mon Aug 8 10:06:45 2011
@@ -455,7 +455,7 @@ public class TestNNLeaseRecovery {
fsn.leaseManager.addLease("mock-lease", file.toString());
if (setStoredBlock) {
when(b1.getINode()).thenReturn(iNFmock);
- fsn.getBlockManager().blocksMap.addINode(b1, iNFmock);
+ fsn.getBlockManager().addINode(b1, iNFmock);
}
when(fsDir.getFileINode(anyString())).thenReturn(iNFmock);