Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2011/12/13 20:02:43 UTC
svn commit: r1213867 [2/2] - in
/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/protocolPB/ s...
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Tue Dec 13 19:02:37 2011
@@ -113,7 +113,7 @@ implements Writable, NodeRegistration {
/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
super.write(out);
@@ -124,7 +124,7 @@ implements Writable, NodeRegistration {
exportedKeys.write(out);
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
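The swap from /** {@inheritDoc} */ to @Override above is more than cosmetic: a javadoc comment containing only {@inheritDoc} is redundant (javadoc inherits documentation from the overridden method automatically), while the @Override annotation makes the compiler verify that the method really overrides a supertype method. A minimal sketch of the distinction, with illustrative class names not taken from this commit:

    // Illustrative only: @Override fails compilation if the signature
    // drifts from the supertype; a bare {@inheritDoc} comment does not.
    import java.io.DataOutput;
    import java.io.IOException;

    class BaseRegistration {
        public void write(DataOutput out) throws IOException { /* ... */ }
    }

    class SubRegistration extends BaseRegistration {
        @Override  // compiler-checked: matches BaseRegistration.write
        public void write(DataOutput out) throws IOException {
            super.write(out);
        }
        // If the parameter type were changed to, say, java.io.OutputStream,
        // @Override would turn the silent non-override into a compile error.
    }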
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java Tue Dec 13 19:02:37 2011
@@ -74,7 +74,7 @@ public class DatanodeRegistrationWritabl
/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
datanodeId.write(out);
@@ -85,7 +85,7 @@ public class DatanodeRegistrationWritabl
exportedKeys.write(out);
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
datanodeId.readFields(in);
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1213862
/hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
/hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
/hadoop/core/trunk/src/c++/libhdfs:776175-784663
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Tue Dec 13 19:02:37 2011
@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
}
message GetBlockLocationsResponseProto {
- required LocatedBlocksProto locations = 1;
+ optional LocatedBlocksProto locations = 1;
}
message GetServerDefaultsRequestProto { // No parameters
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // v
message AddBlockRequestProto {
required string src = 1;
required string clientName = 2;
- required ExtendedBlockProto previous = 3;
+ optional ExtendedBlockProto previous = 3;
repeated DatanodeInfoProto excludeNodes = 4;
}
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressReques
required UpgradeActionProto action = 1;
}
message DistributedUpgradeProgressResponseProto {
- required UpgradeStatusReportProto report = 1;
+ optional UpgradeStatusReportProto report = 1;
}
message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
}
message GetFileInfoResponseProto {
- required HdfsFileStatusProto fs = 1;
+ optional HdfsFileStatusProto fs = 1;
}
message GetFileLinkInfoRequestProto {
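The required-to-optional changes in this file are correctness fixes: protocol buffers will not serialize a message whose required field is unset, so a response such as GetFileInfoResponseProto could never express "no such file". With optional, the server simply leaves the field unset and the client tests for presence. A hedged sketch of the client-side unwrapping (the has/get accessor pair is standard protobuf-generated Java; the surrounding translator code is illustrative, not quoted from this commit):

    // Illustrative: map "optional field absent" to null, as a
    // ClientNamenodeProtocol translator layer would need to do.
    GetFileInfoResponseProto resp = rpcProxy.getFileInfo(null, request);
    HdfsFileStatusProto fs = resp.hasFs() ? resp.getFs() : null;
    if (fs == null) {
        // File does not exist; with a required field this response
        // could not have been built at all.
    }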
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Tue Dec 13 19:02:37 2011
@@ -184,7 +184,7 @@ message NNHAStatusHeartbeatProto {
* haStatus - Status (from an HA perspective) of the NN sending this response
*/
message HeartbeatResponseProto {
- repeated DatanodeCommandProto cmds = 1;
+ repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
required NNHAStatusHeartbeatProto haStatus = 2;
}
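The added comment records a contract of the Java side of this protocol: the commands carried in a heartbeat response may be absent, so datanode code consuming the response must not assume a non-null, non-empty array. A hedged sketch of the defensive handling (processCommand is a hypothetical helper, not quoted from this commit):

    // Illustrative guard: tolerate a heartbeat response with no commands.
    DatanodeCommand[] cmds = heartbeatResponse.getCommands();
    if (cmds != null) {
        for (DatanodeCommand cmd : cmds) {
            if (cmd != null) {
                processCommand(cmd);  // hypothetical helper
            }
        }
    }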
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Tue Dec 13 19:02:37 2011
@@ -30,7 +30,8 @@ message ExtendedBlockProto {
required string poolId = 1;   // Block pool id - globally unique across clusters
required uint64 blockId = 2; // the local id within a pool
required uint64 generationStamp = 3;
- optional uint64 numBytes = 4; // block len does not belong in ebid - here for historical reasons
+ optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid
+ // here for historical reasons
}
/**
@@ -65,12 +66,12 @@ message DatanodeInfosProto {
*/
message DatanodeInfoProto {
required DatanodeIDProto id = 1;
- optional uint64 capacity = 2;
- optional uint64 dfsUsed = 3;
- optional uint64 remaining = 4;
- optional uint64 blockPoolUsed = 5;
- optional uint64 lastUpdate = 6;
- optional uint32 xceiverCount = 7;
+ optional uint64 capacity = 2 [default = 0];
+ optional uint64 dfsUsed = 3 [default = 0];
+ optional uint64 remaining = 4 [default = 0];
+ optional uint64 blockPoolUsed = 5 [default = 0];
+ optional uint64 lastUpdate = 6 [default = 0];
+ optional uint32 xceiverCount = 7 [default = 0];
optional string location = 8;
optional string hostName = 9;
enum AdminState {
@@ -79,7 +80,7 @@ message DatanodeInfoProto {
DECOMMISSIONED = 2;
}
- optional AdminState adminState = 10;
+ optional AdminState adminState = 10 [default = NORMAL];
}
/**
@@ -162,8 +163,8 @@ message HdfsFileStatusProto {
optional bytes symlink = 9; // if symlink, target encoded java UTF8
// Optional fields for file
- optional uint32 block_replication = 10; // Actually a short - only 16bits used
- optional uint64 blocksize = 11;
+ optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+ optional uint64 blocksize = 11 [default = 0];
optional LocatedBlocksProto locations = 12;  // supplied only if asked by client
}
@@ -218,7 +219,7 @@ message NamenodeRegistrationProto {
CHECKPOINT = 3;
}
required StorageInfoProto storageInfo = 3; // Node information
- optional NamenodeRoleProto role = 4; // Namenode role
+ optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
}
/**
@@ -264,7 +265,7 @@ message CheckpointCommandProto {
message BlockProto {
required uint64 blockId = 1;
required uint64 genStamp = 2;
- optional uint64 numBytes = 3;
+ optional uint64 numBytes = 3 [default = 0];
}
/**
@@ -313,7 +314,7 @@ message NamespaceInfoProto {
message BlockKeyProto {
required uint32 keyId = 1; // Key identifier
required uint64 expiryDate = 2; // Expiry time in milliseconds
- required bytes keyBytes = 3; // Key secret
+ optional bytes keyBytes = 3; // Key secret
}
/**
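The [default = ...] annotations added throughout this file make the fallback values explicit: in proto2, unset optional numeric fields already read back as 0 and enum fields as their first value, but declaring the default documents the intent and lets readers skip the hasXxx() presence check wherever 0 or NORMAL is a safe fallback. A hedged sketch against the generated Java API (the datanodeId value is illustrative):

    // Illustrative: an unset optional field reads back as its declared default.
    DatanodeInfoProto info = DatanodeInfoProto.newBuilder()
        .setId(datanodeId)   // only the required field is populated
        .build();
    long capacity = info.getCapacity();                        // 0 (declared default)
    DatanodeInfoProto.AdminState state = info.getAdminState(); // NORMAL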
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1213862
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1213862
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1213862
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 13 19:02:37 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1213389
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1213862
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Dec 13 19:02:37 2011
@@ -515,6 +515,11 @@ public class MiniDFSCluster {
this.waitSafeMode = waitSafeMode;
// use alternate RPC engine if spec'd
+ /*
+ Turned off - see HDFS-2647 and HDFS-2660 for related comments.
+ This code can be turned on again when Avro RPC is enabled, using a mechanism
+ similar to protobuf.
+
String rpcEngineName = System.getProperty("hdfs.rpc.engine");
if (rpcEngineName != null && !"".equals(rpcEngineName)) {
@@ -538,6 +543,7 @@ public class MiniDFSCluster {
conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
false);
}
+ */
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java Tue Dec 13 19:02:37 2011
@@ -28,9 +28,16 @@ public class TestDfsOverAvroRpc extends
@Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
+ /*
+ Test turned off - see HDFS-2647 and HDFS-2660 for related comments.
+ This test can be turned on again when Avro RPC is enabled, using a mechanism
+ similar to protobuf.
+ */
+ /*
System.setProperty("hdfs.rpc.engine",
"org.apache.hadoop.ipc.AvroRpcEngine");
super.testWorkingDirectory();
+ */
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Tue Dec 13 19:02:37 2011
@@ -17,26 +17,32 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.junit.Assert.*;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
-
-import junit.framework.TestCase;
+import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.junit.Test;
-public class TestReplicationPolicy extends TestCase {
+public class TestReplicationPolicy {
+ private Random random = DFSUtil.getRandom();
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 6;
private static final Configuration CONF = new HdfsConfiguration();
@@ -90,6 +96,7 @@ public class TestReplicationPolicy exten
* the 1st is on dataNodes[0] and the 2nd is on a different rack.
* @throws Exception
*/
+ @Test
public void testChooseTarget1() throws Exception {
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
@@ -150,6 +157,7 @@ public class TestReplicationPolicy exten
* should be placed on a third rack.
* @throws Exception
*/
+ @Test
public void testChooseTarget2() throws Exception {
HashMap<Node, Node> excludedNodes;
DatanodeDescriptor[] targets;
@@ -225,6 +233,7 @@ public class TestReplicationPolicy exten
* and the rest should be placed on the third rack.
* @throws Exception
*/
+ @Test
public void testChooseTarget3() throws Exception {
// make data node 0 to be not qualified to choose
dataNodes[0].updateHeartbeat(
@@ -278,6 +287,7 @@ public class TestReplicationPolicy exten
* the 3rd replica should be placed on the same rack as the 1st replica,
* @throws Exception
*/
+ @Test
public void testChoooseTarget4() throws Exception {
// make data node 0 & 1 to be not qualified to choose: not enough disk space
for(int i=0; i<2; i++) {
@@ -325,6 +335,7 @@ public class TestReplicationPolicy exten
* the 3rd replica should be placed on the same rack as the 2nd replica,
* @throws Exception
*/
+ @Test
public void testChooseTarget5() throws Exception {
DatanodeDescriptor[] targets;
targets = replicator.chooseTarget(filename,
@@ -354,6 +365,7 @@ public class TestReplicationPolicy exten
* the 1st replica. The 3rd replica can be placed randomly.
* @throws Exception
*/
+ @Test
public void testRereplicate1() throws Exception {
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
chosenNodes.add(dataNodes[0]);
@@ -388,6 +400,7 @@ public class TestReplicationPolicy exten
* the rest replicas can be placed randomly,
* @throws Exception
*/
+ @Test
public void testRereplicate2() throws Exception {
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
chosenNodes.add(dataNodes[0]);
@@ -417,6 +430,7 @@ public class TestReplicationPolicy exten
* the rest replicas can be placed randomly,
* @throws Exception
*/
+ @Test
public void testRereplicate3() throws Exception {
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
chosenNodes.add(dataNodes[0]);
@@ -450,4 +464,122 @@ public class TestReplicationPolicy exten
assertTrue(cluster.isOnSameRack(dataNodes[2], targets[0]));
}
+ /**
+ * Test that high priority blocks are processed before low priority
+ * blocks.
+ */
+ @Test(timeout = 60000)
+ public void testReplicationWithPriority() throws Exception {
+ int DFS_NAMENODE_REPLICATION_INTERVAL = 1000;
+ int HIGH_PRIORITY = 0;
+ Configuration conf = new Configuration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+ .format(true).build();
+ try {
+ cluster.waitActive();
+ final UnderReplicatedBlocks neededReplications = (UnderReplicatedBlocks) cluster
+ .getNameNode().getNamesystem().getBlockManager().neededReplications;
+ for (int i = 0; i < 100; i++) {
+ // Adding the blocks directly to normal priority
+ neededReplications.add(new Block(random.nextLong()), 2, 0, 3);
+ }
+ // Wait for the replication interval, so processing of the normal
+ // priority blocks starts
+ Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
+
+ // Adding the block directly to high priority list
+ neededReplications.add(new Block(random.nextLong()), 1, 0, 3);
+
+ // Wait for the replication interval
+ Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
+
+ // Check that replication completed successfully. There is no need to
+ // wait until all 100 normal blocks are processed.
+ assertFalse("Not able to clear the element from high priority list",
+ neededReplications.iterator(HIGH_PRIORITY).hasNext());
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Test that chooseUnderReplicatedBlocks processes blocks based on priority
+ */
+ @Test
+ public void testChooseUnderReplicatedBlocks() throws Exception {
+ UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
+
+ for (int i = 0; i < 5; i++) {
+ // Adding QUEUE_HIGHEST_PRIORITY block
+ underReplicatedBlocks.add(new Block(random.nextLong()), 1, 0, 3);
+
+ // Adding QUEUE_VERY_UNDER_REPLICATED block
+ underReplicatedBlocks.add(new Block(random.nextLong()), 2, 0, 7);
+
+ // Adding QUEUE_UNDER_REPLICATED block
+ underReplicatedBlocks.add(new Block(random.nextLong()), 6, 0, 6);
+
+ // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
+ underReplicatedBlocks.add(new Block(random.nextLong()), 5, 0, 6);
+
+ // Adding QUEUE_WITH_CORRUPT_BLOCKS block
+ underReplicatedBlocks.add(new Block(random.nextLong()), 0, 0, 3);
+ }
+
+ // Choose 6 blocks from UnderReplicatedBlocks. Then it should pick 5 blocks
+ // from QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_UNDER_REPLICATED.
+ List<List<Block>> chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(6);
+ assertTheChosenBlocks(chosenBlocks, 5, 1, 0, 0, 0);
+
+ // Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 4 blocks from
+ // QUEUE_VERY_UNDER_REPLICATED, 5 blocks from QUEUE_UNDER_REPLICATED and 1
+ // block from QUEUE_REPLICAS_BADLY_DISTRIBUTED.
+ chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
+ assertTheChosenBlocks(chosenBlocks, 0, 4, 5, 1, 0);
+
+ // Adding QUEUE_HIGHEST_PRIORITY
+ underReplicatedBlocks.add(new Block(random.nextLong()), 1, 0, 3);
+
+ // Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 1 block from
+ // QUEUE_HIGHEST_PRIORITY, 4 blocks from QUEUE_REPLICAS_BADLY_DISTRIBUTED
+ // and 5 blocks from QUEUE_WITH_CORRUPT_BLOCKS.
+ chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
+ assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 4, 5);
+
+ // Since the end of all the lists has been reached,
+ // picking should restart from the beginning.
+ // Choose 7 blocks from UnderReplicatedBlocks. Then it should pick 6 blocks from
+ // QUEUE_HIGHEST_PRIORITY, 1 block from QUEUE_VERY_UNDER_REPLICATED.
+ chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(7);
+ assertTheChosenBlocks(chosenBlocks, 6, 1, 0, 0, 0);
+ }
+
+ /** Asserts that the chosen blocks match the expected count for each priority queue. */
+ private void assertTheChosenBlocks(
+ List<List<Block>> chosenBlocks, int firstPrioritySize,
+ int secondPrioritySize, int thirdPrioritySize, int fourthPrioritySize,
+ int fifthPrioritySize) {
+ assertEquals(
+ "Not returned the expected number of QUEUE_HIGHEST_PRIORITY blocks",
+ firstPrioritySize, chosenBlocks.get(
+ UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY).size());
+ assertEquals(
+ "Not returned the expected number of QUEUE_VERY_UNDER_REPLICATED blocks",
+ secondPrioritySize, chosenBlocks.get(
+ UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED).size());
+ assertEquals(
+ "Not returned the expected number of QUEUE_UNDER_REPLICATED blocks",
+ thirdPrioritySize, chosenBlocks.get(
+ UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED).size());
+ assertEquals(
+ "Not returned the expected number of QUEUE_REPLICAS_BADLY_DISTRIBUTED blocks",
+ fourthPrioritySize, chosenBlocks.get(
+ UnderReplicatedBlocks.QUEUE_REPLICAS_BADLY_DISTRIBUTED).size());
+ assertEquals(
+ "Not returned the expected number of QUEUE_WITH_CORRUPT_BLOCKS blocks",
+ fifthPrioritySize, chosenBlocks.get(
+ UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
+ }
}
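Besides the new priority-queue tests, this file migrates from JUnit 3 to JUnit 4: the class no longer extends junit.framework.TestCase, every test method gains an explicit @Test annotation (optionally with a per-test timeout), and assertions come from a static import of org.junit.Assert. A hedged sketch of the resulting shape, with illustrative names:

    // Illustrative JUnit 4 skeleton matching the migration in this diff.
    import static org.junit.Assert.assertEquals;
    import org.junit.Test;

    public class TestExample {            // no "extends TestCase"
        @Test(timeout = 60000)            // discovered via annotation, so the
        public void replicatesBlocks() {  // "test" name prefix is optional
            assertEquals(2, 1 + 1);       // assertions via static import
        }
    }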
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1213867&r1=1213866&r2=1213867&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Tue Dec 13 19:02:37 2011
@@ -145,9 +145,7 @@ public class TestNameNodeMetrics extends
fs.delete(file, true);
filesTotal--; // reduce the filecount for deleted file
- // Wait for more than DATANODE_COUNT replication intervals to ensure all
- // the blocks pending deletion are sent for deletion to the datanodes.
- Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+ waitForDeletion();
updateMetrics();
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
@@ -176,7 +174,7 @@ public class TestNameNodeMetrics extends
assertGauge("PendingReplicationBlocks", 1L, rb);
assertGauge("ScheduledReplicationBlocks", 1L, rb);
fs.delete(file, true);
- updateMetrics();
+ waitForDeletion();
rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 0L, rb);
assertGauge("PendingReplicationBlocks", 0L, rb);
@@ -212,9 +210,15 @@ public class TestNameNodeMetrics extends
assertGauge("UnderReplicatedBlocks", 1L, rb);
assertGauge("MissingBlocks", 1L, rb);
fs.delete(file, true);
- updateMetrics();
+ waitForDeletion();
assertGauge("UnderReplicatedBlocks", 0L, getMetrics(NS_METRICS));
}
+
+ private void waitForDeletion() throws InterruptedException {
+ // Wait for more than DATANODE_COUNT replication intervals to ensure all
+ // the blocks pending deletion are sent for deletion to the datanodes.
+ Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+ }
public void testRenameMetrics() throws Exception {
Path src = getTestPath("src");
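The change above folds the duplicated fixed-length sleep into a single waitForDeletion() helper. A fixed sleep keeps the test simple but always pays the worst-case wait; a common alternative (not what this commit does) is to poll the condition against a deadline. A hedged, self-contained sketch:

    // Illustrative polling wait, as an alternative to a fixed Thread.sleep.
    private static void waitUntil(java.util.concurrent.Callable<Boolean> done,
                                  long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!done.call()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("condition not met in " + timeoutMs + " ms");
            }
            Thread.sleep(100);  // re-check interval
        }
    }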