Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/10/30 06:56:25 UTC
svn commit: r1403619 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/
Author: suresh
Date: Tue Oct 30 05:56:24 2012
New Revision: 1403619
URL: http://svn.apache.org/viewvc?rev=1403619&view=rev
Log:
HDFS-4127. Merge change 1403616 from trunk.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1403619&r1=1403618&r2=1403619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 30 05:56:24 2012
@@ -179,6 +179,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-3616. Fix a ConcurrentModificationException bug that BP actor threads
may not be shutdown properly in DataNode. (Jing Zhao via szetszwo)
+ HDFS-4127. Log message is not correct in case of a shortage of replicas.
+ (Junping Du via suresh)
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1403619&r1=1403618&r2=1403619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Oct 30 05:56:24 2012
@@ -184,7 +184,7 @@ public class BlockPlacementPolicyDefault
if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
return writer;
}
- int totalReplicasExpected = numOfReplicas;
+ int totalReplicasExpected = numOfReplicas + results.size();
int numOfResults = results.size();
boolean newBlock = (numOfResults==0);
@@ -230,7 +230,8 @@ public class BlockPlacementPolicyDefault
maxNodesPerRack, results, avoidStaleNodes);
} catch (NotEnoughReplicasException e) {
LOG.warn("Not able to place enough replicas, still in need of "
- + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+ + (totalReplicasExpected - results.size()) + " to reach "
+ + totalReplicasExpected + "\n"
+ e.getMessage());
if (avoidStaleNodes) {
// excludedNodes now has - initial excludedNodes, any nodes that were
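
The change above corrects the bookkeeping in chooseTarget: totalReplicasExpected now counts both the replicas still to be placed (numOfReplicas) and those already chosen (results.size()), so the warning can report the true remaining shortfall as totalReplicasExpected - results.size() instead of the raw numOfReplicas. A minimal standalone sketch of that arithmetic, with hypothetical values and a hypothetical class name (this is not HDFS code):

import java.util.ArrayList;
import java.util.List;

public class ReplicaDeficitSketch {
  public static void main(String[] args) {
    int numOfReplicas = 2;                       // replicas still to be placed
    List<String> results = new ArrayList<String>();
    results.add("dn3");                          // one replica already chosen

    int before = numOfReplicas;                  // old expected total: 2 (undercounts)
    int after = numOfReplicas + results.size();  // fixed expected total: 3

    // If placement then fails before anything more is added to results,
    // the corrected warning reports the real shortfall:
    int shortfall = after - results.size();      // 2
    System.out.println("still in need of " + shortfall + " to reach " + after);
  }
}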
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1403619&r1=1403618&r2=1403619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Tue Oct 30 05:56:24 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
@@ -43,6 +44,10 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -374,7 +379,71 @@ public class TestReplicationPolicy {
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
- assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+ assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+ }
+
+ /**
+ * This testcase tries to choose more targets than there are available
+ * nodes and checks the result.
+ * @throws Exception
+ */
+ @Test
+ public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+ // make data nodes 0 and 1 unqualified to be chosen: not enough disk space
+ for(int i=0; i<2; i++) {
+ dataNodes[i].updateHeartbeat(
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+ }
+
+ final TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ // try to choose NUM_OF_DATANODES targets, which is more than the number
+ // of actually available nodes.
+ DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
+ NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+ BLOCK_SIZE);
+ assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+ final List<LoggingEvent> log = appender.getLog();
+ assertNotNull(log);
+ assertFalse(log.size() == 0);
+ final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+ assertEquals(lastLogEntry.getLevel(), Level.WARN);
+ // Replicas are supposed to be placed on each node, but two data nodes
+ // are not available for placing replicas, so here we expect a shortfall of 2
+ assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
+ for(int i=0; i<2; i++) {
+ dataNodes[i].updateHeartbeat(
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ }
+ }
+
+ class TestAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override
+ public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<LoggingEvent>(log);
+ }
}
private boolean containsWithinRange(DatanodeDescriptor target,
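
The TestAppender above captures log4j events in memory so the test can assert on the level and text of the emitted warning. A hedged sketch of the usual attach/detach pattern for such a capturing appender (Logger.getRootLogger(), addAppender() and removeAppender() are standard log4j 1.x API; the class and names below are hypothetical stand-ins, not the test code itself):

import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

public class AppenderUsageSketch {
  // Hypothetical stand-in mirroring the TestAppender in the diff above.
  static class CapturingAppender extends AppenderSkeleton {
    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
    @Override protected void append(LoggingEvent event) { log.add(event); }
    @Override public boolean requiresLayout() { return false; }
    @Override public void close() { }
    public List<LoggingEvent> getLog() { return new ArrayList<LoggingEvent>(log); }
  }

  public static void main(String[] args) {
    CapturingAppender appender = new CapturingAppender();
    Logger root = Logger.getRootLogger();
    root.addAppender(appender);                 // start capturing all log output
    try {
      Logger.getLogger("demo").warn("captured");
      System.out.println("events captured: " + appender.getLog().size());
    } finally {
      root.removeAppender(appender);            // detach so it does not leak into other tests
    }
  }
}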