You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/03/23 21:40:36 UTC
svn commit: r521896 - in /lucene/hadoop/trunk: CHANGES.txt
conf/hadoop-default.xml src/java/org/apache/hadoop/dfs/FSNamesystem.java
src/test/org/apache/hadoop/dfs/TestReplication.java
Author: cutting
Date: Fri Mar 23 13:40:36 2007
New Revision: 521896
URL: http://svn.apache.org/viewvc?view=rev&rev=521896
Log:
HADOOP-1047. Fix TestReplication to succeed more reliably. Contributed by Hairong.
Modified:
lucene/hadoop/trunk/CHANGES.txt
lucene/hadoop/trunk/conf/hadoop-default.xml
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=521896&r1=521895&r2=521896
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Fri Mar 23 13:40:36 2007
@@ -1,6 +1,12 @@
Hadoop Change Log
+Trunk (unreleased changes)
+
+ 1. HADOOP-1047. Fix TestReplication to succeed more reliably.
+ (Hairong Kuang via cutting)
+
+
Release 0.12.2 - 2007-03-23
1. HADOOP-1135. Fix bug in block report processing which may cause
Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?view=diff&rev=521896&r1=521895&r2=521896
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Fri Mar 23 13:40:36 2007
@@ -238,6 +238,12 @@
</property>
<property>
+ <name>dfs.replication.considerLoad</name>
+ <value>true</value>
+  <description>Decides whether chooseTarget considers the target's load
+  when selecting datanodes for block replica placement.
+  </description>
+</property>
+<property>
<name>dfs.default.chunk.view.size</name>
<value>32768</value>
<description>The number of bytes to view for a file on the browser.
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=521896&r1=521895&r2=521896
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Fri Mar 23 13:40:36 2007
@@ -203,7 +203,7 @@
// datanode networktoplogy
NetworkTopology clusterMap = new NetworkTopology();
// for block replicas placement
- ReplicationTargetChooser replicator = new ReplicationTargetChooser();
+ ReplicationTargetChooser replicator;
private HostsFileReader hostsReader;
private Daemon dnthread = null;
@@ -217,6 +217,8 @@
int port,
NameNode nn, Configuration conf) throws IOException {
fsNamesystemObject = this;
+ this.replicator = new ReplicationTargetChooser(
+ conf.getBoolean("dfs.replication.considerLoad", true));
this.defaultReplication = conf.getInt("dfs.replication", 3);
this.maxReplication = conf.getInt("dfs.replication.max", 512);
this.minReplication = conf.getInt("dfs.replication.min", 1);
@@ -2729,6 +2731,12 @@
*
*/
class ReplicationTargetChooser {
+ final boolean considerLoad;
+
+ ReplicationTargetChooser( boolean considerLoad ) {
+ this.considerLoad = considerLoad;
+ }
+
private class NotEnoughReplicasException extends Exception {
NotEnoughReplicasException( String msg ) {
super( msg );
@@ -3054,7 +3062,8 @@
private boolean isGoodTarget( DatanodeDescriptor node,
long blockSize, int maxTargetPerLoc,
List<DatanodeDescriptor> results) {
- return isGoodTarget(node, blockSize, maxTargetPerLoc, true, results);
+ return isGoodTarget(node, blockSize, maxTargetPerLoc,
+ this.considerLoad, results);
}
private boolean isGoodTarget( DatanodeDescriptor node,
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?view=diff&rev=521896&r1=521895&r2=521896
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Fri Mar 23 13:40:36 2007
@@ -46,8 +46,6 @@
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.dfs.TestReplication");
-
-
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
// create and write a file that contains three blocks of data
@@ -70,13 +68,31 @@
ClientProtocol.versionID,
DataNode.createSocketAddr(conf.get("fs.default.name")),
conf);
-
- LocatedBlock[] locations = namenode.open(name.toString());
+
+ LocatedBlock[] locations;
+ boolean isReplicationDone;
+ do {
+ locations = namenode.open(name.toString());
+ isReplicationDone = true;
+ for (int idx = 0; idx < locations.length; idx++) {
+ DatanodeInfo[] datanodes = locations[idx].getLocations();
+ if(Math.min(numDatanodes, repl) != datanodes.length) {
+ isReplicationDone=false;
+ LOG.warn("File has "+datanodes.length+" replicas, expecting "
+ +Math.min(numDatanodes, repl));
+ try {
+ Thread.sleep(15000L);
+ } catch (InterruptedException e) {
+ // nothing
+ }
+ break;
+ }
+ }
+ } while(!isReplicationDone);
+
boolean isOnSameRack = true, isNotOnSameRack = true;
for (int idx = 0; idx < locations.length; idx++) {
DatanodeInfo[] datanodes = locations[idx].getLocations();
- assertEquals("Number of replicas for block" + idx,
- Math.min(numDatanodes, repl), datanodes.length);
if(datanodes.length <= 1) break;
if(datanodes.length == 2) {
isNotOnSameRack = !( datanodes[0].getNetworkLocation().equals(
@@ -114,16 +130,12 @@
*/
public void testReplication() throws IOException {
Configuration conf = new Configuration();
+ conf.setBoolean("dfs.replication.considerLoad", false);
MiniDFSCluster cluster = new MiniDFSCluster(65312, conf, numDatanodes, false, true, racks);
- // Now wait for 15 seconds to give datanodes chance to register
- // themselves and to report heartbeat
- try {
- Thread.sleep(15000L);
- } catch (InterruptedException e) {
- // nothing
- }
+ cluster.waitActive();
- InetSocketAddress addr = new InetSocketAddress("localhost", 65312);
+ InetSocketAddress addr = new InetSocketAddress("localhost",
+ cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport();