Posted to hdfs-commits@hadoop.apache.org by dh...@apache.org on 2009/12/24 00:07:52 UTC

svn commit: r893643 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

Author: dhruba
Date: Wed Dec 23 23:07:52 2009
New Revision: 893643

URL: http://svn.apache.org/viewvc?rev=893643&view=rev
Log:
HDFS-762. Balancer causes Null Pointer Exception. 
(Cristian Ivascu via dhruba)
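
The diff makes the root cause visible: the no-argument constructor ran
checkReplicationPolicyCompatibility(getConf()) before any Configuration had
been injected. Both ToolRunner and the new test below call setConf() only
after construction, so getConf() still returned null inside the constructor
and the check dereferenced it. A minimal sketch of the failure mode
(EagerTool is illustrative, not Balancer source):

    import org.apache.hadoop.conf.Configured;

    class EagerTool extends Configured {
      EagerTool() {
        // setConf() has not run yet, so getConf() is still null here
        // and the next line throws NullPointerException.
        getConf().getInt("io.file.buffer.size", 4096);
      }
      public static void main(String[] args) {
        new EagerTool(); // fails at construction time
      }
    }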


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=893643&r1=893642&r2=893643&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Dec 23 23:07:52 2009
@@ -584,6 +584,9 @@
     HDFS-849. TestFiDataTransferProtocol2#pipeline_Fi_18 sometimes fails.
     (hairong)
 
+    HDFS-762. Balancer causes Null Pointer Exception. 
+    (Cristian Ivascu via dhruba)
+
 Release 0.20.2 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=893643&r1=893642&r2=893643&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Wed Dec 23 23:07:52 2009
@@ -789,7 +789,6 @@
   
   /** Default constructor */
   Balancer() throws UnsupportedActionException {
-    checkReplicationPolicyCompatibility(getConf());
   }
   
   /** Construct a balancer from the given configuration */
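
The fix simply drops the eager check rather than null-guarding it; the
parameterized constructor above presumably still performs it against a real
Configuration. With the default constructor now a no-op, a Balancer can be
built first and configured afterwards, which is the order both ToolRunner
and the new test below follow. A minimal sketch of that two-phase lifecycle
(BalancerLifecycleSketch is hypothetical; it must live in the balancer
package because the no-arg constructor is package-private):

    package org.apache.hadoop.hdfs.server.balancer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class BalancerLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        Balancer balancer = new Balancer(); // safe now: no getConf() call
        balancer.setConf(conf);             // configuration injected later
        System.exit(balancer.run(new String[0])); // run() sees non-null conf
      }
    }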

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=893643&r1=893642&r2=893643&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Wed Dec 23 23:07:52 2009
@@ -180,6 +180,7 @@
       totalCapacity += capacity;
     }
     runBalancer(conf, totalUsedSpace, totalCapacity);
+    cluster.shutdown();
   }
 
   /* wait for one heartbeat */
@@ -261,6 +262,38 @@
     } while(!balanced);
 
   }
+
+  private void runBalancerDefaultConstructor(Configuration conf,
+      long totalUsedSpace, long totalCapacity) throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+
+    // start rebalancing
+    balancer = new Balancer();
+    balancer.setConf(conf);
+    balancer.run(new String[0]);
+
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+    boolean balanced;
+    do {
+      DatanodeInfo[] datanodeReport = client
+          .getDatanodeReport(DatanodeReportType.ALL);
+      assertEquals(datanodeReport.length, cluster.getDataNodes().size());
+      balanced = true;
+      double avgUtilization = ((double) totalUsedSpace) / totalCapacity * 100;
+      for (DatanodeInfo datanode : datanodeReport) {
+        if (Math.abs(avgUtilization - ((double) datanode.getDfsUsed())
+            / datanode.getCapacity() * 100) > 10) {
+          balanced = false;
+          try {
+            Thread.sleep(100);
+          } catch (InterruptedException ignored) {
+          }
+          break;
+        }
+      }
+    } while (!balanced);
+
+  }
   
   /** one-node cluster test*/
   private void oneNodeTest(Configuration conf) throws Exception {
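
runBalancerDefaultConstructor mirrors the existing runBalancer() wait loop:
after starting the balancer it polls the datanode report until every node's
DFS utilization is within 10 percentage points of the cluster-wide average,
sleeping 100 ms between polls. The exit condition, restated as a standalone
predicate (an illustrative extraction, not code from TestBalancer):

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    /** True when every datanode is within 10 percentage points of the
     *  cluster-wide average utilization -- the loop's exit condition. */
    static boolean isBalanced(long totalUsedSpace, long totalCapacity,
                              DatanodeInfo[] nodes) {
      double avgUtilization = 100.0 * totalUsedSpace / totalCapacity;
      for (DatanodeInfo node : nodes) {
        double utilization = 100.0 * node.getDfsUsed() / node.getCapacity();
        if (Math.abs(avgUtilization - utilization) > 10) {
          return false; // this node is still too far from the average
        }
      }
      return true;
    }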
@@ -298,6 +331,44 @@
         new long[]{CAPACITY, CAPACITY},
         new String[] {RACK0, RACK1});
   }
+  
+  public void testBalancer2() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    testBalancerDefaultConstructor(conf, new long[] { CAPACITY, CAPACITY },
+        new String[] { RACK0, RACK1 }, CAPACITY, RACK2);
+  }
+
+  private void testBalancerDefaultConstructor(Configuration conf,
+      long[] capacities, String[] racks, long newCapacity, String newRack)
+      throws Exception {
+    int numOfDatanodes = capacities.length;
+    assertEquals(numOfDatanodes, racks.length);
+    cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null,
+        racks, capacities);
+    try {
+      cluster.waitActive();
+      client = DFSClient.createNamenode(conf);
+
+      long totalCapacity = 0L;
+      for (long capacity : capacities) {
+        totalCapacity += capacity;
+      }
+      // fill up the cluster to be 30% full
+      long totalUsedSpace = totalCapacity * 3 / 10;
+      createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
+      // start up an empty node with the same capacity and on the same rack
+      cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
+          new long[] { newCapacity });
+
+      totalCapacity += newCapacity;
+
+      // run balancer and validate results
+      runBalancerDefaultConstructor(conf, totalUsedSpace, totalCapacity);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * @param args
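
testBalancer2 replays the same two-node shape as testBalancer1 through the
default-constructor path: two datanodes (RACK0, RACK1) are filled to 30% of
their combined capacity, then an empty node of equal capacity joins on
RACK2. The numbers the wait loop converges on:

    usedSpace      = 0.3 * (2 * CAPACITY)            = 0.6 * CAPACITY
    totalCapacity  = 2 * CAPACITY + CAPACITY         = 3 * CAPACITY
    avgUtilization = 100 * usedSpace / totalCapacity = 20%

so the loop exits once every datanode reports between 10% and 30% used.
Note also that this test tears the MiniDFSCluster down in a finally block,
so it shuts down even when an assertion fails, whereas the bare
cluster.shutdown() added in the first hunk runs only on success.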
@@ -306,5 +377,6 @@
     TestBalancer balancerTest = new TestBalancer();
     balancerTest.testBalancer0();
     balancerTest.testBalancer1();
+    balancerTest.testBalancer2();
   }
 }