Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/02/25 00:52:20 UTC

svn commit: r1293482 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Author: suresh
Date: Fri Feb 24 23:52:20 2012
New Revision: 1293482

URL: http://svn.apache.org/viewvc?rev=1293482&view=rev
Log:
HDFS-3002. TestNameNodeMetrics need not wait for metrics update. Contributed by Suresh Srinivas.
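
The sleeps can go because the FSNamesystem and NameNodeActivity gauges and
counters asserted in this test are snapshotted on demand each time the test
calls getMetrics(), so an assertion placed directly after the file system
operation already sees the updated value. A minimal sketch of that pattern
follows; it assumes the standard HDFS test dependencies (MiniDFSCluster,
DFSTestUtil, MetricsAsserts) on the classpath, and the class name and file
path below are illustrative, not part of this commit:

    import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Test;

    public class TestOnDemandMetrics {
      @Test
      public void testFilesTotalWithoutSleep() throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          DFSTestUtil.createFile(fs, new Path("/f"), 1024, (short) 1, 0L);
          // getMetrics() takes a fresh snapshot of the metrics source, so
          // the gauge already reflects the create; no Thread.sleep() needed.
          assertGauge("FilesTotal", 2L, getMetrics("FSNamesystem")); // root + /f
        } finally {
          cluster.shutdown();
        }
      }
    }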


Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1293482&r1=1293481&r2=1293482&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 24 23:52:20 2012
@@ -141,6 +141,9 @@ Trunk (unreleased changes)
     HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using 
     NetUtils. (Hari Mankude via suresh)
 
+    HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
+    (suresh)
+
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1293482&r1=1293481&r2=1293482&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Feb 24 23:52:20 2012
@@ -103,12 +103,6 @@ public class TestNameNodeMetrics {
     DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
   }
 
-  private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
-    // for some block related metrics to get updated)
-    Thread.sleep(1000);
-  }
-
   private void readFile(FileSystem fileSys,Path name) throws IOException {
     //Just read file so that getNumBlockLocations are incremented
     DataInputStream stm = fileSys.open(name);
@@ -125,7 +119,6 @@ public class TestNameNodeMetrics {
     createFile(file, 3200, (short)3);
     final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
-    updateMetrics();
     assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
 
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
@@ -140,7 +133,6 @@ public class TestNameNodeMetrics {
     while (threshold < blockCount) {
       blockCapacity <<= 1;
     }
-    updateMetrics();
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
@@ -150,7 +142,6 @@ public class TestNameNodeMetrics {
     filesTotal--; // reduce the filecount for deleted file
     
     waitForDeletion();
-    updateMetrics();
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", 0L, rb);
@@ -174,7 +165,7 @@ public class TestNameNodeMetrics {
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
         "TEST");
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("CorruptBlocks", 1L, rb);
     assertGauge("PendingReplicationBlocks", 1L, rb);
@@ -196,7 +187,6 @@ public class TestNameNodeMetrics {
     createFile(file, 100, (short)2);
     long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("ExcessBlocks", totalBlocks, rb);
     fs.delete(file, true);
@@ -214,7 +204,7 @@ public class TestNameNodeMetrics {
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
         "TEST");
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("UnderReplicatedBlocks", 1L, rb);
     assertGauge("MissingBlocks", 1L, rb);
@@ -236,7 +226,6 @@ public class TestNameNodeMetrics {
     Path target = getTestPath("target");
     createFile(target, 100, (short)1);
     fs.rename(src, target, Rename.OVERWRITE);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
     assertCounter("FilesRenamed", 1L, rb);
     assertCounter("FilesDeleted", 1L, rb);
@@ -264,7 +253,6 @@ public class TestNameNodeMetrics {
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
-    updateMetrics();
   
     //Create file does not change numGetBlockLocations metric
     //expect numGetBlockLocations = 0 for previous and current interval 
@@ -273,14 +261,12 @@ public class TestNameNodeMetrics {
     // Open and read file operation increments GetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
-    updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
     assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
 
     // opening and reading file  twice will increment numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
-    updateMetrics();
     assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
   }
   
@@ -298,7 +284,6 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
     
     fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
-    updateMetrics();
     
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
@@ -306,7 +291,6 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
     
     cluster.getNameNodeRpc().rollEditLog();
-    updateMetrics();
     
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
@@ -316,7 +300,6 @@ public class TestNameNodeMetrics {
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     cluster.getNameNodeRpc().saveNamespace();
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-    updateMetrics();
     
     long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
         getMetrics(NS_METRICS));
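
The two corrupt-block tests keep a direct Thread.sleep(1000) because the
CorruptBlocks, PendingReplicationBlocks, UnderReplicatedBlocks and
MissingBlocks gauges depend on background replication work finishing, not
just on a metrics snapshot. A hedged alternative to a fixed sleep is to
poll the gauge until it reaches the expected value; the helper below is a
hypothetical sketch, not part of this commit, reusing the same
MetricsAsserts calls the test already makes:

    // Hypothetical polling helper (not part of this commit): re-snapshot
    // the gauge until it matches, instead of one fixed Thread.sleep(1000).
    private static void waitForGauge(String source, String gauge,
        long expected, long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (MetricsAsserts.getLongGauge(gauge,
            MetricsAsserts.getMetrics(source)) == expected) {
          return; // gauge reached the expected value in time
        }
        Thread.sleep(100); // short poll interval
      }
      throw new AssertionError("Timed out waiting for " + gauge + " = "
          + expected);
    }

    // Usage in place of the retained sleep, e.g. in the corrupt-block test:
    //   waitForGauge("FSNamesystem", "CorruptBlocks", 1L, 10000);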