You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2008/06/07 00:08:03 UTC
svn commit: r664155 - in /hadoop/core/trunk: CHANGES.txt
src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java
src/java/org/apache/hadoop/dfs/NameNodeMetrics.java
Author: cdouglas
Date: Fri Jun 6 15:08:03 2008
New Revision: 664155
URL: http://svn.apache.org/viewvc?rev=664155&view=rev
Log:
HADOOP-3193. Include the address of the client that found the corrupted block
in the log. Also include a BlocksCorrupted metric to track the size of the
corrupted block map.
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNodeMetrics.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=664155&r1=664154&r2=664155&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Jun 6 15:08:03 2008
@@ -81,6 +81,10 @@
HADOOP-3452. Change fsck to return non-zero status for a corrupt
FileSystem. (lohit vijayarenu via cdouglas)
+ HADOOP-3193. Include the address of the client that found the corrupted
+ block in the log. Also include a BlocksCorrupted metric to track the size
+ of the corrupted block map. (cdouglas)
+
NEW FEATURES
HADOOP-3074. Provides a UrlStreamHandler for DFS and other FS,
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java?rev=664155&r1=664154&r2=664155&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/CorruptReplicasMap.java Fri Jun 6 15:08:03 2008
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.dfs;
+import org.apache.hadoop.ipc.Server;
+
import java.util.*;
import java.io.IOException;
@@ -50,13 +52,17 @@
nodes.add(dn);
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
blk.getBlockName() +
- " added as corrupt on " + dn.getName());
+ " added as corrupt on " + dn.getName() +
+ " by " + Server.getRemoteIp());
} else {
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
"duplicate requested for " +
blk.getBlockName() + " to add as corrupt " +
- "on " + dn.getName());
+ "on " + dn.getName() +
+ " by " + Server.getRemoteIp());
}
+ NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
+ corruptReplicasMap.size());
}
/**
@@ -68,8 +74,11 @@
FSNamesystem fsNamesystem = FSNamesystem.getFSNamesystem();
if (fsNamesystem.blocksMap.contains(blk))
return;
- if (corruptReplicasMap != null)
+ if (corruptReplicasMap != null) {
corruptReplicasMap.remove(blk);
+ NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
+ corruptReplicasMap.size());
+ }
}
/**
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNodeMetrics.java?rev=664155&r1=664154&r2=664155&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNodeMetrics.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNodeMetrics.java Fri Jun 6 15:08:03 2008
@@ -57,6 +57,7 @@
public MetricsIntValue safeModeTime = new MetricsIntValue("SafemodeTime");
public MetricsIntValue fsImageLoadTime =
new MetricsIntValue("fsImageLoadTime");
+ public MetricsIntValue numBlocksCorrupted = new MetricsIntValue("BlocksCorrupted");
NameNodeMetrics(Configuration conf, NameNode nameNode) {
@@ -101,6 +102,7 @@
blockReport.pushMetric(metricsRecord);
safeModeTime.pushMetric(metricsRecord);
fsImageLoadTime.pushMetric(metricsRecord);
+ numBlocksCorrupted.pushMetric(metricsRecord);
}
metricsRecord.update();
}