You are viewing a plain-text version of this content. The canonical link for it is available in the original archive listing.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/02/21 08:23:32 UTC
[1/3] hadoop git commit: HDFS-9839. Reduce verbosity of processReport
logging. (Contributed by Arpit Agarwal)
Repository: hadoop
Updated Branches:
refs/heads/branch-2 7f6737951 -> 7660cfd91
refs/heads/branch-2.8 d31660ef4 -> 4823fbb49
refs/heads/trunk e54cc2931 -> d5abd293a
HDFS-9839. Reduce verbosity of processReport logging. (Contributed by Arpit Agarwal)
This closes #78
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5abd293
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5abd293
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5abd293
Branch: refs/heads/trunk
Commit: d5abd293a890a8a1da48a166a291ae1c5644ad57
Parents: e54cc29
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Feb 20 23:19:09 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Feb 20 23:19:09 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hdfs/server/blockmanagement/BlockManager.java | 18 +++++++++---------
2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5abd293/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1d0379c..9215488 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2802,6 +2802,8 @@ Release 2.8.0 - UNRELEASED
HDFS-6832. Fix the usage of 'hdfs namenode' command.
(Manjunath Ballur via aajisaka)
+ HDFS-9839. Reduce verbosity of processReport logging. (Arpit Agarwal)
+
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5abd293/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cc52b6e..eaebed9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2060,7 +2060,7 @@ public class BlockManager implements BlockStatsMXBean {
final long startTime = Time.monotonicNow(); //after acquiring write lock
final long endTime;
DatanodeDescriptor node;
- Collection<Block> invalidatedBlocks = null;
+ Collection<Block> invalidatedBlocks = Collections.emptyList();
try {
node = datanodeManager.getDatanode(nodeID);
@@ -2136,11 +2136,9 @@ public class BlockManager implements BlockStatsMXBean {
namesystem.writeUnlock();
}
- if (invalidatedBlocks != null) {
- for (Block b : invalidatedBlocks) {
- blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
- "belong to any file", b, node, b.getNumBytes());
- }
+ for (Block b : invalidatedBlocks) {
+ blockLog.debug("BLOCK* processReport: {} on node {} size {} does not " +
+ "belong to any file", b, node, b.getNumBytes());
}
// Log the block report processing stats from Namenode perspective
@@ -2149,9 +2147,11 @@ public class BlockManager implements BlockStatsMXBean {
metrics.addBlockReport((int) (endTime - startTime));
}
blockLog.info("BLOCK* processReport: from storage {} node {}, " +
- "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
- .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
- node.hasStaleStorages(), (endTime - startTime));
+ "blocks: {}, hasStaleStorage: {}, processing time: {} msecs, " +
+ "invalidatedBlocks: {}", storage.getStorageID(), nodeID,
+ newReport.getNumberOfBlocks(),
+ node.hasStaleStorages(), (endTime - startTime),
+ invalidatedBlocks.size());
return !node.hasStaleStorages();
}
[2/3] hadoop git commit: HDFS-9839. Reduce verbosity of processReport
logging. (Contributed by Arpit Agarwal)
Posted by ar...@apache.org.
HDFS-9839. Reduce verbosity of processReport logging. (Contributed by Arpit Agarwal)
This closes #78
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7660cfd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7660cfd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7660cfd9
Branch: refs/heads/branch-2
Commit: 7660cfd91a9c3ab1311803ea882957d611887398
Parents: 7f67379
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Feb 20 23:19:09 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Feb 20 23:20:18 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hdfs/server/blockmanagement/BlockManager.java | 18 +++++++++---------
2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7660cfd9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4c62a01..5afceca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1872,6 +1872,8 @@ Release 2.8.0 - UNRELEASED
HDFS-6832. Fix the usage of 'hdfs namenode' command.
(Manjunath Ballur via aajisaka)
+ HDFS-9839. Reduce verbosity of processReport logging. (Arpit Agarwal)
+
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7660cfd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cb23b9a..fb0707b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1865,7 +1865,7 @@ public class BlockManager implements BlockStatsMXBean {
final long startTime = Time.monotonicNow(); //after acquiring write lock
final long endTime;
DatanodeDescriptor node;
- Collection<Block> invalidatedBlocks = null;
+ Collection<Block> invalidatedBlocks = Collections.emptyList();
try {
node = datanodeManager.getDatanode(nodeID);
@@ -1940,11 +1940,9 @@ public class BlockManager implements BlockStatsMXBean {
namesystem.writeUnlock();
}
- if (invalidatedBlocks != null) {
- for (Block b : invalidatedBlocks) {
- blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
- "belong to any file", b, node, b.getNumBytes());
- }
+ for (Block b : invalidatedBlocks) {
+ blockLog.debug("BLOCK* processReport: {} on node {} size {} does not " +
+ "belong to any file", b, node, b.getNumBytes());
}
// Log the block report processing stats from Namenode perspective
@@ -1953,9 +1951,11 @@ public class BlockManager implements BlockStatsMXBean {
metrics.addBlockReport((int) (endTime - startTime));
}
blockLog.info("BLOCK* processReport: from storage {} node {}, " +
- "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
- .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
- node.hasStaleStorages(), (endTime - startTime));
+ "blocks: {}, hasStaleStorage: {}, processing time: {} msecs, " +
+ "invalidatedBlocks: {}", storage.getStorageID(), nodeID,
+ newReport.getNumberOfBlocks(),
+ node.hasStaleStorages(), (endTime - startTime),
+ invalidatedBlocks.size());
return !node.hasStaleStorages();
}
[3/3] hadoop git commit: HDFS-9839. Reduce verbosity of processReport
logging. (Contributed by Arpit Agarwal)
Posted by ar...@apache.org.
HDFS-9839. Reduce verbosity of processReport logging. (Contributed by Arpit Agarwal)
This closes #78
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4823fbb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4823fbb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4823fbb4
Branch: refs/heads/branch-2.8
Commit: 4823fbb490d1e278cd21d431f1a051b8684d07f1
Parents: d31660e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Feb 20 23:19:09 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Feb 20 23:20:28 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hdfs/server/blockmanagement/BlockManager.java | 18 +++++++++---------
2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4823fbb4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 27e7465..dd68850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1785,6 +1785,8 @@ Release 2.8.0 - UNRELEASED
HDFS-6832. Fix the usage of 'hdfs namenode' command.
(Manjunath Ballur via aajisaka)
+ HDFS-9839. Reduce verbosity of processReport logging. (Arpit Agarwal)
+
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4823fbb4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6bf0cc2..a5b8a3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1825,7 +1825,7 @@ public class BlockManager implements BlockStatsMXBean {
final long startTime = Time.monotonicNow(); //after acquiring write lock
final long endTime;
DatanodeDescriptor node;
- Collection<Block> invalidatedBlocks = null;
+ Collection<Block> invalidatedBlocks = Collections.emptyList();
try {
node = datanodeManager.getDatanode(nodeID);
@@ -1900,11 +1900,9 @@ public class BlockManager implements BlockStatsMXBean {
namesystem.writeUnlock();
}
- if (invalidatedBlocks != null) {
- for (Block b : invalidatedBlocks) {
- blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
- "belong to any file", b, node, b.getNumBytes());
- }
+ for (Block b : invalidatedBlocks) {
+ blockLog.debug("BLOCK* processReport: {} on node {} size {} does not " +
+ "belong to any file", b, node, b.getNumBytes());
}
// Log the block report processing stats from Namenode perspective
@@ -1913,9 +1911,11 @@ public class BlockManager implements BlockStatsMXBean {
metrics.addBlockReport((int) (endTime - startTime));
}
blockLog.info("BLOCK* processReport: from storage {} node {}, " +
- "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
- .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
- node.hasStaleStorages(), (endTime - startTime));
+ "blocks: {}, hasStaleStorage: {}, processing time: {} msecs, " +
+ "invalidatedBlocks: {}", storage.getStorageID(), nodeID,
+ newReport.getNumberOfBlocks(),
+ node.hasStaleStorages(), (endTime - startTime),
+ invalidatedBlocks.size());
return !node.hasStaleStorages();
}