You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by fe...@apache.org on 2021/09/28 05:09:28 UTC
[hadoop] branch trunk updated: HDFS-16231. Fix
TestDataNodeMetrics#testReceivePacketSlowMetrics (#3471)
This is an automated email from the ASF dual-hosted git repository.
ferhui pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 1f8f53f HDFS-16231. Fix TestDataNodeMetrics#testReceivePacketSlowMetrics (#3471)
1f8f53f is described below
commit 1f8f53f7d729b2d327fea7fcd81c2ed7f93692c8
Author: huhaiyang <hu...@126.com>
AuthorDate: Tue Sep 28 13:09:18 2021 +0800
HDFS-16231. Fix TestDataNodeMetrics#testReceivePacketSlowMetrics (#3471)
---
.../hdfs/server/datanode/TestDataNodeMetrics.java | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 602ac00..e1501fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -169,6 +169,7 @@ public class TestDataNodeMetrics {
conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3).build();
+ DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
@@ -190,22 +191,30 @@ public class TestDataNodeMetrics {
DataNodeFaultInjector.set(injector);
Path testFile = new Path("/testFlushNanosMetric.txt");
FSDataOutputStream fout = fs.create(testFile);
+ DFSOutputStream dout = (DFSOutputStream) fout.getWrappedStream();
fout.write(new byte[1]);
fout.hsync();
+ DatanodeInfo[] pipeline = dout.getPipeline();
fout.close();
+ dout.close();
+ DatanodeInfo headDatanodeInfo = pipeline[0];
List<DataNode> datanodes = cluster.getDataNodes();
- DataNode datanode = datanodes.get(0);
- MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
+ DataNode headNode = datanodes.stream().filter(d -> d.getDatanodeId().equals(headDatanodeInfo))
+ .findFirst().orElse(null);
+ assertNotNull("Could not find the head of the datanode write pipeline",
+ headNode);
+ MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
assertTrue("More than 1 packet received",
- getLongCounter("TotalPacketsReceived", dnMetrics) > 1L);
+ getLongCounter("PacketsReceived", dnMetrics) > 1L);
assertTrue("More than 1 slow packet to mirror",
- getLongCounter("TotalPacketsSlowWriteToMirror", dnMetrics) > 1L);
- assertCounter("TotalPacketsSlowWriteToDisk", 1L, dnMetrics);
- assertCounter("TotalPacketsSlowWriteToOsCache", 0L, dnMetrics);
+ getLongCounter("PacketsSlowWriteToMirror", dnMetrics) > 1L);
+ assertCounter("PacketsSlowWriteToDisk", 1L, dnMetrics);
+ assertCounter("PacketsSlowWriteToOsCache", 0L, dnMetrics);
} finally {
if (cluster != null) {
cluster.shutdown();
}
+ DataNodeFaultInjector.set(oldInjector);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org