Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/05/01 09:01:42 UTC

[25/50] [abbrv] hadoop git commit: HDFS-8232. Missing datanode counters when using Metrics2 sink interface. Contributed by Anu Engineer.

HDFS-8232. Missing datanode counters when using Metrics2 sink interface. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb68cb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb68cb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb68cb5

Branch: refs/heads/HDFS-7240
Commit: feb68cb5470dc3e6c16b6bc1549141613e360601
Parents: db1b674
Author: cnauroth <cn...@apache.org>
Authored: Mon Apr 27 16:48:13 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Mon Apr 27 16:48:13 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  27 ++++
 .../datanode/metrics/DataNodeMetricHelper.java  |  79 +++++++++++
 .../server/datanode/metrics/FSDatasetMBean.java |   3 +-
 .../server/datanode/SimulatedFSDataset.java     |  20 ++-
 .../datanode/TestDataNodeFSDataSetSink.java     | 136 +++++++++++++++++++
 .../extdataset/ExternalDatasetImpl.java         |  19 ++-
 7 files changed, 281 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d56ea0c..326de0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8205. CommandFormat#parse() should not parse option as
     value of option. (Peter Shi and Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-8232. Missing datanode counters when using Metrics2 sink interface.
+    (Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 8869f5a..b87daec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
@@ -104,6 +105,9 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -316,6 +320,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     lazyWriter = new Daemon(new LazyWriter(conf));
     lazyWriter.start();
     registerMBean(datanode.getDatanodeUuid());
+
+    // Add a Metrics2 source interface. This exposes the same
+    // data as the MXBean. We can remove the registerMBean call
+    // in a release where we can break backward compatibility.
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.register("FSDatasetState", "FSDatasetState", this);
+
     localFS = FileSystem.getLocal(conf);
     blockPinningEnabled = conf.getBoolean(
       DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
@@ -636,6 +647,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     return cacheManager.getNumBlocksFailedToUncache();
   }
 
+  /**
+   * Get metrics from the metrics source
+   *
+   * @param collector to contain the resulting metrics snapshot
+   * @param all if true, return all metrics even if unchanged.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    try {
+      DataNodeMetricHelper.getMetrics(collector, this, "FSDatasetState");
+    } catch (Exception e) {
+      LOG.warn("Exception thrown during metrics collection: "
+          + e.getMessage());
+    }
+  }
+
   @Override // FSDatasetMBean
   public long getNumBlocksCached() {
     return cacheManager.getNumBlocksCached();

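For context, the registration above is the stock metrics2 source pattern: any object that implements MetricsSource can be handed to the default metrics system, which then polls it on every publish cycle. A minimal, self-contained sketch of that pattern (ExampleSource and its metric names are illustrative, not part of this patch):

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class ExampleSource implements MetricsSource {
  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // Called by the metrics system on each publish cycle; every call
    // snapshots the current values into a fresh record.
    collector.addRecord("ExampleSource")
        .setContext("example")
        .addGauge(Interns.info("ExampleGauge", "An illustrative gauge"), 42L);
  }

  public static void main(String[] args) {
    // Same register(name, description, source) call FsDatasetImpl now makes.
    // A standalone program must initialize the system first; the patch uses
    // DefaultMetricsSystem.instance() because the DataNode daemon has
    // already initialized it by the time FsDatasetImpl is constructed.
    MetricsSystem ms = DefaultMetricsSystem.initialize("Example");
    ms.register("ExampleSource", "Example metrics source", new ExampleSource());
  }
}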
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetricHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetricHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetricHelper.java
new file mode 100644
index 0000000..8bbe08b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetricHelper.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.metrics;
+
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.lib.Interns;
+
+import java.io.IOException;
+
+public class DataNodeMetricHelper {
+
+  /**
+   * Helper that lets an FSDatasetMBean implementation act as a
+   * metrics2 source.
+   *
+   * @param collector the MetricsCollector that receives the snapshot
+   * @param beanClass the FSDatasetMBean implementation supplying the values
+   * @param context a string that identifies the metrics context
+   *
+   * @throws IOException if beanClass is null
+   */
+  public static void getMetrics(MetricsCollector collector,
+                                FSDatasetMBean beanClass, String context)
+    throws IOException {
+
+    if (beanClass == null) {
+      throw new IOException("beanClass cannot be null");
+    }
+
+    String className = beanClass.getClass().getName();
+
+    collector.addRecord(className)
+      .setContext(context)
+      .addGauge(Interns.info("Capacity", "Total storage capacity"),
+        beanClass.getCapacity())
+      .addGauge(Interns.info("DfsUsed", "Total bytes used by dfs datanode"),
+        beanClass.getDfsUsed())
+      .addGauge(Interns.info("Remaining", "Total bytes of free storage"),
+        beanClass.getRemaining())
+      .add(new MetricsTag(Interns.info("StorageInfo", "Storage ID"),
+        beanClass.getStorageInfo()))
+      .addGauge(Interns.info("NumFailedVolumes", "Number of failed Volumes" +
+        " in the data Node"), beanClass.getNumFailedVolumes())
+      .addGauge(Interns.info("LastVolumeFailureDate", "Last Volume failure in" +
+        " milliseconds from epoch"), beanClass.getLastVolumeFailureDate())
+      .addGauge(Interns.info("EstimatedCapacityLostTotal", "Total capacity lost"
+        + " due to volume failure"), beanClass.getEstimatedCapacityLostTotal())
+      .addGauge(Interns.info("CacheUsed", "Datanode cache used in bytes"),
+        beanClass.getCacheUsed())
+      .addGauge(Interns.info("CacheCapacity", "Datanode cache capacity"),
+        beanClass.getCacheCapacity())
+      .addGauge(Interns.info("NumBlocksCached", "Datanode number" +
+        " of blocks cached"), beanClass.getNumBlocksCached())
+      .addGauge(Interns.info("NumBlocksFailedToCache", "Datanode number of " +
+        "blocks failed to cache"), beanClass.getNumBlocksFailedToCache())
+      .addGauge(Interns.info("NumBlocksFailedToUnCache", "Datanode number of" +
+          " blocks failed in cache eviction"),
+        beanClass.getNumBlocksFailedToUncache());
+
+  }
+
+}

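Because the helper only needs a MetricsCollector and an FSDatasetMBean, it can be exercised without starting a metrics system at all. A hedged sketch, assuming the test classpath (SimulatedFSDataset is a test class, and MetricsCollectorImpl is the impl-package collector Hadoop's own tests typically use for this):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;

public class HelperSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset dataset = new SimulatedFSDataset(null, conf);
    // Hand the helper a collector directly; the record it builds is named
    // after the implementing class and tagged with the supplied context.
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    DataNodeMetricHelper.getMetrics(collector, dataset, "HelperSketch");
  }
}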
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
index 5f22540..c2f175b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.metrics;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSource;
 
 /**
  * 
@@ -37,7 +38,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
  *
  */
 @InterfaceAudience.Private
-public interface FSDatasetMBean {
+public interface FSDatasetMBean extends MetricsSource {
   
   /**
    * Returns the total space (in bytes) used by a block pool

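Note that the interface change only adds the metrics2 path; the same counters stay reachable over JMX through the MBean that FsDatasetImpl registers, which is why the registerMBean call can only be dropped in a compatibility-breaking release. A hedged sketch of reading one attribute in-process (the ObjectName embeds the datanode UUID, shown here as a placeholder):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxPeek {
  public static void main(String[] args) throws Exception {
    // Works only inside a JVM running a DataNode; substitute the real
    // UUID for the placeholder before querying.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName(
        "Hadoop:service=DataNode,name=FSDatasetState-<datanodeUuid>");
    System.out.println("Capacity = " + mbs.getAttribute(name, "Capacity"));
  }
}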
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 060e055..bc24237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -52,6 +50,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -60,9 +59,9 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 /**
  * This class implements a simulated FSDataset.
@@ -690,6 +689,21 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     return 0l;
   }
 
+  /**
+   * Get metrics from the metrics source
+   *
+   * @param collector to contain the resulting metrics snapshot
+   * @param all if true, return all metrics even if unchanged.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    try {
+      DataNodeMetricHelper.getMetrics(collector, this, "SimulatedFSDataset");
+    } catch (Exception e) {
+      // ignore exceptions; metrics collection is best-effort here
+    }
+  }
+
   @Override // FsDatasetSpi
   public synchronized long getLength(ExtendedBlock b) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
new file mode 100644
index 0000000..dbd6bb0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.junit.Test;
+
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestDataNodeFSDataSetSink {
+  private static final MetricsSystemImpl ms = new
+    MetricsSystemImpl("TestFSDataSet");
+
+  class FSDataSetSinkTest implements MetricsSink {
+    private Set<String> nameMap;
+    private int count;
+
+    /**
+     * Add a metrics record to the sink.
+     *
+     * @param record the record to add
+     */
+    @Override
+    public void putMetrics(MetricsRecord record) {
+      // let us do this only once, otherwise
+      // our count could go out of sync.
+      if (count == 0) {
+        for (AbstractMetric m : record.metrics()) {
+          if (nameMap.contains(m.name())) {
+            count++;
+          }
+        }
+
+        for (MetricsTag t : record.tags()) {
+          if (nameMap.contains(t.name())) {
+            count++;
+          }
+        }
+      }
+    }
+
+    /**
+     * Flush any buffered metrics
+     */
+    @Override
+    public void flush() {
+
+    }
+
+    /**
+     * Initialize the plugin
+     *
+     * @param conf the configuration object for the plugin
+     */
+    @Override
+    public void init(SubsetConfiguration conf) {
+      nameMap = new TreeSet<>();
+      nameMap.add("DfsUsed");
+      nameMap.add("Capacity");
+      nameMap.add("Remaining");
+      nameMap.add("StorageInfo");
+      nameMap.add("NumFailedVolumes");
+      nameMap.add("LastVolumeFailureDate");
+      nameMap.add("EstimatedCapacityLostTotal");
+      nameMap.add("CacheUsed");
+      nameMap.add("CacheCapacity");
+      nameMap.add("NumBlocksCached");
+      nameMap.add("NumBlocksFailedToCache");
+      nameMap.add("NumBlocksFailedToUnCache");
+      nameMap.add("Context");
+      nameMap.add("Hostname");
+    }
+
+    public int getMapCount() {
+      return nameMap.size();
+    }
+
+    public int getFoundKeyCount() {
+      return count;
+    }
+  }
+
+  /**
+   * This test creates a source and publishes metrics into the sink
+   * that we have registered, that is, it calls into FSDataSetSinkTest.
+   */
+  @Test
+  public void testFSDataSetMetrics() throws InterruptedException {
+    Configuration conf = new HdfsConfiguration();
+    String bpid = "FSDataSetSink-Test";
+    SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf);
+    fsdataset.addBlockPool(bpid, conf);
+    FSDataSetSinkTest sink = new FSDataSetSinkTest();
+    sink.init(null);
+    ms.init("Test");
+    ms.start();
+    ms.register("FSDataSetSource", "FSDataSetSource", fsdataset);
+    ms.register("FSDataSetSink", "FSDataSetSink", sink);
+    ms.startMetricsMBeans();
+    ms.publishMetricsNow();
+
+    Thread.sleep(4000);
+
+    ms.stopMetricsMBeans();
+    ms.shutdown();
+
+    // make sure we got all the expected metrics in the callback
+    assertEquals(sink.getMapCount(), sink.getFoundKeyCount());
+
+  }
+}

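In a deployed cluster the equivalent sink wiring is done in hadoop-metrics2.properties rather than programmatically, which is the path that was missing these counters before this patch. A minimal sketch using the stock FileSink (filename and period are illustrative):

# hadoop-metrics2.properties, DataNode side
datanode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
datanode.sink.file.filename=datanode-metrics.out
datanode.period=10

With this in place, the FSDatasetState record published by the new source shows up in the sink output alongside the existing DataNode metrics.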
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb68cb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 2c6d868..b7c2028 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.metrics2.MetricsCollector;
 
 public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
 
@@ -420,6 +420,21 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
     return 0;
   }
 
+  /**
+   * Get metrics from the metrics source
+   *
+   * @param collector to contain the resulting metrics snapshot
+   * @param all if true, return all metrics even if unchanged.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    try {
+      DataNodeMetricHelper.getMetrics(collector, this, "ExternalDataset");
+    } catch (Exception e) {
+      // ignore exceptions; metrics collection is best-effort here
+    }
+  }
+
   @Override
   public void setPinning(ExtendedBlock block) throws IOException {    
   }