Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/08 05:39:02 UTC

svn commit: r1079141 [4/5] - in /hadoop/common/branches/yahoo-merge: ./ conf/ ivy/ src/java/org/apache/hadoop/metrics/file/ src/java/org/apache/hadoop/metrics2/ src/java/org/apache/hadoop/metrics2/annotation/ src/java/org/apache/hadoop/metrics2/filter/...

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/package-info.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/package-info.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/package-info.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+  <h1>Metrics 2.0</h1>
+  <ul id="toc">
+    <li><a href="#overview">Overview</a></li>
+    <li><a href="#gettingstarted">Getting Started</a></li>
+    <li><a href="#config">Configuration</a></li>
+    <li><a href="#filtering">Metrics Filtering</a></li>
+    <li><a href="#instrumentation">Metrics Instrumentation Strategy</a></li>
+    <li><a href="#migration">Migration from previous system</a></li>
+  </ul>
+  <h2><a name="overview">Overview</a></h2>
+  <p>This package provides a framework for metrics instrumentation
+    and publication.
+  </p>
+
+  <p>The framework provides a variety of ways to implement metrics
+    instrumentation easily via the simple
+    {@link org.apache.hadoop.metrics2.MetricsSource} interface
+    or the even simpler and more concise declarative metrics annotations.
+    The consumers of metrics just need to implement the simple
+    {@link org.apache.hadoop.metrics2.MetricsSink} interface. Producers
+    register the metrics sources with a metrics system, while consumers
+    register the sinks. A default metrics system is provided to marshal
+    metrics from sources to sinks based on (per source/sink) configuration
+    options. All the metrics are also published and queryable via the
+    standard JMX MBean interface. This document targets the framework users.
+    Framework developers could also consult the
+    <a href="http://wiki.apache.org/hadoop/HADOOP-6728-MetricsV2">design
+    document</a> for architecture and implementation notes.
+  </p>
+  <h3>Sub-packages</h3>
+  <dl>
+    <dt><code>org.apache.hadoop.metrics2.annotation</code></dt>
+    <dd>Public annotation interfaces for simpler metrics instrumentation.
+    </dd>
+    <dt><code>org.apache.hadoop.metrics2.impl</code></dt>
+    <dd>Implementation classes of the framework for the interfaces and/or
+      abstract classes defined in the top-level package. Sink plugin code
+      usually does not need to reference any class here.
+    </dd>
+    <dt> <code>org.apache.hadoop.metrics2.lib</code></dt>
+    <dd>Convenience classes for implementing metrics sources, including the
+      Mutable[{@link org.apache.hadoop.metrics2.lib.MutableGauge Gauge}*|
+      {@link org.apache.hadoop.metrics2.lib.MutableCounter Counter}*|
+      {@link org.apache.hadoop.metrics2.lib.MutableStat Stat}] and
+      {@link org.apache.hadoop.metrics2.lib.MetricsRegistry}.
+    </dd>
+    <dt> <code>org.apache.hadoop.metrics2.filter</code></dt>
+    <dd>Builtin metrics filter implementations include the
+      {@link org.apache.hadoop.metrics2.filter.GlobFilter} and
+      {@link org.apache.hadoop.metrics2.filter.RegexFilter}.
+    </dd>
+    <dt><code>org.apache.hadoop.metrics2.source</code></dt>
+    <dd>Builtin metrics source implementations including the
+      {@link org.apache.hadoop.metrics2.source.JvmMetrics}.
+    </dd>
+    <dt> <code>org.apache.hadoop.metrics2.sink</code></dt>
+    <dd>Builtin metrics sink implementations including the
+      {@link org.apache.hadoop.metrics2.sink.FileSink}.
+    </dd>
+    <dt> <code>org.apache.hadoop.metrics2.util</code></dt>
+    <dd>General utilities for implementing metrics sinks etc., including the
+      {@link org.apache.hadoop.metrics2.util.MetricsCache}.
+    </dd>
+  </dl>
+
+  <h2><a name="gettingstarted">Getting started</a></h2>
+  <h3>Implementing metrics sources</h3>
+  <table width="99%" border="1" cellspacing="0" cellpadding="4">
+    <tbody>
+      <tr>
+        <th>Using annotations</th><th>Using MetricsSource interface</th>
+      </tr>
+      <tr><td>
+  <pre>
+  &#064;Metrics(context="MyContext")
+  class MyStat {
+
+    &#064;Metric("My metric description")
+    public int getMyMetric() {
+      return 42;
+    }
+  }</pre></td><td>
+  <pre>
+  class MyStat implements MetricsSource {
+
+    &#064;Override
+    public void getMetrics(MetricsCollector collector, boolean all) {
+      collector.addRecord("MyStat")
+          .setContext("MyContext")
+          .addGauge(info("MyMetric", "My metric description"), 42);
+    }
+  }
+  </pre>
+        </td>
+      </tr>
+    </tbody>
+  </table>
+  <p>In this example we introduced the following:</p>
+  <dl>
+    <dt><em>&#064;Metrics</em></dt>
+    <dd>The {@link org.apache.hadoop.metrics2.annotation.Metrics} annotation is
+      used to indicate that the class is a metrics source.
+    </dd>
+
+    <dt><em>MyContext</em></dt>
+    <dd>The optional context name typically identifies either the
+      application, or a group of modules within an application or
+      library.
+    </dd>
+
+    <dt><em>MyStat</em></dt>
+    <dd>The class name is used (by default, or as specified by the name=value
+      parameter in the Metrics annotation) as the metrics record name for
+      which a set of metrics is to be reported.  For example, you could have a
+      record named "CacheStat" for reporting a number of statistics relating to
+      the usage of some cache in your application.</dd>
+
+    <dt><em>&#064;Metric</em></dt>
+    <dd>The {@link org.apache.hadoop.metrics2.annotation.Metric} annotation
+      identifies a particular metric, which, in this case, is the result of
+      the getMyMetric method call. It is of the "gauge" (default) type, which
+      means it can vary in both directions, as opposed to a "counter" type,
+      which can only increase or stay the same. The name of the metric is
+      "MyMetric" (inferred from the getMyMetric method name by default). The
+      42 here is the value of the metric, which can be substituted with any
+      valid Java expression.
+    </dd>
+  </dl>
+  <p>Note, the {@link org.apache.hadoop.metrics2.MetricsSource} interface is
+    more verbose but more flexible, allowing generated metric names and
+    multiple records. In fact, the annotation support is implemented
+    internally via the MetricsSource interface.</p>
+  <h3>Implementing metrics sinks</h3>
+  <pre>
+  public class MySink implements MetricsSink {
+    public void putMetrics(MetricsRecord record) {
+      System.out.print(record);
+    }
+    public void init(SubsetConfiguration conf) {}
+    public void flush() {}
+  }</pre>
+  <p>In this example there are three additional concepts:</p>
+  <dl>
+    <dt><em>record</em></dt>
+    <dd>This object corresponds to a record created in a metrics source,
+      e.g., the "MyStat" record in the previous example.
+    </dd>
+    <dt><em>conf</em></dt>
+    <dd>The configuration object for the sink instance, with the prefix
+      removed, so you can look up any sink-specific configuration option
+      using the usual get* methods.
+    </dd>
+    <dt><em>flush</em></dt>
+    <dd>This method is called for each update cycle, which may involve
+      more than one record. The sink should try to flush any buffered metrics
+      to its backend upon the call, but the implementation is not required
+      to be synchronous (see the buffering sketch after this list).
+    </dd>
+  </dl>
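+  <p>For example, a sink that buffers records and only pushes them to its
+    backend on flush might look like the following sketch (a hypothetical,
+    minimal example; the class name and the use of standard output as the
+    "backend" are made up):
+  </p>
+  <pre>
+  public class MyBufferingSink implements MetricsSink {
+    private final StringBuilder buffer = new StringBuilder();
+
+    public void init(SubsetConfiguration conf) {}
+
+    public void putMetrics(MetricsRecord record) {
+      buffer.append(record).append('\n'); // accumulate updates per cycle
+    }
+
+    public void flush() {
+      System.out.print(buffer);  // push buffered metrics to the "backend"
+      buffer.setLength(0);
+    }
+  }</pre>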
+  <p>In order to make use of our <code>MyStat</code> and <code>MySink</code>,
+    they need to be hooked up to a metrics system. In this case (and most
+    cases), the <code>DefaultMetricsSystem</code> would suffice.
+  </p>
+  <pre>
+  DefaultMetricsSystem.initialize("test"); // called once per application
+  DefaultMetricsSystem.register(new MyStat());</pre>
+  <h2><a name="config">Metrics system configuration</a></h2>
+  <p>Sinks are usually specified in a configuration file, say,
+  "hadoop-metrics2-test.properties", as:
+  </p>
+  <pre>
+  test.sink.mysink0.class=com.example.hadoop.metrics.MySink</pre>
+  <p>The configuration syntax is:</p>
+  <pre>
+  [prefix].[source|sink|jmx].[instance].[option]</pre>
+  <p>In the previous example, <code>test</code> is the prefix and
+    <code>mysink0</code> is an instance name.
+    <code>DefaultMetricsSystem</code> would try to load
+    <code>hadoop-metrics2-[prefix].properties</code> first, and if not found,
+    try the default <code>hadoop-metrics2.properties</code> in the class path.
+    Note, the <code>[instance]</code> is an arbitrary name to uniquely
+    identify a particular sink instance. The asterisk (<code>*</code>) can be
+    used to specify default options.
+  </p>
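+  <p>For example, a hypothetical properties file for the <code>test</code>
+    prefix (the instance names and file names are made up) could use the
+    asterisk to set a default option for all sink instances:
+  </p>
+  <pre>
+  test.sink.*.class=org.apache.hadoop.metrics2.sink.FileSink
+  test.sink.file0.filename=metrics-all.out
+  test.sink.file1.context=foo
+  test.sink.file1.filename=metrics-foo.out</pre>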
+  <p>Consult the metrics instrumentation in jvm, rpc, hdfs and mapred, etc.
+    for more examples.
+  </p>
+
+  <h2><a name="filtering">Metrics Filtering</a></h2>
+  <p>One of the features of the default metrics system is metrics filtering
+    configuration by source, context, record/tags and metrics. The least
+    expensive way to filter out metrics would be at the source level, e.g.,
+    filtering out the source named "MyMetrics". The most expensive way would
+    be per-metric filtering.
+  </p>
+  <p>Here are some examples:</p>
+  <pre>
+  test.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink
+  test.sink.file0.context=foo</pre>
+  <p>In this example, we configured one sink instance that would
+    accept metrics from context <code>foo</code> only.
+  </p>
+  <pre>
+  *.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+  test.*.source.filter.include=foo
+  test.*.source.filter.exclude=bar</pre>
+  <p>In this example, we specify a source filter that includes the source
+    <code>foo</code> and excludes <code>bar</code>. When only include
+    patterns are specified, the filter operates in white-listing mode,
+    where only matched sources are included. Likewise, when only exclude
+    patterns are specified, only matched sources are excluded. When both
+    patterns are present, sources that match neither pattern are included
+    as well. Note, the include patterns have precedence over the exclude
+    patterns.
+  </p>
+  <p>Similarly, you can specify the <code>record.filter</code> and
+    <code>metrics.filter</code> options, which operate at the record and
+    metric level, respectively. Filters can be combined to optimize
+    filtering efficiency.</p>
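+  <p>For example, a record filter following the same pattern as the source
+    filter above (the record name "MyNoisyRecord" is made up) could be
+    specified as:
+  </p>
+  <pre>
+  *.record.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+  test.*.record.filter.exclude=MyNoisyRecord</pre>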
+
+  <h2><a name="instrumentation">Metrics instrumentation strategy</a></h2>
+
+  The previous sections showed minimal examples of using the
+  metrics framework. In a larger system (like Hadoop) that allows
+  custom metrics instrumentation, we recommend the following strategy:
+  <pre>
+  &#064;Metrics(about="My metrics description", context="MyContext")
+  class MyMetrics extends MyInstrumentation {
+
+    &#064;Metric("My gauge description") MutableGaugeInt gauge0;
+    &#064;Metric("My counter description") MutableCounterLong counter0;
+    &#064;Metric("My rate description") MutableRate rate0;
+
+    &#064;Override public void setGauge0(int value) { gauge0.set(value); }
+    &#064;Override public void incrCounter0() { counter0.incr(); }
+    &#064;Override public void addRate0(long elapsed) { rate0.add(elapsed); }
+  }
+  </pre>
+
+  Note, in this example we introduced the following:
+  <dl>
+    <dt><em>MyInstrumentation</em></dt>
+    <dd>This is usually an abstract class (or interface) that defines an
+      instrumentation interface (incrCounter0 etc.) allowing different
+      implementations. This could be a mechanism to allow different metrics
+      systems to be used at runtime via configuration (a minimal sketch of
+      such a class follows this list).
+    </dd>
+    <dt><em>Mutable[Gauge*|Counter*|Rate]</em></dt>
+    <dd>These are library classes to manage mutable metrics for
+      implementations of metrics sources. They produce immutable gauges and
+      counters (Metric[Gauge*|Counter*]) for downstream consumption (sinks)
+      upon <code>snapshot</code>. The <code>MutableRate</code>,
+      in particular, provides a way to measure latency and throughput of an
+      operation. In this particular case, it produces a long counter
+      "Rate0NumOps" and a double gauge "Rate0AvgTime" when snapshotted.
+    </dd>
+  </dl>
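+  <p>A minimal sketch of the abstract class assumed by the example above
+    (the method names are taken from the example; everything else is
+    illustrative):
+  </p>
+  <pre>
+  public abstract class MyInstrumentation {
+    public abstract void setGauge0(int value);
+    public abstract void incrCounter0();
+    public abstract void addRate0(long elapsed);
+  }</pre>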
+
+  <h2><a name="migration">Migration from previous system</a></h2>
+  <p>Users of the previous metrics system would notice the lack of a
+    <code>context</code> prefix in the configuration examples. The new
+    metrics system decouples the concept of context (for grouping) from the
+    implementation; previously, a particular context object did the updating
+    and publishing of metrics, which caused problems when you wanted a
+    single context to be consumed by multiple backends. You would also have
+    to configure an implementation instance per context, even if you have a
+    backend that can handle multiple contexts (file, ganglia, etc.):
+  </p>
+  <table width="99%" border="1" cellspacing="0" cellpadding="4">
+    <tbody>
+      <tr>
+        <th width="40%">Before</th><th>After</th>
+      </tr>
+      <tr>
+        <td><pre>
+  context1.class=org.apache.hadoop.metrics.file.FileContext
+  context2.class=org.apache.hadoop.metrics.file.FileContext
+  ...
+  contextn.class=org.apache.hadoop.metrics.file.FileContext</pre>
+        </td>
+        <td><pre>
+  myprefix.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink</pre>
+        </td>
+      </tr>
+    </tbody>
+  </table>
+  <p>In the new metrics system, you can simulate the previous behavior by
+    using the context option in the sink options like the following:
+  </p>
+  <table width="99%" border="1" cellspacing="0" cellpadding="4">
+    <tbody>
+      <tr>
+        <th width="40%">Before</th><th>After</th>
+      </tr>
+      <tr>
+        <td><pre>
+  context0.class=org.apache.hadoop.metrics.file.FileContext
+  context0.fileName=context0.out
+  context1.class=org.apache.hadoop.metrics.file.FileContext
+  context1.fileName=context1.out
+  ...
+  contextn.class=org.apache.hadoop.metrics.file.FileContext
+  contextn.fileName=contextn.out</pre>
+        </td>
+        <td><pre>
+  myprefix.sink.*.class=org.apache.hadoop.metrics2.sink.FileSink
+  myprefix.sink.file0.context=context0
+  myprefix.sink.file0.filename=context0.out
+  myprefix.sink.file1.context=context1
+  myprefix.sink.file1.filename=context1.out
+  ...
+  myprefix.sink.filen.context=contextn
+  myprefix.sink.filen.filename=contextn.out</pre>
+        </td>
+      </tr>
+    </tbody>
+  </table>
+  <p>to send metrics of a particular context to a particular backend. Note,
+    <code>myprefix</code> is an arbitrary prefix for configuration groupings,
+    typically the name of a particular process
+    (<code>namenode</code>, <code>jobtracker</code>, etc.)
+  </p>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.metrics2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/FileSink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/FileSink.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/FileSink.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/FileSink.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.sink;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.PrintWriter;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+
+/**
+ * A metrics sink that writes to a file.
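+ * <p>A hypothetical configuration for this sink (the prefix, instance name
+ * and file name are examples only):</p>
+ * <pre>
+ *   myprefix.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink
+ *   myprefix.sink.file0.filename=metrics.out
+ * </pre>
+ * If no filename option is given, the sink writes to standard output.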
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class FileSink implements MetricsSink {
+
+  private static final String FILENAME_KEY = "filename";
+  private PrintWriter writer;
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    String filename = conf.getString(FILENAME_KEY);
+    try {
+      writer = filename == null
+          ? new PrintWriter(new BufferedOutputStream(System.out))
+          : new PrintWriter(new FileWriter(new File(filename), true));
+    }
+    catch (Exception e) {
+      throw new MetricsException("Error creating "+ filename, e);
+    }
+  }
+
+  @Override
+  public void putMetrics(MetricsRecord record) {
+    writer.print(record.timestamp());
+    writer.print(" ");
+    writer.print(record.context());
+    writer.print(".");
+    writer.print(record.name());
+    String separator = ": ";
+    for (MetricsTag tag : record.tags()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(tag.name());
+      writer.print("=");
+      writer.print(tag.value());
+    }
+    for (AbstractMetric metric : record.metrics()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(metric.name());
+      writer.print("=");
+      writer.print(metric.value());
+    }
+    writer.println();
+  }
+
+  @Override
+  public void flush() {
+    writer.flush();
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/package-info.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/package-info.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/sink/package-info.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Builtin metrics sinks
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.metrics2.sink;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/source/JvmMetrics.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/source/JvmMetrics.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/source/JvmMetrics.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.source;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * JVM related metrics. Mostly used by various servers as part of the metrics
+ * they export.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class JvmMetrics {
+  // placeholder for javadoc to prevent broken links, until
+  // HADOOP-6920
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Contracts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Contracts.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Contracts.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Contracts.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Additional helpers (besides guava Preconditions) for programming by contract
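+ *
+ * <p>A hypothetical usage (the argument and message are examples only):</p>
+ * <pre>
+ *   this.interval = Contracts.checkArg(interval, interval > 0,
+ *                                      "interval should be positive");
+ * </pre>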
+ */
+@InterfaceAudience.Private
+public class Contracts {
+
+  private Contracts() {}
+
+  /**
+   * Check that an argument satisfies a condition.
+   * @param <T> type of the argument
+   * @param arg the argument to check
+   * @param expression  the boolean expression for the condition
+   * @param msg the error message if {@code expression} is false
+   * @return the argument for convenience
+   */
+  public static <T> T checkArg(T arg, boolean expression, Object msg) {
+    if (!expression) {
+      throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg);
+    }
+    return arg;
+  }
+
+  /**
+   * Check that an argument satisfies a condition.
+   * @param arg the argument to check
+   * @param expression  the boolean expression for the condition
+   * @param msg the error message if {@code expression} is false
+   * @return the argument for convenience
+   */
+  public static int checkArg(int arg, boolean expression, Object msg) {
+    if (!expression) {
+      throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg);
+    }
+    return arg;
+  }
+
+  /**
+   * Check that an argument satisfies a condition.
+   * @param arg the argument to check
+   * @param expression  the boolean expression for the condition
+   * @param msg the error message if {@code expression} is false
+   * @return the argument for convenience
+   */
+  public static long checkArg(long arg, boolean expression, Object msg) {
+    if (!expression) {
+      throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg);
+    }
+    return arg;
+  }
+
+  /**
+   * Check that an argument satisfies a condition.
+   * @param arg the argument to check
+   * @param expression  the boolean expression for the condition
+   * @param msg the error message if {@code expression} is false
+   * @return the argument for convenience
+   */
+  public static float checkArg(float arg, boolean expression, Object msg) {
+    if (!expression) {
+      throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg);
+    }
+    return arg;
+  }
+
+  /**
+   * Check that an argument satisfies a condition.
+   * @param arg the argument to check
+   * @param expression  the boolean expression for the condition
+   * @param msg the error message if {@code expression} is false
+   * @return the argument for convenience
+   */
+  public static double checkArg(double arg, boolean expression, Object msg) {
+    if (!expression) {
+      throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg);
+    }
+    return arg;
+  }
+
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MBeans.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MBeans.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MBeans.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MBeans.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.util;
+
+import java.lang.management.ManagementFactory;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+/**
+ * This util class provides a method to register an MBean using
+ * our standard naming convention as described in the doc
+ * for {@link #register(String, String, Object)}.
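+ *
+ * <p>A hypothetical usage (the service and name strings and the mxbean
+ * variable are examples only):</p>
+ * <pre>
+ *   ObjectName bean = MBeans.register("MyService", "MyServiceInfo", mxbean);
+ *   // ... later, e.g., on shutdown:
+ *   MBeans.unregister(bean);
+ * </pre>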
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class MBeans {
+
+  private static final Log LOG = LogFactory.getLog(MBeans.class);
+
+  /**
+   * Register the MBean using our standard MBeanName format
+   * "hadoop:service=<serviceName>,name=<nameName>"
+   * Where the <serviceName> and <nameName> are the supplied parameters
+   *
+   * @param serviceName
+   * @param nameName
+   * @param theMbean - the MBean to register
+   * @return the named used to register the MBean
+   */
+  static public ObjectName register(String serviceName, String nameName,
+                                    Object theMbean) {
+    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName name = getMBeanName(serviceName, nameName);
+    try {
+      mbs.registerMBean(theMbean, name);
+      LOG.debug("Registered "+ name);
+      return name;
+    } catch (Exception e) {
+      LOG.warn("Error registering "+ name, e);
+    }
+    return null;
+  }
+
+  static public void unregister(ObjectName mbeanName) {
+    LOG.debug("Unregistering "+ mbeanName);
+    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    if (mbeanName == null) {
+      LOG.debug("Stacktrace: ", new Throwable());
+      return;
+    }
+    try {
+      mbs.unregisterMBean(mbeanName);
+    } catch (Exception e) {
+      LOG.warn("Error unregistering "+ mbeanName, e);
+    }
+  }
+
+  static private ObjectName getMBeanName(String serviceName, String nameName) {
+    ObjectName name = null;
+    String nameStr = "Hadoop:service="+ serviceName +",name="+ nameName;
+    try {
+      name = DefaultMetricsSystem.newMBeanName(nameStr);
+    } catch (Exception e) {
+      LOG.warn("Error creating MBean object name: "+ nameStr, e);
+    }
+    return name;
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Maps;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+
+/**
+ * A metrics cache for sinks that don't support sparse updates.
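+ *
+ * <p>A hypothetical usage in a sink's <code>putMetrics</code> (the variable
+ * names are examples only):</p>
+ * <pre>
+ *   MetricsCache.Record cached = cache.update(record, true);
+ *   for (Map.Entry&lt;String, Number&gt; entry : cached.metrics()) {
+ *     // push entry.getKey() / entry.getValue() to the backend
+ *   }
+ * </pre>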
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MetricsCache {
+
+  static final Log LOG = LogFactory.getLog(MetricsCache.class);
+  static final int MAX_RECS_PER_NAME_DEFAULT = 1000;
+
+  private final Map<String, RecordCache> map = Maps.newHashMap();
+  private final int maxRecsPerName;
+
+  class RecordCache
+      extends LinkedHashMap<Collection<MetricsTag>, Record> {
+    private static final long serialVersionUID = 1L;
+    private boolean gotOverflow = false;
+
+    @Override
+    protected boolean removeEldestEntry(Map.Entry<Collection<MetricsTag>,
+                                                  Record> eldest) {
+      boolean overflow = size() > maxRecsPerName;
+      if (overflow && !gotOverflow) {
+        LOG.warn("Metrics cache overflow at "+ size() +" for "+ eldest);
+        gotOverflow = true;
+      }
+      return overflow;
+    }
+  }
+
+  /**
+   * Cached record
+   */
+  public static class Record {
+    final Map<String, String> tags = Maps.newHashMap();
+    final Map<String, Number> metrics = Maps.newHashMap();
+
+    /**
+     * Lookup a tag value
+     * @param key name of the tag
+     * @return the tag value
+     */
+    public String getTag(String key) {
+      return tags.get(key);
+    }
+
+    /**
+     * Lookup a metric value
+     * @param key name of the metric
+     * @return the metric value
+     */
+    public Number getMetric(String key) {
+      return metrics.get(key);
+    }
+
+    /**
+     * @return the entry set of the tags of the record
+     */
+    public Set<Map.Entry<String, String>> tags() {
+      return tags.entrySet();
+    }
+
+    /**
+     * @return entry set of the metrics of the record
+     */
+    public Set<Map.Entry<String, Number>> metrics() {
+      return metrics.entrySet();
+    }
+
+    @Override public String toString() {
+      return Objects.toStringHelper(this)
+          .add("tags", tags).add("metrics", metrics)
+          .toString();
+    }
+  }
+
+  public MetricsCache() {
+    this(MAX_RECS_PER_NAME_DEFAULT);
+  }
+
+  /**
+   * Construct a metrics cache
+   * @param maxRecsPerName  limit of the number of records per record name
+   */
+  public MetricsCache(int maxRecsPerName) {
+    this.maxRecsPerName = maxRecsPerName;
+  }
+
+  /**
+   * Update the cache and return the current cached record
+   * @param mr the update record
+   * @param includingTags cache tag values (for later lookup by name) if true
+   * @return the updated cache record
+   */
+  public Record update(MetricsRecord mr, boolean includingTags) {
+    String name = mr.name();
+    RecordCache recordCache = map.get(name);
+    if (recordCache == null) {
+      recordCache = new RecordCache();
+      map.put(name, recordCache);
+    }
+    Collection<MetricsTag> tags = mr.tags();
+    Record record = recordCache.get(tags);
+    if (record == null) {
+      record = new Record();
+      recordCache.put(tags, record);
+    }
+    for (AbstractMetric m : mr.metrics()) {
+      record.metrics.put(m.name(), m.value());
+    }
+    if (includingTags) {
+      // mostly for some sinks that include tags as part of a dense schema
+      for (MetricsTag t : mr.tags()) {
+        record.tags.put(t.name(), t.value());
+      }
+    }
+    return record;
+  }
+
+  /**
+   * Update the cache and return the current cache record
+   * @param mr the update record
+   * @return the updated cache record
+   */
+  public Record update(MetricsRecord mr) {
+    return update(mr, false);
+  }
+
+  /**
+   * Get the cached record
+   * @param name of the record
+   * @param tags of the record
+   * @return the cached record or null
+   */
+  public Record get(String name, Collection<MetricsTag> tags) {
+    RecordCache rc = map.get(name);
+    if (rc == null) return null;
+    return rc.get(tags);
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/SampleStat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/SampleStat.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/SampleStat.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/SampleStat.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Helper to compute running sample stats
+ */
+@InterfaceAudience.Private
+public class SampleStat {
+
+  private final MinMax minmax = new MinMax();
+  private long numSamples = 0;
+  private double a0, a1, s0, s1;
+
+  /**
+   * Construct a new running sample stat
+   */
+  public SampleStat() {
+    a0 = s0 = 0.0;
+  }
+
+  public void reset() {
+    numSamples = 0;
+    a0 = s0 = 0.0;
+    minmax.reset();
+  }
+
+  // We want to reuse the object, sometimes.
+  void reset(long numSamples, double a0, double a1, double s0, double s1,
+             MinMax minmax) {
+    this.numSamples = numSamples;
+    this.a0 = a0;
+    this.a1 = a1;
+    this.s0 = s0;
+    this.s1 = s1;
+    this.minmax.reset(minmax);
+  }
+
+  /**
+   * Copy the values to another SampleStat object (saves object creation and GC).
+   * @param other the destination to hold our values
+   */
+  public void copyTo(SampleStat other) {
+    other.reset(numSamples, a0, a1, s0, s1, minmax);
+  }
+
+  /**
+   * Add a sample to the running stat.
+   * @param x the sample number
+   * @return  self
+   */
+  public SampleStat add(double x) {
+    minmax.add(x);
+    return add(1, x);
+  }
+
+  /**
+   * Add some sample and a partial sum to the running stat.
+   * Note, min/max is not evaluated using this method.
+   * @param nSamples  number of samples
+   * @param x the partial sum
+   * @return  self
+   */
+  public SampleStat add(long nSamples, double x) {
+    numSamples += nSamples;
+
+    if (numSamples == 1) {
+      a0 = a1 = x;
+      s0 = 0.0;
+    }
+    else {
+      // The Welford method for numerical stability
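+      // a1 is the running mean and s1 the running sum of squared deviations
+      // from the mean, so variance() below returns s1 / (numSamples - 1).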
+      a1 = a0 + (x - a0) / numSamples;
+      s1 = s0 + (x - a0) * (x - a1);
+      a0 = a1;
+      s0 = s1;
+    }
+    return this;
+  }
+
+  /**
+   * @return  the total number of samples
+   */
+  public long numSamples() {
+    return numSamples;
+  }
+
+  /**
+   * @return  the arithmetic mean of the samples
+   */
+  public double mean() {
+    return numSamples > 0 ? a1 : 0.0;
+  }
+
+  /**
+   * @return  the variance of the samples
+   */
+  public double variance() {
+    return numSamples > 1 ? s1 / (numSamples - 1) : 0.0;
+  }
+
+  /**
+   * @return  the standard deviation of the samples
+   */
+  public double stddev() {
+    return Math.sqrt(variance());
+  }
+
+  /**
+   * @return  the minimum value of the samples
+   */
+  public double min() {
+    return minmax.min();
+  }
+
+  /**
+   * @return  the maximum value of the samples
+   */
+  public double max() {
+    return minmax.max();
+  }
+
+  /**
+   * Helper to keep running min/max
+   */
+  @SuppressWarnings("PublicInnerClass")
+  public static class MinMax {
+
+    private double min = Double.MAX_VALUE;
+    // Double.MIN_VALUE is the smallest positive double, so it cannot serve
+    // as an initial maximum; use -Double.MAX_VALUE instead.
+    private double max = -Double.MAX_VALUE;
+
+    public void add(double value) {
+      if (value > max) max = value;
+      if (value < min) min = value;
+    }
+
+    public double min() { return min; }
+    public double max() { return max; }
+
+    public void reset() {
+      min = Double.MAX_VALUE;
+      max = -Double.MAX_VALUE;
+    }
+
+    public void reset(MinMax other) {
+      min = other.min();
+      max = other.max();
+    }
+
+  }
+
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Servers.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Servers.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Servers.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/Servers.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.metrics2.util;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Helpers to handle server addresses
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Servers {
+
+  /**
+   * This class is not intended to be instantiated
+   */
+  private Servers() {}
+
+  /**
+   * Parses a space and/or comma separated sequence of server specifications
+   * of the form <i>hostname</i> or <i>hostname:port</i>.  If
+   * the specs string is null, defaults to localhost:defaultPort.
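+   * For example (hypothetical hosts), {@code parse("host1:8020, host2", 9000)}
+   * yields host1:8020 and host2:9000.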
+   *
+   * @param specs   server specs (see description)
+   * @param defaultPort the default port if not specified
+   * @return a list of InetSocketAddress objects.
+   */
+  public static List<InetSocketAddress> parse(String specs, int defaultPort) {
+    List<InetSocketAddress> result = Lists.newArrayList();
+    if (specs == null) {
+      result.add(new InetSocketAddress("localhost", defaultPort));
+    }
+    else {
+      String[] specStrings = specs.split("[ ,]+");
+      for (String specString : specStrings) {
+        int colon = specString.indexOf(':');
+        if (colon < 0 || colon == specString.length() - 1) {
+          result.add(new InetSocketAddress(specString, defaultPort));
+        } else {
+          String hostname = specString.substring(0, colon);
+          int port = Integer.parseInt(specString.substring(colon+1));
+          result.add(new InetSocketAddress(hostname, port));
+        }
+      }
+    }
+    return result;
+  }
+
+}

Added: hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/package-info.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/package-info.java (added)
+++ hadoop/common/branches/yahoo-merge/src/java/org/apache/hadoop/metrics2/util/package-info.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * General helpers for implementing source and sinks
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.metrics2.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/filter/TestPatternFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/filter/TestPatternFilter.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/filter/TestPatternFilter.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/filter/TestPatternFilter.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.filter;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.ConfigBuilder;
+import static org.apache.hadoop.metrics2.lib.Interns.*;
+
+public class TestPatternFilter {
+
+  /**
+   * Filters should default to accept
+   */
+  @Test public void emptyConfigShouldAccept() {
+    SubsetConfiguration empty = new ConfigBuilder().subset("");
+    shouldAccept(empty, "anything");
+    shouldAccept(empty, Arrays.asList(tag("key", "desc", "value")));
+  }
+
+  /**
+   * Filters should handle white-listing correctly
+   */
+  @Test public void includeOnlyShouldOnlyIncludeMatched() {
+    SubsetConfiguration wl = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f").subset("p");
+    shouldAccept(wl, "foo");
+    shouldAccept(wl, Arrays.asList(tag("bar", "", ""),
+                                   tag("foo", "", "f")));
+    shouldReject(wl, "bar");
+    shouldReject(wl, Arrays.asList(tag("bar", "", "")));
+    shouldReject(wl, Arrays.asList(tag("foo", "", "boo")));
+  }
+
+  /**
+   * Filters should handle black-listing correctly
+   */
+  @Test public void excludeOnlyShouldOnlyExcludeMatched() {
+    SubsetConfiguration bl = new ConfigBuilder()
+        .add("p.exclude", "foo")
+        .add("p.exclude.tags", "foo:f").subset("p");
+    shouldAccept(bl, "bar");
+    shouldAccept(bl, Arrays.asList(tag("bar", "", "")));
+    shouldReject(bl, "foo");
+    shouldReject(bl, Arrays.asList(tag("bar", "", ""),
+                                   tag("foo", "", "f")));
+  }
+
+  /**
+   * Filters should accept unmatched items when both include and
+   * exclude patterns are present.
+   */
+  @Test public void shouldAcceptUnmatchedWhenBothAreConfigured() {
+    SubsetConfiguration c = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f")
+        .add("p.exclude", "bar")
+        .add("p.exclude.tags", "bar:b").subset("p");
+    shouldAccept(c, "foo");
+    shouldAccept(c, Arrays.asList(tag("foo", "", "f")));
+    shouldReject(c, "bar");
+    shouldReject(c, Arrays.asList(tag("bar", "", "b")));
+    shouldAccept(c, "foobar");
+    shouldAccept(c, Arrays.asList(tag("foobar", "", "")));
+  }
+
+  /**
+   * Include patterns should take precedence over exclude patterns
+   */
+  @Test public void includeShouldOverrideExclude() {
+    SubsetConfiguration c = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f")
+        .add("p.exclude", "foo")
+        .add("p.exclude.tags", "foo:f").subset("p");
+    shouldAccept(c, "foo");
+    shouldAccept(c, Arrays.asList(tag("foo", "", "f")));
+  }
+
+  static void shouldAccept(SubsetConfiguration conf, String s) {
+    assertTrue("accepts "+ s, newGlobFilter(conf).accepts(s));
+    assertTrue("accepts "+ s, newRegexFilter(conf).accepts(s));
+  }
+
+  static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags) {
+    assertTrue("accepts "+ tags, newGlobFilter(conf).accepts(tags));
+    assertTrue("accepts "+ tags, newRegexFilter(conf).accepts(tags));
+  }
+
+  static void shouldReject(SubsetConfiguration conf, String s) {
+    assertTrue("rejects "+ s, !newGlobFilter(conf).accepts(s));
+    assertTrue("rejects "+ s, !newRegexFilter(conf).accepts(s));
+  }
+
+  static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags) {
+    assertTrue("rejects "+ tags, !newGlobFilter(conf).accepts(tags));
+    assertTrue("rejects "+ tags, !newRegexFilter(conf).accepts(tags));
+  }
+
+  /**
+   * Create a new glob filter with a config object
+   * @param conf  the config object
+   * @return the filter
+   */
+  public static GlobFilter newGlobFilter(SubsetConfiguration conf) {
+    GlobFilter f = new GlobFilter();
+    f.init(conf);
+    return f;
+  }
+
+  /**
+   * Create a new regex filter with a config object
+   * @param conf  the config object
+   * @return the filter
+   */
+  public static RegexFilter newRegexFilter(SubsetConfiguration conf) {
+    RegexFilter f = new RegexFilter();
+    f.init(conf);
+    return f;
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigBuilder.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigBuilder.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigBuilder.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.apache.commons.configuration.SubsetConfiguration;
+
+/**
+ * Helper class for building configs, mostly used in tests
+ */
+public class ConfigBuilder {
+
+  /** The built config */
+  public final PropertiesConfiguration config;
+
+  /**
+   * Default constructor
+   */
+  public ConfigBuilder() {
+    config = new PropertiesConfiguration();
+  }
+
+  /**
+   * Add a property to the config
+   * @param key of the property
+   * @param value of the property
+   * @return self
+   */
+  public ConfigBuilder add(String key, Object value) {
+    config.addProperty(key, value);
+    return this;
+  }
+
+  /**
+   * Save the config to a file
+   * @param filename  to save
+   * @return self
+   * @throws RuntimeException
+   */
+  public ConfigBuilder save(String filename) {
+    try {
+      config.save(filename);
+    }
+    catch (Exception e) {
+      throw new RuntimeException("Error saving config", e);
+    }
+    return this;
+  }
+
+  /**
+   * Return a subset configuration (so getParent() can be used.)
+   * @param prefix  of the subset
+   * @return the subset config
+   */
+  public SubsetConfiguration subset(String prefix) {
+    return new SubsetConfiguration(config, prefix, ".");
+  }
+}
+

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigUtil.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigUtil.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/ConfigUtil.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.io.PrintStream;
+import java.util.Iterator;
+
+import static org.junit.Assert.*;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.PropertiesConfiguration;
+
+/**
+ * Helpers for config tests and debugging
+ */
+class ConfigUtil {
+
+  static void dump(Configuration c) {
+    dump(null, c, System.out);
+  }
+
+  static void dump(String header, Configuration c) {
+    dump(header, c, System.out);
+  }
+
+  static void dump(String header, Configuration c, PrintStream out) {
+    PropertiesConfiguration p = new PropertiesConfiguration();
+    p.copy(c);
+    if (header != null) {
+      out.println(header);
+    }
+    try { p.save(out); }
+    catch (Exception e) {
+      throw new RuntimeException("Error saving config", e);
+    }
+  }
+
+  static void assertEq(Configuration expected, Configuration actual) {
+    // Check that the actual config contains all the properties of the expected
+    for (Iterator<?> it = expected.getKeys(); it.hasNext();) {
+      String key = (String) it.next();
+      assertTrue("actual should contain "+ key, actual.containsKey(key));
+      assertEquals("value of "+ key, expected.getProperty(key),
+                                     actual.getProperty(key));
+    }
+    // Check that the actual config has no extra properties
+    for (Iterator<?> it = actual.getKeys(); it.hasNext();) {
+      String key = (String) it.next();
+      assertTrue("expected should contain "+ key, expected.containsKey(key));
+    }
+  }
+
+}
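A quick usage sketch for the helpers above; the test class name is hypothetical, and it must live in the same package because ConfigUtil is package-private.

// Illustrative sketch only; lives alongside the helpers above.
package org.apache.hadoop.metrics2.impl;

import org.apache.commons.configuration.Configuration;
import org.junit.Test;

public class ConfigUtilSketchTest {
  @Test public void matchingConfigsCompareEqual() {
    Configuration expected = new ConfigBuilder().add("foo", "bar").config;
    Configuration actual = new ConfigBuilder().add("foo", "bar").config;
    ConfigUtil.dump("actual config:", actual); // print properties for debugging
    ConfigUtil.assertEq(expected, actual);     // same keys and values both ways
  }
}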

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/MetricsLists.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/MetricsLists.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/MetricsLists.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/MetricsLists.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+/**
+ * Helper to create metrics lists for testing
+ */
+class MetricsLists {
+
+  static MetricsRecordBuilderImpl builder(String name) {
+    return new MetricsCollectorImpl().addRecord(name);
+  }
+
+}

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsCollectorImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsCollectorImpl.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsCollectorImpl.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsCollectorImpl.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import static org.apache.hadoop.metrics2.filter.TestPatternFilter.*;
+import static org.apache.hadoop.metrics2.lib.Interns.*;
+
+public class TestMetricsCollectorImpl {
+
+  @Test public void recordBuilderShouldNoOpIfFiltered() {
+    SubsetConfiguration fc = new ConfigBuilder()
+        .add("p.exclude", "foo").subset("p");
+    MetricsCollectorImpl mb = new MetricsCollectorImpl();
+    mb.setRecordFilter(newGlobFilter(fc));
+    MetricsRecordBuilderImpl rb = mb.addRecord("foo");
+    rb.tag(info("foo", ""), "value").addGauge(info("g0", ""), 1);
+    assertEquals("no tags", 0, rb.tags().size());
+    assertEquals("no metrics", 0, rb.metrics().size());
+    assertNull("null record", rb.getRecord());
+    assertEquals("no records", 0, mb.getRecords().size());
+  }
+
+  @Test public void testPerMetricFiltering() {
+    SubsetConfiguration fc = new ConfigBuilder()
+        .add("p.exclude", "foo").subset("p");
+    MetricsCollectorImpl mb = new MetricsCollectorImpl();
+    mb.setMetricFilter(newGlobFilter(fc));
+    MetricsRecordBuilderImpl rb = mb.addRecord("foo");
+    rb.tag(info("foo", ""), "").addCounter(info("c0", ""), 0)
+      .addGauge(info("foo", ""), 1);
+    assertEquals("1 tag", 1, rb.tags().size());
+    assertEquals("1 metric", 1, rb.metrics().size());
+    assertEquals("expect foo tag", "foo", rb.tags().get(0).name());
+    assertEquals("expect c0", "c0", rb.metrics().get(0).name());
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.util.Map;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import static org.apache.hadoop.metrics2.impl.ConfigUtil.*;
+
+/**
+ * Test metrics configuration
+ */
+public class TestMetricsConfig {
+
+  static final Log LOG = LogFactory.getLog(TestMetricsConfig.class);
+
+  /**
+   * Common use cases
+   * @throws Exception
+   */
+  @Test public void testCommon() throws Exception {
+    String filename = getTestFilename("test-metrics2");
+    new ConfigBuilder()
+        .add("*.foo", "default foo")
+        .add("p1.*.bar", "p1 default bar")
+        .add("p1.t1.*.bar", "p1.t1 default bar")
+        .add("p1.t1.i1.name", "p1.t1.i1.name")
+        .add("p1.t1.42.bar", "p1.t1.42.bar")
+        .add("p1.t2.i1.foo", "p1.t2.i1.foo")
+        .add("p2.*.foo", "p2 default foo")
+        .save(filename);
+
+    MetricsConfig mc = MetricsConfig.create("p1", filename);
+    LOG.debug("mc:"+ mc);
+
+    Configuration expected = new ConfigBuilder()
+        .add("*.bar", "p1 default bar")
+        .add("t1.*.bar", "p1.t1 default bar")
+        .add("t1.i1.name", "p1.t1.i1.name")
+        .add("t1.42.bar", "p1.t1.42.bar")
+        .add("t2.i1.foo", "p1.t2.i1.foo")
+        .config;
+
+    assertEq(expected, mc);
+
+    testInstances(mc);
+  }
+
+  private void testInstances(MetricsConfig c) throws Exception {
+    Map<String, MetricsConfig> map = c.getInstanceConfigs("t1");
+    Map<String, MetricsConfig> map2 = c.getInstanceConfigs("t2");
+
+    assertEquals("number of t1 instances", 2, map.size());
+    assertEquals("number of t2 instances", 1, map2.size());
+    assertTrue("contains t1 instance i1", map.containsKey("i1"));
+    assertTrue("contains t1 instance 42", map.containsKey("42"));
+    assertTrue("contains t2 instance i1", map2.containsKey("i1"));
+
+    MetricsConfig t1i1 = map.get("i1");
+    MetricsConfig t1i42 = map.get("42");
+    MetricsConfig t2i1 = map2.get("i1");
+    LOG.debug("--- t1 instance i1:"+ t1i1);
+    LOG.debug("--- t1 instance 42:"+ t1i42);
+    LOG.debug("--- t2 instance i1:"+ t2i1);
+
+    Configuration t1expected1 = new ConfigBuilder()
+        .add("name", "p1.t1.i1.name").config;
+    Configuration t1expected42 = new ConfigBuilder()
+         .add("bar", "p1.t1.42.bar").config;
+    Configuration t2expected1 = new ConfigBuilder()
+        .add("foo", "p1.t2.i1.foo").config;
+
+    assertEq(t1expected1, t1i1);
+    assertEq(t1expected42, t1i42);
+    assertEq(t2expected1, t2i1);
+
+    LOG.debug("asserting foo == default foo");
+    // Check default lookups
+    assertEquals("value of foo in t1 instance i1", "default foo",
+                 t1i1.getString("foo"));
+    assertEquals("value of bar in t1 instance i1", "p1.t1 default bar",
+                 t1i1.getString("bar"));
+    assertEquals("value of foo in t1 instance 42", "default foo",
+                 t1i42.getString("foo"));
+    assertEquals("value of foo in t2 instance i1", "p1.t2.i1.foo",
+                 t2i1.getString("foo"));
+    assertEquals("value of bar in t2 instance i1", "p1 default bar",
+                 t2i1.getString("bar"));
+  }
+
+  /**
+   * Should throw if missing config files
+   */
+  @Test public void testMissingFiles() {
+    try {
+      MetricsConfig.create("JobTracker");
+    }
+    catch (MetricsConfigException e) {
+      assertTrue("expected the 'cannot locate configuration' exception",
+                 e.getMessage().startsWith("Cannot locate configuration"));
+      return;
+    }
+    fail("should've thrown");
+  }
+
+  /**
+   * Test the config file load order
+   * @throws Exception
+   */
+  @Test public void testLoadFirst() throws Exception {
+    String filename = getTestFilename("hadoop-metrics2-p1");
+    new ConfigBuilder().add("p1.foo", "p1foo").save(filename);
+
+    MetricsConfig mc = MetricsConfig.create("p1");
+    MetricsConfig mc2 = MetricsConfig.create("p1", "na1", "na2", filename);
+    Configuration expected = new ConfigBuilder().add("foo", "p1foo").config;
+
+    assertEq(expected, mc);
+    assertEq(expected, mc2);
+  }
+
+  /**
+   * Return a test filename on the class path
+   * @param basename  base name of the properties file
+   * @return the filename
+   */
+  public static String getTestFilename(String basename) {
+    return "build/classes/"+ basename +".properties";
+  }
+
+}
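To make the prefix/instance layering that testCommon exercises easier to follow, here is a reduced sketch; the class name, file base name, and property values are invented for illustration, and it assumes the same package as the test above.

// Illustrative sketch of the wildcard fallback chain exercised by testCommon.
package org.apache.hadoop.metrics2.impl;

import static org.junit.Assert.assertEquals;
import org.junit.Test;

public class MetricsConfigLayeringSketch {
  @Test public void instanceConfigFallsBackToWildcardDefaults() throws Exception {
    String filename = TestMetricsConfig.getTestFilename("hadoop-metrics2-sketch");
    new ConfigBuilder()
        .add("*.foo", "default foo")        // global default
        .add("p1.*.bar", "p1 default bar")  // prefix-level default
        .add("p1.t1.i1.name", "i1 name")    // instance-level value
        .save(filename);

    // create() strips the "p1." prefix; lookups on an instance config fall
    // back through the wildcard defaults when the instance has no value.
    MetricsConfig mc = MetricsConfig.create("p1", filename);
    MetricsConfig i1 = mc.getInstanceConfigs("t1").get("i1");
    assertEquals("i1 name", i1.getString("name"));        // from the instance
    assertEquals("p1 default bar", i1.getString("bar"));  // from p1.*.bar
    assertEquals("default foo", i1.getString("foo"));     // from *.foo
  }
}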

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.util.List;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.runners.MockitoJUnitRunner;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import com.google.common.collect.Iterables;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import static org.apache.hadoop.test.MoreAsserts.*;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.annotation.*;
+import static org.apache.hadoop.metrics2.lib.Interns.*;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+
+/**
+ * Test the MetricsSystemImpl class
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestMetricsSystemImpl {
+  private static final Log LOG = LogFactory.getLog(TestMetricsSystemImpl.class);
+  @Captor private ArgumentCaptor<MetricsRecord> r1;
+  @Captor private ArgumentCaptor<MetricsRecord> r2;
+  private static String hostname = MetricsSystemImpl.getHostname();
+
+  public static class TestSink implements MetricsSink {
+
+    @Override public void putMetrics(MetricsRecord record) {
+      LOG.debug(record);
+    }
+
+    @Override public void flush() {}
+
+    @Override public void init(SubsetConfiguration conf) {
+      LOG.debug(MetricsConfig.toString(conf));
+    }
+  }
+
+  @Test public void testInitFirst() throws Exception {
+    ConfigBuilder cb = new ConfigBuilder().add("*.period", 8)
+        //.add("test.sink.plugin.urls", getPluginUrlsAsString())
+        .add("test.sink.test.class", TestSink.class.getName())
+        .add("test.*.source.filter.exclude", "s0")
+        .add("test.source.s1.metric.filter.exclude", "X*")
+        .add("test.sink.sink1.metric.filter.exclude", "Y*")
+        .add("test.sink.sink2.metric.filter.exclude", "Y*")
+        .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
+    MetricsSystemImpl ms = new MetricsSystemImpl("Test");
+    ms.start();
+    ms.register("s0", "s0 desc", new TestSource("s0rec"));
+    TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
+    s1.c1.incr();
+    s1.xxx.incr();
+    s1.g1.set(2);
+    s1.yyy.incr(2);
+    s1.s1.add(0);
+    MetricsSink sink1 = mock(MetricsSink.class);
+    MetricsSink sink2 = mock(MetricsSink.class);
+    ms.registerSink("sink1", "sink1 desc", sink1);
+    ms.registerSink("sink2", "sink2 desc", sink2);
+    ms.onTimerEvent();  // trigger something interesting
+    ms.stop();
+
+    verify(sink1, times(2)).putMetrics(r1.capture());
+    List<MetricsRecord> mr1 = r1.getAllValues();
+    verify(sink2, times(2)).putMetrics(r2.capture());
+    List<MetricsRecord> mr2 = r2.getAllValues();
+    checkMetricsRecords(mr1);
+    assertEquals("output", mr1, mr2);
+  }
+
+  private void checkMetricsRecords(List<MetricsRecord> recs) {
+    LOG.debug(recs);
+    MetricsRecord r = recs.get(0);
+    assertEquals("name", "s1rec", r.name());
+    assertEquals("tags", new MetricsTag[] {
+      tag(MsInfo.Context, "test"),
+      tag(MsInfo.Hostname, hostname)}, r.tags());
+    assertEquals("metrics", MetricsLists.builder("")
+      .addCounter(info("C1", "C1 desc"), 1L)
+      .addGauge(info("G1", "G1 desc"), 2L)
+      .addCounter(info("S1NumOps", "Number of ops for s1"), 1L)
+      .addGauge(info("S1AvgTime", "Average time for s1"), 0.0)
+      .metrics(), r.metrics());
+
+    r = recs.get(1);
+    assertTrue("NumActiveSinks should be 3", Iterables.contains(r.metrics(),
+               new MetricGaugeInt(MsInfo.NumActiveSinks, 3)));
+  }
+
+  @Metrics(context="test")
+  private static class TestSource {
+    @Metric("C1 desc") MutableCounterLong c1;
+    @Metric("XXX desc") MutableCounterLong xxx;
+    @Metric("G1 desc") MutableGaugeLong g1;
+    @Metric("YYY desc") MutableGaugeLong yyy;
+    @Metric MutableRate s1;
+    final MetricsRegistry registry;
+
+    TestSource(String recName) {
+      registry = new MetricsRegistry(recName);
+    }
+  }
+
+  private static String getPluginUrlsAsString() {
+    return "file:metrics2-test-plugin.jar";
+  }
+}

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsVisitor.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsVisitor.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsVisitor.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.util.List;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import org.junit.runner.RunWith;
+
+import static org.mockito.Mockito.*;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import org.apache.hadoop.metrics2.MetricsVisitor;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import static org.apache.hadoop.metrics2.lib.Interns.*;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+
+/**
+ * Test the metric visitor interface
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestMetricsVisitor {
+
+  @Captor private ArgumentCaptor<MetricsInfo> c1;
+  @Captor private ArgumentCaptor<MetricsInfo> c2;
+  @Captor private ArgumentCaptor<MetricsInfo> g1;
+  @Captor private ArgumentCaptor<MetricsInfo> g2;
+  @Captor private ArgumentCaptor<MetricsInfo> g3;
+  @Captor private ArgumentCaptor<MetricsInfo> g4;
+
+  /**
+   * Test the common use cases
+   */
+  @Test public void testCommon() {
+    MetricsVisitor visitor = mock(MetricsVisitor.class);
+    MetricsRegistry registry = new MetricsRegistry("test");
+    List<AbstractMetric> metrics = MetricsLists.builder("test")
+        .addCounter(info("c1", "int counter"), 1)
+        .addCounter(info("c2", "long counter"), 2L)
+        .addGauge(info("g1", "int gauge"), 5)
+        .addGauge(info("g2", "long gauge"), 6L)
+        .addGauge(info("g3", "float gauge"), 7f)
+        .addGauge(info("g4", "double gauge"), 8d)
+        .metrics();
+
+    for (AbstractMetric metric : metrics) {
+      metric.visit(visitor);
+    }
+
+    verify(visitor).counter(c1.capture(), eq(1));
+    assertEquals("c1 name", "c1", c1.getValue().name());
+    assertEquals("c1 description", "int counter", c1.getValue().description());
+    verify(visitor).counter(c2.capture(), eq(2L));
+    assertEquals("c2 name", "c2", c2.getValue().name());
+    assertEquals("c2 description", "long counter", c2.getValue().description());
+    verify(visitor).gauge(g1.capture(), eq(5));
+    assertEquals("g1 name", "g1", g1.getValue().name());
+    assertEquals("g1 description", "int gauge", g1.getValue().description());
+    verify(visitor).gauge(g2.capture(), eq(6L));
+    assertEquals("g2 name", "g2", g2.getValue().name());
+    assertEquals("g2 description", "long gauge", g2.getValue().description());
+    verify(visitor).gauge(g3.capture(), eq(7f));
+    assertEquals("g3 name", "g3", g3.getValue().name());
+    assertEquals("g3 description", "float gauge", g3.getValue().description());
+    verify(visitor).gauge(g4.capture(), eq(8d));
+    assertEquals("g4 name", "g4", g4.getValue().name());
+    assertEquals("g4 description", "double gauge", g4.getValue().description());
+  }
+
+}

Added: hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestSinkQueue.java?rev=1079141&view=auto
==============================================================================
--- hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestSinkQueue.java (added)
+++ hadoop/common/branches/yahoo-merge/src/test/core/org/apache/hadoop/metrics2/impl/TestSinkQueue.java Tue Mar  8 04:38:59 2011
@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.util.ConcurrentModificationException;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import static org.apache.hadoop.metrics2.impl.SinkQueue.*;
+
+/**
+ * Test the half-blocking metrics sink queue
+ */
+public class TestSinkQueue {
+
+  private final Log LOG = LogFactory.getLog(TestSinkQueue.class);
+
+  /**
+   * Test common use case
+   * @throws Exception
+   */
+  @Test public void testCommon() throws Exception {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(2);
+    q.enqueue(1);
+    assertEquals("queue front", 1, (int) q.front());
+    assertEquals("queue back", 1, (int) q.back());
+    assertEquals("element", 1, (int) q.dequeue());
+
+    assertTrue("should enqueue", q.enqueue(2));
+    q.consume(new Consumer<Integer>() {
+      public void consume(Integer e) {
+        assertEquals("element", 2, (int) e);
+      }
+    });
+    assertTrue("should enqueue", q.enqueue(3));
+    assertEquals("element", 3, (int) q.dequeue());
+    assertEquals("queue size", 0, q.size());
+    assertEquals("queue front", null, q.front());
+    assertEquals("queue back", null, q.back());
+  }
+
+  /**
+   * Test blocking when queue is empty
+   * @throws Exception
+   */
+  @Test public void testEmptyBlocking() throws Exception {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(2);
+    final Runnable trigger = mock(Runnable.class);
+    // try consuming an empty queue and blocking
+    Thread t = new Thread() {
+      @Override public void run() {
+        try {
+          assertEquals("element", 1, (int) q.dequeue());
+          q.consume(new Consumer<Integer>() {
+            public void consume(Integer e) {
+              assertEquals("element", 2, (int) e);
+              trigger.run();
+            }
+          });
+        }
+        catch (InterruptedException e) {
+          LOG.warn("Interrupted", e);
+        }
+      }
+    };
+    t.start();
+    Thread.yield(); // Let the other thread block
+    q.enqueue(1);
+    q.enqueue(2);
+    t.join();
+    verify(trigger).run();
+  }
+
+  /**
+   * Test nonblocking enqueue when queue is full
+   * @throws Exception
+   */
+  @Test public void testFull() throws Exception {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
+    q.enqueue(1);
+
+    assertTrue("should drop", !q.enqueue(2));
+    assertEquals("element", 1, (int) q.dequeue());
+
+    q.enqueue(3);
+    q.consume(new Consumer<Integer>() {
+      public void consume(Integer e) {
+        assertEquals("element", 3, (int) e);
+      }
+    });
+    assertEquals("queue size", 0, q.size());
+  }
+
+  /**
+   * Test the consumeAll method
+   * @throws Exception
+   */
+  @Test public void testConsumeAll() throws Exception {
+    final int capacity = 64;  // arbitrary
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
+
+    for (int i = 0; i < capacity; ++i) {
+      assertTrue("should enqueue", q.enqueue(i));
+    }
+    assertTrue("should not enqueue", !q.enqueue(capacity));
+
+    final Runnable trigger = mock(Runnable.class);
+    q.consumeAll(new Consumer<Integer>() {
+      private int expected = 0;
+      public void consume(Integer e) {
+        assertEquals("element", expected++, (int) e);
+        trigger.run();
+      }
+    });
+
+    verify(trigger, times(capacity)).run();
+  }
+
+  /**
+   * Test the consumer throwing exceptions
+   * @throws Exception
+   */
+  @Test public void testConsumerException() throws Exception {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(1);
+    final RuntimeException ex = new RuntimeException("expected");
+    q.enqueue(1);
+
+    try {
+      q.consume(new Consumer<Integer>() {
+        public void consume(Integer e) {
+          throw ex;
+        }
+      });
+    }
+    catch (Exception expected) {
+      assertSame("consumer exception", ex, expected);
+    }
+    // The queue should be in consistent state after exception
+    assertEquals("queue size", 1, q.size());
+    assertEquals("element", 1, (int) q.dequeue());
+  }
+
+  /**
+   * Test the clear method
+   */
+  @Test public void testClear() {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(128);
+    for (int i = 0; i < q.capacity() + 97; ++i) {
+      q.enqueue(i);
+    }
+    assertEquals("queue size", q.capacity(), q.size());
+    q.clear();
+    assertEquals("queue size", 0, q.size());
+  }
+
+  /**
+   * Test consumers that take their time.
+   * @throws Exception
+   */
+  @Test public void testHangingConsumer() throws Exception {
+    SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1, 2);
+    assertEquals("queue back", 2, (int) q.back());
+    assertTrue("should drop", !q.enqueue(3)); // should not block
+    assertEquals("queue size", 2, q.size());
+    assertEquals("queue head", 1, (int) q.front());
+    assertEquals("queue back", 2, (int) q.back());
+  }
+
+  /**
+   * Test concurrent consumer access, which is illegal
+   * @throws Exception
+   */
+  @Test public void testConcurrentConsumers() throws Exception {
+    final SinkQueue<Integer> q = newSleepingConsumerQueue(2, 1);
+    assertTrue("should enqueue", q.enqueue(2));
+    assertEquals("queue back", 2, (int) q.back());
+    assertTrue("should drop", !q.enqueue(3)); // should not block
+    shouldThrowCME(new Fun() {
+      public void run() {
+        q.clear();
+      }
+    });
+    shouldThrowCME(new Fun() {
+      public void run() throws Exception {
+        q.consume(null);
+      }
+    });
+    shouldThrowCME(new Fun() {
+      public void run() throws Exception {
+        q.consumeAll(null);
+      }
+    });
+    shouldThrowCME(new Fun() {
+      public void run() throws Exception {
+        q.dequeue();
+      }
+    });
+    // The queue should still be in consistent state after all the exceptions
+    assertEquals("queue size", 2, q.size());
+    assertEquals("queue front", 1, (int) q.front());
+    assertEquals("queue back", 2, (int) q.back());
+  }
+
+  private void shouldThrowCME(Fun callback) throws Exception {
+    try {
+      callback.run();
+    }
+    catch (ConcurrentModificationException e) {
+      LOG.info(e);
+      return;
+    }
+    fail("should've thrown");
+  }
+
+  private SinkQueue<Integer> newSleepingConsumerQueue(int capacity,
+                                                      int... values) {
+    final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
+    for (int i : values) {
+      q.enqueue(i);
+    }
+    Thread t = new Thread() {
+      @Override public void run() {
+        try {
+          q.consume(new Consumer<Integer>() {
+            public void consume(Integer e) throws InterruptedException {
+              LOG.info("sleeping");
+              Thread.sleep(1000 * 86400); // a long time
+            }
+          });
+        }
+        catch (InterruptedException ex) {
+          LOG.warn("Interrupted", ex);
+        }
+      }
+    };
+    t.setName("Sleeping consumer");
+    t.setDaemon(true);  // so jvm can exit
+    t.start();
+    Thread.yield(); // Let the consumer consume
+    LOG.debug("Returning new sleeping consumer queue");
+    return q;
+  }
+
+  static interface Fun {
+    void run() throws Exception;
+  }
+
+}
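Finally, a compact sketch of the "half-blocking" contract these tests pin down: enqueue never blocks the producer (it drops and returns false when the queue is full), while dequeue and consume block the consumer when the queue is empty. The class name is invented; the SinkQueue calls mirror the tests above and assume the same package.

// Illustrative sketch of SinkQueue's half-blocking behaviour.
package org.apache.hadoop.metrics2.impl;

public class SinkQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    SinkQueue<Integer> q = new SinkQueue<Integer>(1);
    System.out.println(q.enqueue(1)); // true: there was room
    System.out.println(q.enqueue(2)); // false: full, dropped without blocking
    System.out.println(q.dequeue());  // 1; on an empty queue this would block
  }
}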