Posted to common-commits@hadoop.apache.org by st...@apache.org on 2017/09/15 16:39:27 UTC

[02/20] hadoop git commit: HADOOP-14553. Add (parallelized) integration tests to hadoop-azure Contributed by Steve Loughran

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
new file mode 100644
index 0000000..0b72f06
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestConstants.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Constants for the Azure tests.
+ */
+public interface AzureTestConstants {
+
+  /**
+   * Prefix for any cross-filesystem scale test options.
+   */
+  String SCALE_TEST = "scale.test.";
+
+  /**
+   * Prefix for wasb-specific scale tests.
+   */
+  String AZURE_SCALE_TEST = "fs.azure.scale.test.";
+
+  /**
+   * Prefix for FS wasb tests.
+   */
+  String TEST_FS_WASB = "test.fs.azure.";
+
+  /**
+   * Name of the test filesystem.
+   */
+  String TEST_FS_WASB_NAME = TEST_FS_WASB + "name";
+
+  /**
+   * Tell tests that they are being executed in parallel: {@value}.
+   */
+  String KEY_PARALLEL_TEST_EXECUTION = "test.parallel.execution";
+
+  /**
+   * A property set to true in maven if scale tests are enabled: {@value}.
+   */
+  String KEY_SCALE_TESTS_ENABLED = AZURE_SCALE_TEST + "enabled";
+
+  /**
+   * The number of operations to perform: {@value}.
+   */
+  String KEY_OPERATION_COUNT = SCALE_TEST + "operation.count";
+
+  /**
+   * The number of directory operations to perform: {@value}.
+   */
+  String KEY_DIRECTORY_COUNT = SCALE_TEST + "directory.count";
+
+  /**
+   * The readahead buffer size: {@value}.
+   */
+  String KEY_READ_BUFFER_SIZE = AZURE_SCALE_TEST + "read.buffer.size";
+
+  int DEFAULT_READ_BUFFER_SIZE = 16384;
+
+  /**
+   * Key for a multi-MB test file: {@value}.
+   */
+  String KEY_CSVTEST_FILE = AZURE_SCALE_TEST + "csvfile";
+
+  /**
+   * Default path for the multi-MB test file: {@value}.
+   */
+  String DEFAULT_CSVTEST_FILE = "wasb://datasets@azuremlsampleexperiments.blob.core.windows.net/network_intrusion_detection.csv";
+
+  /**
+   * Name of the property to define the timeout for scale tests: {@value}.
+   * Measured in seconds.
+   */
+  String KEY_TEST_TIMEOUT = AZURE_SCALE_TEST + "timeout";
+
+  /**
+   * Name of the property to define the file size for the huge file
+   * tests: {@value}.
+   * Measured in KB; a suffix like "M" or "G" will change the unit.
+   */
+  String KEY_HUGE_FILESIZE = AZURE_SCALE_TEST + "huge.filesize";
+
+  /**
+   * Name of the property to define the partition size for the huge file
+   * tests: {@value}.
+   * Measured in KB; a suffix like "M" or "G" will change the unit.
+   */
+  String KEY_HUGE_PARTITION_SIZE = AZURE_SCALE_TEST + "huge.partitionsize";
+
+  /**
+   * The default huge file size is deliberately small; full 5GB+ scale
+   * tests are something to run in long test runs on cloud VMs. {@value}.
+   */
+  String DEFAULT_HUGE_FILESIZE = "10M";
+
+  /**
+   * The default number of operations to perform: {@value}.
+   */
+  long DEFAULT_OPERATION_COUNT = 2005;
+
+  /**
+   * Default number of directories to create when performing
+   * directory performance/scale tests.
+   */
+  int DEFAULT_DIRECTORY_COUNT = 2;
+
+  /**
+   * Default policy on scale tests: {@value}.
+   */
+  boolean DEFAULT_SCALE_TESTS_ENABLED = false;
+
+  /**
+   * Fork ID passed down from maven if the test is running in parallel.
+   */
+  String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
+
+  /**
+   * Timeout in milliseconds for standard tests: {@value}.
+   */
+  int AZURE_TEST_TIMEOUT = 10 * 60 * 1000;
+
+  /**
+   * Timeout in seconds for scale tests: {@value}.
+   */
+  int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
+
+  int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
+
+  String ACCOUNT_KEY_PROPERTY_NAME
+      = "fs.azure.account.key.";
+  String SAS_PROPERTY_NAME = "fs.azure.sas.";
+  String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
+  String TEST_ACCOUNT_NAME_PROPERTY_NAME
+      = "fs.azure.test.account.name";
+  String MOCK_ACCOUNT_NAME
+      = "mockAccount.blob.core.windows.net";
+  String MOCK_CONTAINER_NAME = "mockContainer";
+  String WASB_AUTHORITY_DELIMITER = "@";
+  String WASB_SCHEME = "wasb";
+  String PATH_DELIMITER = "/";
+  String AZURE_ROOT_CONTAINER = "$root";
+  String MOCK_WASB_URI = "wasb://" + MOCK_CONTAINER_NAME
+      + WASB_AUTHORITY_DELIMITER + MOCK_ACCOUNT_NAME + "/";
+  String USE_EMULATOR_PROPERTY_NAME
+      = "fs.azure.test.emulator";
+
+  String KEY_DISABLE_THROTTLING
+      = "fs.azure.disable.bandwidth.throttling";
+  String KEY_READ_TOLERATE_CONCURRENT_APPEND
+      = "fs.azure.io.read.tolerate.concurrent.append";
+  /**
+   * Path for page blobs: {@value}.
+   */
+  String DEFAULT_PAGE_BLOB_DIRECTORY = "pageBlobs";
+
+  String DEFAULT_ATOMIC_RENAME_DIRECTORIES
+      = "/atomicRenameDir1,/atomicRenameDir2";
+
+  /**
+   * Base directory for page blobs.
+   */
+  Path PAGE_BLOB_DIR = new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
+}
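
A quick sketch of how these constants are meant to be consumed, using the
getTestPropertyBytes helper added in AzureTestUtils below (the configuration
and values here are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;

    import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*;
    import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;

    // Resolve the huge-file size for a scale test run. A system property
    // such as -Dfs.azure.scale.test.huge.filesize=1G overrides whatever is
    // in the Configuration; the "10M" default is parsed with binary
    // suffixes (K, M, G, ...) via StringUtils.TraditionalBinaryPrefix.
    Configuration conf = new Configuration();
    long hugeFilesize = getTestPropertyBytes(conf, KEY_HUGE_FILESIZE,
        DEFAULT_HUGE_FILESIZE);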

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
new file mode 100644
index 0000000..2fbbcd1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.internal.AssumptionViolatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+/**
+ * Utilities for the Azure tests. Based on {@code S3ATestUtils}, so
+ * (initially) has some unused methods.
+ */
+public final class AzureTestUtils extends Assert {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      AzureTestUtils.class);
+
+  /**
+   * Value to set a system property to (in maven) to declare that
+   * a property has been unset.
+   */
+  public static final String UNSET_PROPERTY = "unset";
+
+  /**
+   * Create the test filesystem.
+   *
+   * If the {@code test.fs.azure.name} property is not set, this will
+   * raise a JUnit assumption exception.
+   *
+   * @param conf configuration
+   * @return the FS
+   * @throws IOException IO Problems
+   * @throws AssumptionViolatedException if the FS is not named
+   */
+  public static NativeAzureFileSystem createTestFileSystem(Configuration conf)
+      throws IOException {
+
+    String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+    boolean liveTest = !StringUtils.isEmpty(fsname);
+    URI testURI = null;
+    if (liveTest) {
+      testURI = URI.create(fsname);
+      liveTest = testURI.getScheme().equals(WASB_SCHEME);
+    }
+    if (!liveTest) {
+      // Skip the test
+      throw new AssumptionViolatedException(
+          "No test filesystem in " + TEST_FS_WASB_NAME);
+    }
+    NativeAzureFileSystem fs1 = new NativeAzureFileSystem();
+    fs1.initialize(testURI, conf);
+    return fs1;
+  }
+
+  /**
+   * Create a file context for tests.
+   *
+   * If the {@code test.fs.azure.name} property is not set, this will
+   * raise a JUnit assumption exception.
+   * @param conf configuration
+   * @return the FS
+   * @throws IOException IO Problems
+   * @throws AssumptionViolatedException if the FS is not named
+   */
+  public static FileContext createTestFileContext(Configuration conf)
+      throws IOException {
+    String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
+
+    boolean liveTest = !StringUtils.isEmpty(fsname);
+    URI testURI = null;
+    if (liveTest) {
+      testURI = URI.create(fsname);
+      liveTest = testURI.getScheme().equals(WASB_SCHEME);
+    }
+    if (!liveTest) {
+      // This doesn't work with our JUnit 3 style test cases, so instead we'll
+      // make this whole class not run by default
+      throw new AssumptionViolatedException("No test filesystem in "
+          + TEST_FS_WASB_NAME);
+    }
+    FileContext fc = FileContext.getFileContext(testURI, conf);
+    return fc;
+  }
+
+  /**
+   * Get a long test property.
+   * <ol>
+   *   <li>Look up configuration value (which can pick up core-default.xml),
+   *       using {@code defVal} as the default value (if conf != null).
+   *   </li>
+   *   <li>Fetch the system property.</li>
+   *   <li>If the system property is not empty or "(unset)":
+   *   it overrides the conf value.
+   *   </li>
+   * </ol>
+   * This puts the build properties in charge of everything. It's not a
+   * perfect design; having maven set properties based on a file, as ant let
+   * you do, is better for customization.
+   *
+   * As to why there's a special (unset) value, see
+   * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+   *   Stack Overflow</a>.
+   * @param conf config: may be null
+   * @param key key to look up
+   * @param defVal default value
+   * @return the evaluated test property.
+   */
+  public static long getTestPropertyLong(Configuration conf,
+      String key, long defVal) {
+    return Long.valueOf(
+        getTestProperty(conf, key, Long.toString(defVal)));
+  }
+
+  /**
+   * Get a test property value in bytes, using k, m, g, t, p, e suffixes.
+   * {@link org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix#string2long(String)}
+   * <ol>
+   *   <li>Look up configuration value (which can pick up core-default.xml),
+   *       using {@code defVal} as the default value (if conf != null).
+   *   </li>
+   *   <li>Fetch the system property.</li>
+   *   <li>If the system property is not empty or "(unset)":
+   *   it overrides the conf value.
+   *   </li>
+   * </ol>
+   * This puts the build properties in charge of everything. It's not a
+   * perfect design; having maven set properties based on a file, as ant let
+   * you do, is better for customization.
+   *
+   * As to why there's a special (unset) value, see
+   * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+   *   Stack Overflow</a>.
+   * @param conf config: may be null
+   * @param key key to look up
+   * @param defVal default value
+   * @return the evaluated test property.
+   */
+  public static long getTestPropertyBytes(Configuration conf,
+      String key, String defVal) {
+    return org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix
+        .string2long(getTestProperty(conf, key, defVal));
+  }
+
+  /**
+   * Get an integer test property; algorithm described in
+   * {@link #getTestPropertyLong(Configuration, String, long)}.
+   * @param conf config: may be null
+   * @param key key to look up
+   * @param defVal default value
+   * @return the evaluated test property.
+   */
+  public static int getTestPropertyInt(Configuration conf,
+      String key, int defVal) {
+    return (int) getTestPropertyLong(conf, key, defVal);
+  }
+
+  /**
+   * Get a boolean test property; algorithm described in
+   * {@link #getTestPropertyLong(Configuration, String, long)}.
+   * @param conf config: may be null
+   * @param key key to look up
+   * @param defVal default value
+   * @return the evaluated test property.
+   */
+  public static boolean getTestPropertyBool(Configuration conf,
+      String key,
+      boolean defVal) {
+    return Boolean.valueOf(
+        getTestProperty(conf, key, Boolean.toString(defVal)));
+  }
+
+  /**
+   * Get a string test property.
+   * <ol>
+   *   <li>Look up configuration value (which can pick up core-default.xml),
+   *       using {@code defVal} as the default value (if conf != null).
+   *   </li>
+   *   <li>Fetch the system property.</li>
+   *   <li>If the system property is not empty or "(unset)":
+   *   it overrides the conf value.
+   *   </li>
+   * </ol>
+   * This puts the build properties in charge of everything. It's not a
+   * perfect design; having maven set properties based on a file, as ant let
+   * you do, is better for customization.
+   *
+   * As to why there's a special (unset) value, see
+   * <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
+   *   Stack Overflow</a>.
+   * @param conf config: may be null
+   * @param key key to look up
+   * @param defVal default value
+   * @return the evaluated test property.
+   */
+  public static String getTestProperty(Configuration conf,
+      String key,
+      String defVal) {
+    String confVal = conf != null
+        ? conf.getTrimmed(key, defVal)
+        : defVal;
+    String propval = System.getProperty(key);
+    return StringUtils.isNotEmpty(propval) && !UNSET_PROPERTY.equals(propval)
+        ? propval : confVal;
+  }
+
+  /**
+   * Verify the class of an exception. If it is not as expected, rethrow it.
+   * Comparison is on the exact class, not subclass-of inference as
+   * offered by {@code instanceof}.
+   * @param clazz the expected exception class
+   * @param ex the exception caught
+   * @return the exception, if it is of the expected class
+   * @throws Exception the exception passed in.
+   */
+  public static Exception verifyExceptionClass(Class clazz,
+      Exception ex)
+      throws Exception {
+    if (!(ex.getClass().equals(clazz))) {
+      throw ex;
+    }
+    return ex;
+  }
+
+  /**
+   * Turn off FS Caching: use if a filesystem with different options from
+   * the default is required.
+   * @param conf configuration to patch
+   */
+  public static void disableFilesystemCaching(Configuration conf) {
+    conf.setBoolean("fs.wasb.impl.disable.cache", true);
+  }
+
+  /**
+   * Create a test path, using the value of
+   * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+   * @param defVal default value
+   * @return a path
+   */
+  public static Path createTestPath(Path defVal) {
+    String testUniqueForkId = System.getProperty(
+        AzureTestConstants.TEST_UNIQUE_FORK_ID);
+    return testUniqueForkId == null
+        ? defVal
+        : new Path("/" + testUniqueForkId, "test");
+  }
+
+  /**
+   * Create a test page blob path using the value of
+   * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+   * @param fs filesystem to qualify the path against
+   * @param filename filename at the end of the path
+   * @return an absolute path
+   */
+  public static Path blobPathForTests(FileSystem fs, String filename) {
+    String testUniqueForkId = System.getProperty(
+        AzureTestConstants.TEST_UNIQUE_FORK_ID);
+    return fs.makeQualified(new Path(PAGE_BLOB_DIR,
+        testUniqueForkId == null
+            ? filename
+            : (testUniqueForkId + "/" + filename)));
+  }
+
+  /**
+   * Create a test path using the value of
+   * {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
+   * @param fs filesystem to qualify the path against
+   * @param filename filename at the end of the path
+   * @return an absolute path
+   */
+  public static Path pathForTests(FileSystem fs, String filename) {
+    String testUniqueForkId = System.getProperty(
+        AzureTestConstants.TEST_UNIQUE_FORK_ID);
+    return fs.makeQualified(new Path(
+        testUniqueForkId == null
+            ? ("/test/" + filename)
+            : (testUniqueForkId + "/" + filename)));
+  }
+
+  /**
+   * Get a unique fork ID.
+   * Returns a default value for non-parallel tests.
+   * @return a string unique for all test VMs running in this maven build.
+   */
+  public static String getForkID() {
+    return System.getProperty(
+        AzureTestConstants.TEST_UNIQUE_FORK_ID, "fork-1");
+  }
+
+  /**
+   * Flag to indicate that this test is being executed in parallel.
+   * This is used by some of the scale tests to validate test time expectations.
+   * @return true if the build indicates this test is being run in parallel.
+   */
+  public static boolean isParallelExecution() {
+    return Boolean.getBoolean(KEY_PARALLEL_TEST_EXECUTION);
+  }
+
+  /**
+   * Asserts that {@code obj} is an instance of {@code expectedClass} using a
+   * descriptive assertion message.
+   * @param expectedClass class
+   * @param obj object to check
+   */
+  public static void assertInstanceOf(Class<?> expectedClass, Object obj) {
+    Assert.assertTrue(String.format("Expected instance of class %s, but is %s.",
+        expectedClass, obj.getClass()),
+        expectedClass.isAssignableFrom(obj.getClass()));
+  }
+
+  /**
+   * Builds a comma-separated list of class names.
+   * @param classes list of classes
+   * @return comma-separated list of class names
+   */
+  public static <T extends Class<?>> String buildClassListString(
+      List<T> classes) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < classes.size(); ++i) {
+      if (i > 0) {
+        sb.append(',');
+      }
+      sb.append(classes.get(i).getName());
+    }
+    return sb.toString();
+  }
+
+  /**
+   * This class should not be instantiated.
+   */
+  private AzureTestUtils() {
+  }
+
+  /**
+   * Assert that a configuration option matches the expected value.
+   * @param conf configuration
+   * @param key option key
+   * @param expected expected value
+   */
+  public static void assertOptionEquals(Configuration conf,
+      String key,
+      String expected) {
+    assertEquals("Value of " + key, expected, conf.get(key));
+  }
+
+  /**
+   * Assume that a condition is met. If not: log at WARN and
+   * then throw an {@link AssumptionViolatedException}.
+   * @param message message in an assumption
+   * @param condition condition to probe
+   */
+  public static void assume(String message, boolean condition) {
+    if (!condition) {
+      LOG.warn(message);
+    }
+    Assume.assumeTrue(message, condition);
+  }
+
+  /**
+   * Gets the current value of the given gauge.
+   * @param fs filesystem
+   * @param gaugeName gauge name
+   * @return the gauge value
+   */
+  public static long getLongGaugeValue(NativeAzureFileSystem fs,
+      String gaugeName) {
+    return getLongGauge(gaugeName, getMetrics(fs.getInstrumentation()));
+  }
+
+  /**
+   * Gets the current value of the given counter.
+   * @param fs filesystem
+   * @param counterName counter name
+   * @return the counter value
+   */
+  public static long getLongCounterValue(NativeAzureFileSystem fs,
+      String counterName) {
+    return getLongCounter(counterName, getMetrics(fs.getInstrumentation()));
+  }
+
+  /**
+   * Delete a path, catching any exception and downgrading to a log message.
+   * @param fs filesystem
+   * @param path path to delete
+   * @param recursive recursive delete?
+   * @throws IOException IO failure.
+   */
+  public static void deleteQuietly(FileSystem fs,
+      Path path,
+      boolean recursive) throws IOException {
+    if (fs != null && path != null) {
+      try {
+        fs.delete(path, recursive);
+      } catch (IOException e) {
+        LOG.warn("When deleting {}", path, e);
+      }
+    }
+  }
+
+  /**
+   * Clean up the test account if non-null; returns null so that the
+   * caller can use the result to reset its field.
+   * @param testAccount test account to clean up
+   * @return null
+   * @throws Exception cleanup problems
+   */
+  public static AzureBlobStorageTestAccount cleanup(
+      AzureBlobStorageTestAccount testAccount) throws Exception {
+    if (testAccount != null) {
+      testAccount.cleanup();
+      testAccount = null;
+    }
+    return null;
+  }
+
+  /**
+   * Clean up the test account; any thrown exceptions are caught and
+   * logged.
+   * @param testAccount test account
+   * @return null, so that any fields can be reset.
+   */
+  public static AzureBlobStorageTestAccount cleanupTestAccount(
+      AzureBlobStorageTestAccount testAccount) {
+    if (testAccount != null) {
+      try {
+        testAccount.cleanup();
+      } catch (Exception e) {
+        LOG.error("While cleaning up test account: ", e);
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Assume that the scale tests are enabled by the relevant system property.
+   */
+  public static void assumeScaleTestsEnabled(Configuration conf) {
+    boolean enabled = getTestPropertyBool(
+        conf,
+        KEY_SCALE_TESTS_ENABLED,
+        DEFAULT_SCALE_TESTS_ENABLED);
+    assume("Scale test disabled: to enable set property "
+            + KEY_SCALE_TESTS_ENABLED,
+        enabled);
+  }
+}
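
To make the lookup order of these helpers concrete (configuration value
first, then a non-empty system property other than the "unset" marker
overriding it), here is a minimal sketch using the names defined in this
patch; the numbers are invented for illustration:

    Configuration conf = new Configuration();
    conf.setLong(KEY_OPERATION_COUNT, 500);

    // No system property set: the conf value wins over the default.
    long ops = getTestPropertyLong(conf, KEY_OPERATION_COUNT,
        DEFAULT_OPERATION_COUNT);                       // 500

    // A non-empty system property overrides the conf value...
    System.setProperty(KEY_OPERATION_COUNT, "50");
    ops = getTestPropertyLong(conf, KEY_OPERATION_COUNT,
        DEFAULT_OPERATION_COUNT);                       // 50

    // ...unless it is the special "unset" marker set by the maven build,
    // in which case the conf value is used again.
    System.setProperty(KEY_OPERATION_COUNT, UNSET_PROPERTY);
    ops = getTestPropertyLong(conf, KEY_OPERATION_COUNT,
        DEFAULT_OPERATION_COUNT);                       // 500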

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
new file mode 100644
index 0000000..059a8c4
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/CleanupTestContainers.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.util.EnumSet;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+
+/**
+ * This looks like a test, but it is really a command to invoke to
+ * clean up containers created in other test runs.
+ */
+public class CleanupTestContainers extends AbstractWasbTestBase {
+
+  private static final String CONTAINER_PREFIX = "wasbtests-";
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create(
+        "CleanupTestContainers",
+        EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
+        createConfiguration(),
+        true);
+  }
+
+  @Test
+  public void testEnumContainers() throws Throwable {
+    describe("Enumerating all the WASB test containers");
+
+    int count = 0;
+    CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+    CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+    Iterable<CloudBlobContainer> containers
+        = blobClient.listContainers(CONTAINER_PREFIX);
+    for (CloudBlobContainer container : containers) {
+      count++;
+      LOG.info("Container {} URI {}",
+          container.getName(),
+          container.getUri());
+    }
+    LOG.info("Found {} test containers", count);
+  }
+
+  @Test
+  public void testDeleteContainers() throws Throwable {
+    describe("Delete all the WASB test containers");
+    int count = 0;
+    CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
+    CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
+    Iterable<CloudBlobContainer> containers
+        = blobClient.listContainers(CONTAINER_PREFIX);
+    for (CloudBlobContainer container : containers) {
+      LOG.info("Container {} URI {}",
+          container.getName(),
+          container.getUri());
+      if (container.deleteIfExists()) {
+        count++;
+      }
+    }
+    LOG.info("Deleted {} test containers", count);
+  }
+
+
+}
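
Since this "test" is really a housekeeping command, the expectation is
presumably that it is invoked on demand rather than as part of the normal
suite; with the standard surefire test filter that would look something
like mvn test -Dtest=CleanupTestContainers run from hadoop-tools/hadoop-azure
(an assumption about the build setup, not something stated in the patch).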

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
new file mode 100644
index 0000000..850aca1
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * <b>Important:</b> the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ *
+ * <b>Ignore checkstyle complaints about naming: we need a scheme with visible
+ * ordering.</b>
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ITestAzureHugeFiles extends AbstractAzureScaleTest {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ITestAzureHugeFiles.class);
+
+  private Path scaleTestDir;
+  private Path hugefile;
+  private Path hugefileRenamed;
+  private AzureBlobStorageTestAccount testAccountForCleanup;
+
+  private static final int UPLOAD_BLOCKSIZE = 64 * S_1K;
+  private static final byte[] SOURCE_DATA;
+
+  static {
+    SOURCE_DATA = dataset(UPLOAD_BLOCKSIZE, 0, S_256);
+  }
+
+  private Path testPath;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    testPath = path("ITestAzureHugeFiles");
+    scaleTestDir = new Path(testPath, "scale");
+    hugefile = new Path(scaleTestDir, "hugefile");
+    hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+  }
+
+  /**
+   * Only clean up the test account (and delete the container) if the account
+   * is set in the field {@code testAccountForCleanup}.
+   * @throws Exception on any teardown failure.
+   */
+  @Override
+  public void tearDown() throws Exception {
+    testAccount = null;
+    super.tearDown();
+    if (testAccountForCleanup != null) {
+      cleanupTestAccount(testAccountForCleanup);
+    }
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create(
+        "testazurehugefiles",
+        EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+        createConfiguration(),
+        true);
+  }
+
+  /**
+   * Stop the test-case teardown from deleting the test path.
+   * @throws IOException never
+   */
+  protected void deleteTestDirInTeardown() throws IOException {
+    // this is a no-op, so the test file is preserved.
+    // the last test in the suite does the teardown
+  }
+
+  protected void deleteHugeFile() throws IOException {
+    describe("Deleting %s", hugefile);
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    getFileSystem().delete(hugefile, false);
+    timer.end("time to delete %s", hugefile);
+  }
+
+  /**
+   * Log how long an IOP took, by dividing the total time by the
+   * count of operations, printing in a human-readable form.
+   * @param operation operation being measured
+   * @param timer timing data
+   * @param count IOP count.
+   */
+  protected void logTimePerIOP(String operation,
+      ContractTestUtils.NanoTimer timer,
+      long count) {
+    LOG.info("Time per {}: {} nS",
+        operation, toHuman(timer.duration() / count));
+  }
+
+  /**
+   * Assume that the huge file exists; skip the test if it is missing or empty.
+   * @return the file status
+   * @throws IOException IO failure
+   */
+  FileStatus assumeHugeFileExists() throws IOException {
+    assertPathExists(getFileSystem(), "huge file not created", hugefile);
+    try {
+      FileStatus status = getFileSystem().getFileStatus(hugefile);
+      Assume.assumeTrue("Not a file: " + status, status.isFile());
+      Assume.assumeTrue("File " + hugefile + " is empty", status.getLen() > 0);
+      return status;
+    } catch (FileNotFoundException e) {
+      skip("huge file not created: " + hugefile);
+    }
+    return null;
+  }
+
+  /**
+   * If/when {@link NativeAzureFileSystem#getStorageStatistics()} returns
+   * statistics, this will be interesting.
+   */
+  private void logFSState() {
+    StorageStatistics statistics = getFileSystem().getStorageStatistics();
+    Iterator<StorageStatistics.LongStatistic> longStatistics
+        = statistics.getLongStatistics();
+    while (longStatistics.hasNext()) {
+      StorageStatistics.LongStatistic next = longStatistics.next();
+      LOG.info("{} = {}", next.getName(), next.getValue());
+    }
+  }
+
+  @Test
+  public void test_010_CreateHugeFile() throws IOException {
+    long filesize = getTestPropertyBytes(getConfiguration(),
+        KEY_HUGE_FILESIZE,
+        DEFAULT_HUGE_FILESIZE);
+    long filesizeMB = filesize / S_1M;
+
+    // clean up from any previous attempts
+    deleteHugeFile();
+
+    describe("Creating file %s of size %d MB", hugefile, filesizeMB);
+
+    // now do a check of available upload time, with a pessimistic bandwidth
+    // (that of remote upload tests). If the test times out then not only is
+    // the test outcome lost, as the follow-on tests continue, they will
+    // overlap with the ongoing upload test, for much confusion.
+/*
+    int timeout = getTestTimeoutSeconds();
+    // assume 1 MB/s upload bandwidth
+    int bandwidth = S_1M;
+    long uploadTime = filesize / bandwidth;
+    assertTrue(String.format("Timeout set in %s seconds is too low;" +
+            " estimating upload time of %d seconds at 1 MB/s." +
+            " Rerun tests with -D%s=%d",
+        timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
+        uploadTime < timeout);
+*/
+    assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
+            + " is not a multiple of " + UPLOAD_BLOCKSIZE,
+        0, filesize % UPLOAD_BLOCKSIZE);
+
+    byte[] data = SOURCE_DATA;
+
+    long blocks = filesize / UPLOAD_BLOCKSIZE;
+    long blocksPerMB = S_1M / UPLOAD_BLOCKSIZE;
+
+    // perform the upload.
+    // there's lots of logging here, so that a tail -f on the output log
+    // can give a view of what is happening.
+    NativeAzureFileSystem fs = getFileSystem();
+
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    long blocksPer10MB = blocksPerMB * 10;
+    fs.mkdirs(hugefile.getParent());
+    try (FSDataOutputStream out = fs.create(hugefile,
+        true,
+        UPLOAD_BLOCKSIZE,
+        null)) {
+      for (long block = 1; block <= blocks; block++) {
+        out.write(data);
+        long written = block * UPLOAD_BLOCKSIZE;
+        // every 10 MB and on file upload @ 100%, print some stats
+        if (block % blocksPer10MB == 0 || written == filesize) {
+          long percentage = written * 100 / filesize;
+          double elapsedTime = timer.elapsedTime() / NANOSEC;
+          double writtenMB = 1.0 * written / S_1M;
+          LOG.info(String.format("[%02d%%] Buffered %.2f MB out of %d MB;"
+                  + " elapsedTime=%.2fs; write to buffer bandwidth=%.2f MB/s",
+              percentage,
+              writtenMB,
+              filesizeMB,
+              elapsedTime,
+              writtenMB / elapsedTime));
+        }
+      }
+      // now close the file
+      LOG.info("Closing stream {}", out);
+      ContractTestUtils.NanoTimer closeTimer
+          = new ContractTestUtils.NanoTimer();
+      out.close();
+      closeTimer.end("time to close() output stream");
+    }
+
+    timer.end("time to write %d MB in blocks of %d",
+        filesizeMB, UPLOAD_BLOCKSIZE);
+    logFSState();
+    bandwidth(timer, filesize);
+    ContractTestUtils.assertPathExists(fs, "Huge file", hugefile);
+    FileStatus status = fs.getFileStatus(hugefile);
+    ContractTestUtils.assertIsFile(hugefile, status);
+    assertEquals("File size in " + status, filesize, status.getLen());
+  }
+
+  @Test
+  public void test_040_PositionedReadHugeFile() throws Throwable {
+    assumeHugeFileExists();
+    describe("Positioned reads of file %s", hugefile);
+    NativeAzureFileSystem fs = getFileSystem();
+    FileStatus status = fs.getFileStatus(hugefile);
+    long filesize = status.getLen();
+    int ops = 0;
+    final int bufferSize = 8192;
+    byte[] buffer = new byte[bufferSize];
+    long eof = filesize - 1;
+
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    ContractTestUtils.NanoTimer readAtByte0, readAtByte0Again, readAtEOF;
+    try (FSDataInputStream in = openDataFile()) {
+      readAtByte0 = new ContractTestUtils.NanoTimer();
+      in.readFully(0, buffer);
+      readAtByte0.end("time to read data at start of file");
+      ops++;
+
+      readAtEOF = new ContractTestUtils.NanoTimer();
+      in.readFully(eof - bufferSize, buffer);
+      readAtEOF.end("time to read data at end of file");
+      ops++;
+
+      readAtByte0Again = new ContractTestUtils.NanoTimer();
+      in.readFully(0, buffer);
+      readAtByte0Again.end("time to read data at start of file again");
+      ops++;
+      LOG.info("Final stream state: {}", in);
+    }
+    long mb = Math.max(filesize / S_1M, 1);
+
+    logFSState();
+    timer.end("time to performed positioned reads of %d MB ", mb);
+    LOG.info("Time per positioned read = {} nS",
+        toHuman(timer.nanosPerOperation(ops)));
+  }
+
+  protected FSDataInputStream openDataFile() throws IOException {
+    NanoTimer openTimer = new NanoTimer();
+    FSDataInputStream inputStream = getFileSystem().open(hugefile,
+        UPLOAD_BLOCKSIZE);
+    openTimer.end("open data file");
+    return inputStream;
+  }
+
+  /**
+   * Work out the bandwidth in bytes/second.
+   * @param timer timer measuring the duration
+   * @param bytes bytes
+   * @return the number of bytes/second of the recorded operation
+   */
+  public static double bandwidthInBytes(NanoTimer timer, long bytes) {
+    return bytes * NANOSEC / timer.duration();
+  }
+
+  @Test
+  public void test_050_readHugeFile() throws Throwable {
+    assumeHugeFileExists();
+    describe("Reading %s", hugefile);
+    NativeAzureFileSystem fs = getFileSystem();
+    FileStatus status = fs.getFileStatus(hugefile);
+    long filesize = status.getLen();
+    long blocks = filesize / UPLOAD_BLOCKSIZE;
+    byte[] data = new byte[UPLOAD_BLOCKSIZE];
+
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    try (FSDataInputStream in = openDataFile()) {
+      for (long block = 0; block < blocks; block++) {
+        in.readFully(data);
+      }
+      LOG.info("Final stream state: {}", in);
+    }
+
+    long mb = Math.max(filesize / S_1M, 1);
+    timer.end("time to read file of %d MB ", mb);
+    LOG.info("Time per MB to read = {} nS",
+        toHuman(timer.nanosPerOperation(mb)));
+    bandwidth(timer, filesize);
+    logFSState();
+  }
+
+  @Test
+  public void test_060_openAndReadWholeFileBlocks() throws Throwable {
+    FileStatus status = assumeHugeFileExists();
+    int blockSize = S_1M;
+    describe("Open the test file and read it in blocks of size %d",
+        blockSize);
+    long len = status.getLen();
+    FSDataInputStream in = openDataFile();
+    NanoTimer timer2 = null;
+    long blockCount = 0;
+    long totalToRead = 0;
+    int resetCount = 0;
+    try {
+      byte[] block = new byte[blockSize];
+      timer2 = new NanoTimer();
+      long count = 0;
+      // implicitly rounding down here
+      blockCount = len / blockSize;
+      totalToRead = blockCount * blockSize;
+      long minimumBandwidth = S_128K;
+      int maxResetCount = 4;
+      resetCount = 0;
+      for (long i = 0; i < blockCount; i++) {
+        int offset = 0;
+        int remaining = blockSize;
+        long blockId = i + 1;
+        NanoTimer blockTimer = new NanoTimer();
+        int reads = 0;
+        while (remaining > 0) {
+          NanoTimer readTimer = new NanoTimer();
+          int bytesRead = in.read(block, offset, remaining);
+          reads++;
+          if (bytesRead < 0) {
+            // EOF: no more data in the stream
+            break;
+          }
+          remaining -= bytesRead;
+          offset += bytesRead;
+          count += bytesRead;
+          readTimer.end();
+          if (bytesRead != 0) {
+            LOG.debug("Bytes in read #{}: {} , block bytes: {},"
+                    + " remaining in block: {}"
+                    + " duration={} nS; ns/byte: {}, bandwidth={} MB/s",
+                reads, bytesRead, blockSize - remaining, remaining,
+                readTimer.duration(),
+                readTimer.nanosPerOperation(bytesRead),
+                readTimer.bandwidthDescription(bytesRead));
+          } else {
+            LOG.warn("0 bytes returned by read() operation #{}", reads);
+          }
+        }
+        blockTimer.end("Reading block %d in %d reads", blockId, reads);
+        String bw = blockTimer.bandwidthDescription(blockSize);
+        LOG.info("Bandwidth of block {}: {} MB/s: ", blockId, bw);
+        if (bandwidthInBytes(blockTimer, blockSize) < minimumBandwidth) {
+          LOG.warn("Bandwidth {} too low on block {}: resetting connection",
+              bw, blockId);
+          Assert.assertTrue("Bandwidth of " + bw + " too low after "
+              + resetCount + " attempts", resetCount <= maxResetCount);
+          resetCount++;
+          // reset the connection
+        }
+      }
+    } finally {
+      IOUtils.closeStream(in);
+    }
+    timer2.end("Time to read %d bytes in %d blocks", totalToRead, blockCount);
+    LOG.info("Overall Bandwidth {} MB/s; reset connections {}",
+        timer2.bandwidth(totalToRead), resetCount);
+  }
+
+  @Test
+  public void test_100_renameHugeFile() throws Throwable {
+    assumeHugeFileExists();
+    describe("renaming %s to %s", hugefile, hugefileRenamed);
+    NativeAzureFileSystem fs = getFileSystem();
+    FileStatus status = fs.getFileStatus(hugefile);
+    long filesize = status.getLen();
+    fs.delete(hugefileRenamed, false);
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    fs.rename(hugefile, hugefileRenamed);
+    long mb = Math.max(filesize / S_1M, 1);
+    timer.end("time to rename file of %d MB", mb);
+    LOG.info("Time per MB to rename = {} nS",
+        toHuman(timer.nanosPerOperation(mb)));
+    bandwidth(timer, filesize);
+    logFSState();
+    FileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
+    assertEquals(filesize, destFileStatus.getLen());
+
+    // rename back
+    ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+    fs.rename(hugefileRenamed, hugefile);
+    timer2.end("Renaming back");
+    LOG.info("Time per MB to rename = {} nS",
+        toHuman(timer2.nanosPerOperation(mb)));
+    bandwidth(timer2, filesize);
+  }
+
+  @Test
+  public void test_999_deleteHugeFiles() throws IOException {
+    // mark the test account for cleanup after this test
+    testAccountForCleanup = testAccount;
+    deleteHugeFile();
+    ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
+    NativeAzureFileSystem fs = getFileSystem();
+    fs.delete(hugefileRenamed, false);
+    timer2.end("time to delete %s", hugefileRenamed);
+    rm(fs, testPath, true, false);
+    assertPathDoesNotExist(fs, "deleted huge file", testPath);
+  }
+
+}
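
The bandwidth figures reported above all come down to the same arithmetic
as bandwidthInBytes(): bytes multiplied by 1e9, divided by the elapsed
nanoseconds. A self-contained sketch of that calculation, using plain
System.nanoTime() in place of ContractTestUtils.NanoTimer:

    // Time an operation and report its bandwidth in MB/s.
    long started = System.nanoTime();
    // ... perform the upload or read being measured ...
    long durationNanos = System.nanoTime() - started;

    long bytes = 10L * 1024 * 1024;        // e.g. the 10M default file size
    double bytesPerSecond = bytes * 1.0e9 / durationNanos;
    double megabytesPerSecond = bytesPerSecond / (1024 * 1024);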

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
new file mode 100644
index 0000000..92b10cf
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/Sizes.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.integration;
+
+/**
+ * Sizes of data.
+ * Checkstyle doesn't like the naming scheme or the fact that it's an interface.
+ */
+public interface Sizes {
+
+  int S_256 = 256;
+  int S_512 = 512;
+  int S_1K = 1024;
+  int S_4K = 4 * S_1K;
+  int S_8K = 8 * S_1K;
+  int S_16K = 16 * S_1K;
+  int S_32K = 32 * S_1K;
+  int S_64K = 64 * S_1K;
+  int S_128K = 128 * S_1K;
+  int S_256K = 256 * S_1K;
+  int S_1M = S_1K * S_1K;
+  int S_2M = 2 * S_1M;
+  int S_5M = 5 * S_1M;
+  int S_10M = 10 * S_1M;
+  double NANOSEC = 1.0e9;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2d97fa/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
new file mode 100644
index 0000000..60e24ee
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
@@ -0,0 +1,586 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.metrics;
+
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_CLIENT_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DIRECTORIES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_DOWNLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_CREATED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_FILES_DELETED;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_SERVER_ERRORS;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_LATENCY;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_UPLOAD_RATE;
+import static org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation.WASB_WEB_RESPONSES;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.verify;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
+import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
+import org.apache.hadoop.fs.azure.AzureException;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Instrumentation test: performs filesystem operations and verifies that
+ * the metrics are consistent with them.
+ */
+public class ITestAzureFileSystemInstrumentation extends AbstractWasbTestBase {
+
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(ITestAzureFileSystemInstrumentation.class);
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create();
+  }
+
+  @Test
+  public void testMetricTags() throws Exception {
+    String accountName =
+        getTestAccount().getRealAccount().getBlobEndpoint()
+        .getAuthority();
+    String containerName =
+        getTestAccount().getRealContainer().getName();
+    MetricsRecordBuilder myMetrics = getMyMetrics();
+    verify(myMetrics).add(argThat(
+        new TagMatcher("accountName", accountName)
+        ));
+    verify(myMetrics).add(argThat(
+        new TagMatcher("containerName", containerName)
+        ));
+    verify(myMetrics).add(argThat(
+        new TagMatcher("Context", "azureFileSystem")
+        ));
+    verify(myMetrics).add(argThat(
+        new TagExistsMatcher("wasbFileSystemId")
+        ));
+  }
+
+  @Test
+  public void testMetricsOnMkdirList() throws Exception {
+    long base = getBaseWebResponses();
+
+    // Create a directory
+    assertTrue(fs.mkdirs(new Path("a")));
+    // At the time of writing
+    // getAncestor uses 2 calls for each folder level /user/<name>/a
+    // plus 1 call made by checkContainer
+    // mkdir checks the hierarchy with 2 calls per level
+    // mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
+    // For a total of 7 + 6 + 5 = 18 web responses
+    base = assertWebResponsesInRange(base, 1, 18);
+    assertEquals(1,
+        AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
+
+    // List the root contents
+    assertEquals(1, getFileSystem().listStatus(new Path("/")).length);
+    base = assertWebResponsesEquals(base, 1);
+
+    assertNoErrors();
+  }
+
+  private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
+    NativeAzureFileSystem azureFs = (NativeAzureFileSystem) getFileSystem();
+    AzureNativeFileSystemStore azureStore = azureFs.getStore();
+    return azureStore.getBandwidthGaugeUpdater();
+  }
+
+  private static byte[] nonZeroByteArray(int size) {
+    byte[] data = new byte[size];
+    Arrays.fill(data, (byte)5);
+    return data;
+  }
+
+  @Test
+  public void testMetricsOnFileCreateRead() throws Exception {
+    long base = getBaseWebResponses();
+
+    assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+    Path filePath = new Path("/metricsTest_webResponses");
+    final int FILE_SIZE = 1000;
+
+    // Suppress auto-update of bandwidth metrics so we get
+    // to update them exactly when we want to.
+    getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+    // Create a file
+    Date start = new Date();
+    OutputStream outputStream = getFileSystem().create(filePath);
+    outputStream.write(nonZeroByteArray(FILE_SIZE));
+    outputStream.close();
+    long uploadDurationMs = new Date().getTime() - start.getTime();
+
+    // The exact number of requests/responses that happen to create a file
+    // can vary - at the time of writing this code it takes 10
+    // requests/responses for the 1000 byte file (33 for 100 MB),
+    // plus the initial container-check request but that
+    // can very easily change in the future. Just assert that we do roughly
+    // more than 2 but less than 15.
+    logOpResponseCount("Creating a 1K file", base);
+    base = assertWebResponsesInRange(base, 2, 15);
+    getBandwidthGaugeUpdater().triggerUpdate(true);
+    long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
+    assertTrue("The bytes written in the last second " + bytesWritten +
+        " is pretty far from the expected range of around " + FILE_SIZE +
+        " bytes plus a little overhead.",
+        bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
+    long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+    assertTrue("The total bytes written  " + totalBytesWritten +
+        " is pretty far from the expected range of around " + FILE_SIZE +
+        " bytes plus a little overhead.",
+        totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+    long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+    LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+    long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
+    assertTrue("The upload rate " + uploadRate +
+        " is below the expected range of around " + expectedRate +
+        " bytes/second that the unit test observed. This should never be" +
+        " the case since the test underestimates the rate by looking at " +
+        " end-to-end time instead of just block upload time.",
+        uploadRate >= expectedRate);
+    long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+        WASB_UPLOAD_LATENCY);
+    LOG.info("Upload latency: {}", uploadLatency);
+    long expectedLatency = uploadDurationMs; // We're uploading less than a block.
+    assertTrue("The upload latency " + uploadLatency +
+        " should be greater than zero now that I've just uploaded a file.",
+        uploadLatency > 0);
+    assertTrue("The upload latency " + uploadLatency +
+        " is more than the expected range of around " + expectedLatency +
+        " milliseconds that the unit test observed. This should never be" +
+        " the case since the test overestimates the latency by looking at " +
+        " end-to-end time instead of just block upload time.",
+        uploadLatency <= expectedLatency);
+
+    // Read the file
+    start = new Date();
+    InputStream inputStream = getFileSystem().open(filePath);
+    int count = 0;
+    while (inputStream.read() >= 0) {
+      count++;
+    }
+    inputStream.close();
+    long downloadDurationMs = new Date().getTime() - start.getTime();
+    assertEquals(FILE_SIZE, count);
+
+    // Again, exact number varies. At the time of writing this code
+    // it takes 4 request/responses, so just assert a rough range between
+    // 1 and 10.
+    logOpResponseCount("Reading a 1K file", base);
+    base = assertWebResponsesInRange(base, 1, 10);
+    getBandwidthGaugeUpdater().triggerUpdate(false);
+    long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+    assertEquals(FILE_SIZE, totalBytesRead);
+    long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
+    assertTrue("The bytes read in the last second " + bytesRead +
+        " is pretty far from the expected range of around " + FILE_SIZE +
+        " bytes plus a little overhead.",
+        bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
+    long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+    LOG.info("Download rate: " + downloadRate + " bytes/second.");
+    expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
+    assertTrue("The download rate " + downloadRate +
+        " is below the expected range of around " + expectedRate +
+        " bytes/second that the unit test observed. This should never be" +
+        " the case since the test underestimates the rate by looking at " +
+        " end-to-end time instead of just block download time.",
+        downloadRate >= expectedRate);
+    long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+        WASB_DOWNLOAD_LATENCY);
+    LOG.info("Download latency: " + downloadLatency);
+    expectedLatency = downloadDurationMs; // We're downloading less than a block.
+    assertTrue("The download latency " + downloadLatency +
+        " should be greater than zero now that I've just downloaded a file.",
+        downloadLatency > 0);
+    assertTrue("The download latency " + downloadLatency +
+        " is more than the expected range of around " + expectedLatency +
+        " milliseconds that the unit test observed. This should never be" +
+        " the case since the test overestimates the latency by looking at " +
+        " end-to-end time instead of just block download time.",
+        downloadLatency <= expectedLatency);
+
+    assertNoErrors();
+  }
+
+  @Test
+  public void testMetricsOnBigFileCreateRead() throws Exception {
+    long base = getBaseWebResponses();
+
+    assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
+
+    Path filePath = new Path("/metricsTest_webResponses");
+    final int FILE_SIZE = 100 * 1024 * 1024;
+
+    // Suppress auto-update of bandwidth metrics so we get
+    // to update them exactly when we want to.
+    getBandwidthGaugeUpdater().suppressAutoUpdate();
+
+    // Create a file
+    OutputStream outputStream = getFileSystem().create(filePath);
+    outputStream.write(new byte[FILE_SIZE]);
+    outputStream.close();
+
+    // The exact number of requests/responses needed to create a file
+    // can vary: at the time of writing this code it takes 34
+    // requests/responses for the 100 MB file, plus the initial
+    // container-check request, but that can easily change in the
+    // future. Just assert that we use roughly more than 20 but fewer
+    // than 50.
+    logOpResponseCount("Creating a 100 MB file", base);
+    base = assertWebResponsesInRange(base, 20, 50);
+    getBandwidthGaugeUpdater().triggerUpdate(true);
+    long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
+    assertTrue("The total bytes written  " + totalBytesWritten +
+        " is pretty far from the expected range of around " + FILE_SIZE +
+        " bytes plus a little overhead.",
+        totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
+    long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
+    LOG.info("Upload rate: " + uploadRate + " bytes/second.");
+    long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+        WASB_UPLOAD_LATENCY);
+    LOG.info("Upload latency: " + uploadLatency);
+    assertTrue("The upload latency " + uploadLatency +
+        " should be greater than zero now that I've just uploaded a file.",
+        uploadLatency > 0);
+
+    // Read the file
+    InputStream inputStream = getFileSystem().open(filePath);
+    int count = 0;
+    while (inputStream.read() >= 0) {
+      count++;
+    }
+    inputStream.close();
+    assertEquals(FILE_SIZE, count);
+
+    // Again, the exact number varies. At the time of writing this code
+    // it takes 27 requests/responses, so just assert a rough range
+    // between 20 and 40.
+    logOpResponseCount("Reading a 100 MB file", base);
+    base = assertWebResponsesInRange(base, 20, 40);
+    getBandwidthGaugeUpdater().triggerUpdate(false);
+    long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
+    assertEquals(FILE_SIZE, totalBytesRead);
+    long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
+    LOG.info("Download rate: " + downloadRate + " bytes/second.");
+    long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
+        WASB_DOWNLOAD_LATENCY);
+    LOG.info("Download latency: " + downloadLatency);
+    assertTrue("The download latency " + downloadLatency +
+        " should be greater than zero now that I've just downloaded a file.",
+        downloadLatency > 0);
+  }
+
+  @Test
+  public void testMetricsOnFileRename() throws Exception {
+    long base = getBaseWebResponses();
+
+    Path originalPath = new Path("/metricsTest_RenameStart");
+    Path destinationPath = new Path("/metricsTest_RenameFinal");
+
+    // Create an empty file
+    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+    assertTrue(getFileSystem().createNewFile(originalPath));
+    logOpResponseCount("Creating an empty file", base);
+    base = assertWebResponsesInRange(base, 2, 20);
+    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
+
+    // Rename the file
+    assertTrue(
+        ((FileSystem) getFileSystem()).rename(originalPath, destinationPath));
+    // Varies: at the time of writing this code it takes 7 requests/responses.
+    logOpResponseCount("Renaming a file", base);
+    base = assertWebResponsesInRange(base, 2, 15);
+
+    assertNoErrors();
+  }
+
+  @Test
+  public void testMetricsOnFileExistsDelete() throws Exception {
+    long base = getBaseWebResponses();
+
+    Path filePath = new Path("/metricsTest_delete");
+
+    // Check existence
+    assertFalse(getFileSystem().exists(filePath));
+    // At the time of writing this code it takes 2 requests/responses
+    // to check existence (which seems excessive), plus the initial
+    // container-check request.
+    logOpResponseCount("Checking file existence for non-existent file", base);
+    base = assertWebResponsesInRange(base, 1, 3);
+
+    // Create an empty file
+    assertTrue(getFileSystem().createNewFile(filePath));
+    base = getCurrentWebResponses();
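+    // Re-baseline the counter rather than asserting here; the cost of
+    // createNewFile is covered by the other tests.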
+
+    // Check existence again
+    assertTrue(getFileSystem().exists(filePath));
+    logOpResponseCount("Checking file existence for existent file", base);
+    base = assertWebResponsesInRange(base, 1, 2);
+
+    // Delete the file
+    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+    assertTrue(getFileSystem().delete(filePath, false));
+    // At the time of writing this code it takes 4 requests/responses
+    // to delete, which seems excessive. Check for the range 1-4 for now.
+    logOpResponseCount("Deleting a file", base);
+    base = assertWebResponsesInRange(base, 1, 4);
+    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
+
+    assertNoErrors();
+  }
+
+  @Test
+  public void testMetricsOnDirRename() throws Exception {
+    long base = getBaseWebResponses();
+
+    Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
+    Path innerFileName = new Path(originalDirName, "innerFile");
+    Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
+
+    // Create an empty directory
+    assertTrue(getFileSystem().mkdirs(originalDirName));
+    base = getCurrentWebResponses();
+
+    // Create an inner file
+    assertTrue(getFileSystem().createNewFile(innerFileName));
+    base = getCurrentWebResponses();
+
+    // Rename the directory
+    assertTrue(getFileSystem().rename(originalDirName, destDirName));
+
+    // At the time of writing this code it takes 11 requests/responses
+    // to rename the directory with one file. Check for range 1-20 for now.
+    logOpResponseCount("Renaming a directory", base);
+    base = assertWebResponsesInRange(base, 1, 20);
+
+    assertNoErrors();
+  }
+
+  /**
+   * Recursive discovery of path depth.
+   * @param path path to measure.
+   * @return the depth, where "/" == 0.
+   */
+  int depth(Path path) {
+    if (path.isRoot()) {
+      return 0;
+    } else {
+      return 1 + depth(path.getParent());
+    }
+  }
+
+  @Test
+  public void testClientErrorMetrics() throws Exception {
+    String fileName = "metricsTestFile_ClientError";
+    Path filePath = new Path("/"+fileName);
+    final int FILE_SIZE = 100;
+    OutputStream outputStream = null;
+    String leaseID = null;
+    try {
+      // Create a file
+      outputStream = getFileSystem().create(filePath);
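+      // Acquire a lease on the blob so the buffered write fails on
+      // close with a lease-conflict error, which should be recorded
+      // as a client error (not a server error).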
+      leaseID = getTestAccount().acquireShortLease(fileName);
+      try {
+        outputStream.write(new byte[FILE_SIZE]);
+        outputStream.close();
+        assertTrue("Should've thrown", false);
+      } catch (AzureException ex) {
+        assertTrue("Unexpected exception: " + ex,
+          ex.getMessage().contains("lease"));
+      }
+      assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+      assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+    } finally {
+      if (leaseID != null) {
+        getTestAccount().releaseLease(leaseID, fileName);
+      }
+      IOUtils.closeStream(outputStream);
+    }
+  }
+
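+  /**
+   * Logs the number of web responses consumed by the given operation
+   * since the supplied base value.
+   * @param opName name of the operation, for the log message.
+   * @param base the counter value before the operation started.
+   */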
+  private void logOpResponseCount(String opName, long base) {
+    LOG.info("{}  took {} web responses to complete.",
+        opName, getCurrentWebResponses() - base);
+  }
+
+  /**
+   * Gets (and asserts) the value of the wasb_web_responses counter just
+   * after the creation of the file system object.
+   */
+  private long getBaseWebResponses() {
+    // The number of requests should start at 0
+    return assertWebResponsesEquals(0, 0);
+  }
+
+  /**
+   * Gets the current value of the wasb_web_responses counter.
+   */
+  private long getCurrentWebResponses() {
+    return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
+  }
+
+  /**
+   * Checks that the wasb_web_responses counter is at the given value.
+   * @param base The base value (before the operation of interest).
+   * @param expected The expected value for the operation of interest.
+   * @return The new base value now.
+   */
+  private long assertWebResponsesEquals(long base, long expected) {
+    assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
+    return base + expected;
+  }
+
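+  /**
+   * Asserts that neither client-side nor server-side errors have been
+   * recorded in the instrumentation counters.
+   */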
+  private void assertNoErrors() {
+    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
+    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
+  }
+
+  /**
+   * Checks that the wasb_web_responses counter is in the given range.
+   * @param base The base value (before the operation of interest).
+   * @param inclusiveLowerLimit The lower limit for what it should increase by.
+   * @param inclusiveUpperLimit The upper limit for what it should increase by.
+   * @return The new base value now.
+   */
+  private long assertWebResponsesInRange(long base,
+      long inclusiveLowerLimit,
+      long inclusiveUpperLimit) {
+    long currentResponses = getCurrentWebResponses();
+    long justOperation = currentResponses - base;
+    assertTrue(String.format(
+        "Web responses expected in range [%d, %d], but was %d.",
+        inclusiveLowerLimit, inclusiveUpperLimit, justOperation),
+        justOperation >= inclusiveLowerLimit &&
+        justOperation <= inclusiveUpperLimit);
+    return currentResponses;
+  }
+
+  /**
+   * Gets the metrics for the file system object.
+   * @return The metrics record.
+   */
+  private MetricsRecordBuilder getMyMetrics() {
+    return getMetrics(getInstrumentation());
+  }
+
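+  /**
+   * Gets the instrumentation object of the file system under test.
+   * @return the instrumentation.
+   */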
+  private AzureFileSystemInstrumentation getInstrumentation() {
+    return getFileSystem().getInstrumentation();
+  }
+
+  /**
+   * A matcher class for asserting that we got a tag with a given
+   * value.
+   */
+  private static class TagMatcher extends TagExistsMatcher {
+    private final String tagValue;
+
+    public TagMatcher(String tagName, String tagValue) {
+      super(tagName);
+      this.tagValue = tagValue;
+    }
+
+    @Override
+    public boolean matches(MetricsTag toMatch) {
+      return toMatch.value().equals(tagValue);
+    }
+
+    @Override
+    public void describeTo(Description desc) {
+      super.describeTo(desc);
+      desc.appendText(" with value " + tagValue);
+    }
+  }
+
+  /**
+   * A matcher class for asserting that we got a tag with any value.
+   */
+  private static class TagExistsMatcher extends BaseMatcher<MetricsTag> {
+    private final String tagName;
+
+    public TagExistsMatcher(String tagName) {
+      this.tagName = tagName;
+    }
+
+    @Override
+    public boolean matches(Object toMatch) {
+      MetricsTag asTag = (MetricsTag)toMatch;
+      return asTag.name().equals(tagName) && matches(asTag);
+    }
+
+    protected boolean matches(MetricsTag toMatch) {
+      return true;
+    }
+
+    @Override
+    public void describeTo(Description desc) {
+      desc.appendText("Has tag " + tagName);
+    }
+  }
+
+  /**
+   * A matcher class for asserting that a long value is in a
+   * given range.
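+   * For example (illustrative usage):
+   * {@code assertThat(value, new InRange(1, 10))}.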
+   */
+  private static class InRange extends BaseMatcher<Long> {
+    private final long inclusiveLowerLimit;
+    private final long inclusiveUpperLimit;
+    private long obtained;
+
+    public InRange(long inclusiveLowerLimit, long inclusiveUpperLimit) {
+      this.inclusiveLowerLimit = inclusiveLowerLimit;
+      this.inclusiveUpperLimit = inclusiveUpperLimit;
+    }
+
+    @Override
+    public boolean matches(Object number) {
+      obtained = (Long)number;
+      return obtained >= inclusiveLowerLimit &&
+          obtained <= inclusiveUpperLimit;
+    }
+
+    @Override
+    public void describeTo(Description description) {
+      description.appendText("Between " + inclusiveLowerLimit +
+          " and " + inclusiveUpperLimit + " inclusively");
+    }
+  }
+}

