Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2014/07/07 22:44:04 UTC

svn commit: r1608603 - in /hadoop/common/branches/fs-encryption: ./ hadoop-project/ hadoop-project/src/site/ hadoop-tools/hadoop-azure/ hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ hadoop-tools/hadoop-azure/src/main/java/org/apac...

Author: wang
Date: Mon Jul  7 20:43:56 2014
New Revision: 1608603

URL: http://svn.apache.org/r1608603
Log:
Merge trunk to branch.

Added:
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/
      - copied from r1608600, hadoop/common/trunk/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/
      - copied from r1608600, hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/hadoop-metrics2-azure-file-system.properties
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-tools/hadoop-azure/src/test/resources/hadoop-metrics2-azure-file-system.properties
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/
      - copied from r1608600, hadoop/common/trunk/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/contract/
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/resources/contract/
      - copied from r1608600, hadoop/common/trunk/hadoop-tools/hadoop-openstack/src/test/resources/contract/
Removed:
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftNotDirectoryException.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/exceptions/SwiftPathExistsException.java
Modified:
    hadoop/common/branches/fs-encryption/   (props changed)
    hadoop/common/branches/fs-encryption/.gitignore
    hadoop/common/branches/fs-encryption/hadoop-project/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-project/src/site/site.xml
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/.gitignore
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/README.txt
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java
    hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java

Propchange: hadoop/common/branches/fs-encryption/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2.5:r1607590,1607618
  Merged /hadoop/common/branches/branch-2:r1606534,1607440
  Merged /hadoop/common/trunk:r1603979-1608600

Modified: hadoop/common/branches/fs-encryption/.gitignore
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/.gitignore?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/.gitignore (original)
+++ hadoop/common/branches/fs-encryption/.gitignore Mon Jul  7 20:43:56 2014
@@ -12,3 +12,5 @@ target
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
+hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
+hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml

Modified: hadoop/common/branches/fs-encryption/hadoop-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-project/pom.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-project/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-project/pom.xml Mon Jul  7 20:43:56 2014
@@ -392,6 +392,11 @@
         <artifactId>jetty-util</artifactId>
         <version>6.1.26</version>
       </dependency>
+      <dependency>
+        <groupId>javax.servlet.jsp</groupId>
+        <artifactId>jsp-api</artifactId>
+        <version>2.1</version>
+      </dependency>
 
       <dependency>
         <groupId>org.glassfish</groupId>

Modified: hadoop/common/branches/fs-encryption/hadoop-project/src/site/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-project/src/site/site.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-project/src/site/site.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-project/src/site/site.xml Mon Jul  7 20:43:56 2014
@@ -51,8 +51,10 @@
       <item name="Single Node Setup" href="hadoop-project-dist/hadoop-common/SingleCluster.html"/>
       <item name="Cluster Setup" href="hadoop-project-dist/hadoop-common/ClusterSetup.html"/>
       <item name="Hadoop Commands Reference" href="hadoop-project-dist/hadoop-common/CommandsManual.html"/>
-      <item name="File System Shell" href="hadoop-project-dist/hadoop-common/FileSystemShell.html"/>
+      <item name="FileSystem Shell" href="hadoop-project-dist/hadoop-common/FileSystemShell.html"/>
       <item name="Hadoop Compatibility" href="hadoop-project-dist/hadoop-common/Compatibility.html"/>
+      <item name="FileSystem Specification"
+        href="hadoop-project-dist/hadoop-common/index.html"/>
     </menu>
 
     <menu name="Common" inherit="top">

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/.gitignore
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/.gitignore?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/.gitignore (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/.gitignore Mon Jul  7 20:43:56 2014
@@ -1 +1,2 @@
-.checkstyle
\ No newline at end of file
+.checkstyle
+bin/
\ No newline at end of file

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/README.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/README.txt?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/README.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/README.txt Mon Jul  7 20:43:56 2014
@@ -12,9 +12,13 @@ Unit tests
 =============
 Most of the tests will run without additional configuration.
 For complete testing, configuration in src/test/resources is required:
-  src/test/resources/azure-test.xml
-  src/test/resources/log4j.properties
+  
+  src/test/resources/azure-test.xml -> Defines Azure storage dependencies, including account information 
 
+The other files in src/test/resources do not normally need alteration:
+  log4j.properties -> Test logging setup
+  hadoop-metrics2-azure-file-system.properties -> used to wire up instrumentation for testing
+  
 From command-line
 ------------------
 Basic execution:
@@ -59,6 +63,12 @@ Enable the Azure emulator tests by setti
   fs.azure.test.emulator -> true 
 in src\test\resources\azure-test.xml
 
+Known issues:
+  Symptom: When running tests for emulator, you see the following failure message
+           com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
+  Issue:   The emulator can get into a confused state.  
+  Fix:     Restart the Azure Emulator.  Ensure it is v3.2 or later.
+ 
 Running tests against live Azure storage 
 -------------------------------------------------------------------------
 In order to run WASB unit tests against a live Azure Storage account, add credentials to 
@@ -101,4 +111,8 @@ Eclipse:
 NOTE:
 - After any change to the checkstyle rules xml, use window|preferences|checkstyle|{refresh}|OK
 
- 
\ No newline at end of file
+=============
+Javadoc
+============= 
+Command-line
+> mvn javadoc:javadoc
\ No newline at end of file

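As a quick illustration of the emulator switch described in the README hunk above, the same flag can also be set on a Hadoop Configuration in test code, assuming the test harness reads it from there. Only the property name comes from the README; the helper class and method below are hypothetical.

  // Sketch only: programmatic equivalent of the azure-test.xml switch above.
  import org.apache.hadoop.conf.Configuration;

  public class EmulatorTestConf {
    public static Configuration emulatorConf() {
      Configuration conf = new Configuration();
      conf.addResource("azure-test.xml");               // test overrides, if present
      conf.setBoolean("fs.azure.test.emulator", true);  // property named in the README
      return conf;
    }
  }
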
Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/pom.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/pom.xml Mon Jul  7 20:43:56 2014
@@ -37,22 +37,6 @@
   </properties>
 
   <build>
-  
-    <testResources>
-      <testResource>
-        <directory>src/test/resources</directory>
-        <includes>
-          <include>log4j.properties</include>
-        </includes>
-      </testResource>
-      <testResource>
-        <directory>src/test/resources</directory>
-        <includes>
-          <include>azure-test.xml</include>
-        </includes>
-      </testResource>
-    </testResources>
-  
     <plugins>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
@@ -198,5 +182,11 @@
       <type>test-jar</type>
     </dependency>
     
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    
   </dependencies>
 </project>

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java Mon Jul  7 20:43:56 2014
@@ -17,7 +17,6 @@
  */
 
 package org.apache.hadoop.fs.azure;
-
 import static org.apache.hadoop.fs.azure.NativeAzureFileSystem.PATH_DELIMITER;
 
 import java.io.BufferedInputStream;
@@ -46,6 +45,10 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobContainerWrapper;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobDirectoryWrapper;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlockBlobWrapper;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+import org.apache.hadoop.fs.azure.metrics.BandwidthGaugeUpdater;
+import org.apache.hadoop.fs.azure.metrics.ErrorMetricUpdater;
+import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.mortbay.util.ajax.JSON;
@@ -69,8 +72,15 @@ import com.microsoft.windowsazure.storag
 import com.microsoft.windowsazure.storage.blob.ListBlobItem;
 import com.microsoft.windowsazure.storage.core.Utility;
 
+
+/**
+ * Core implementation of Windows Azure Filesystem for Hadoop.
+ * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage 
+ *
+ */
 @InterfaceAudience.Private
-class AzureNativeFileSystemStore implements NativeFileSystemStore {
+@VisibleForTesting
+public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   
   /**
    * Configuration knob on whether we do block-level MD5 validation on
@@ -169,6 +179,8 @@ class AzureNativeFileSystemStore impleme
   private boolean isAnonymousCredentials = false;
   // Set to true if we are connecting using shared access signatures.
   private boolean connectingUsingSAS = false;
+  private AzureFileSystemInstrumentation instrumentation;
+  private BandwidthGaugeUpdater bandwidthGaugeUpdater;
   private static final JSON PERMISSION_JSON_SERIALIZER = createPermissionJsonSerializer();
 
   private boolean suppressRetryPolicy = false;
@@ -301,6 +313,11 @@ class AzureNativeFileSystemStore impleme
     this.storageInteractionLayer = storageInteractionLayer;
   }
 
+  @VisibleForTesting
+  public BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
+    return bandwidthGaugeUpdater;
+  }
+  
   /**
    * Check if concurrent reads and writes on the same blob are allowed.
    * 
@@ -325,12 +342,18 @@ class AzureNativeFileSystemStore impleme
    *           if URI or job object is null, or invalid scheme.
    */
   @Override
-  public void initialize(URI uri, Configuration conf) throws AzureException {
+  public void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation) throws AzureException {
 
     if (null == this.storageInteractionLayer) {
       this.storageInteractionLayer = new StorageInterfaceImpl();
     }
 
+    this.instrumentation = instrumentation;
+    this.bandwidthGaugeUpdater = new BandwidthGaugeUpdater(instrumentation);
+    if (null == this.storageInteractionLayer) {
+      this.storageInteractionLayer = new StorageInterfaceImpl();
+    }
+    
     // Check that URI exists.
     //
     if (null == uri) {
@@ -775,8 +798,10 @@ class AzureNativeFileSystemStore impleme
         throw new AzureException(errMsg);
       }
 
+      instrumentation.setAccountName(accountName);
       String containerName = getContainerFromAuthority(sessionUri);
-
+      instrumentation.setContainerName(containerName);
+      
       // Check whether this is a storage emulator account.
       if (isStorageEmulatorAccount(accountName)) {
         // It is an emulator account, connect to it with no credentials.
@@ -1522,6 +1547,11 @@ class AzureNativeFileSystemStore impleme
           selfThrottlingWriteFactor);
     }
 
+    ResponseReceivedMetricUpdater.hook(
+        operationContext,
+        instrumentation,
+        bandwidthGaugeUpdater);
+    
     // Bind operation context to receive send request callbacks on this
     // operation.
     // If reads concurrent to OOB writes are allowed, the interception will
@@ -1535,6 +1565,8 @@ class AzureNativeFileSystemStore impleme
       operationContext = testHookOperationContext
           .modifyOperationContext(operationContext);
     }
+    
+    ErrorMetricUpdater.hook(operationContext, instrumentation);
 
     // Return the operation context.
     return operationContext;
@@ -1723,7 +1755,7 @@ class AzureNativeFileSystemStore impleme
           inDataStream.close();
         }
         if(in != null){
-          inDataStream.close();
+          in.close();
         }
         throw e;
       }
@@ -2218,5 +2250,14 @@ class AzureNativeFileSystemStore impleme
 
   @Override
   public void close() {
+    bandwidthGaugeUpdater.close();
+  }
+  
+  // Finalizer to ensure complete shutdown
+  @Override
+  protected void finalize() throws Throwable {
+    LOG.debug("finalize() called");
+    close();
+    super.finalize();
   }
 }

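For readers skimming the instrumentation changes above, here is a minimal sketch of the wiring pattern they introduce. The metrics class names, constructors and hook() signatures are taken directly from the hunks; the wrapper class and method are assumptions for illustration.

  // Sketch only; not part of the commit.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
  import org.apache.hadoop.fs.azure.metrics.BandwidthGaugeUpdater;
  import org.apache.hadoop.fs.azure.metrics.ErrorMetricUpdater;
  import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
  import com.microsoft.windowsazure.storage.OperationContext;

  public class InstrumentedContextSketch {
    public static OperationContext newInstrumentedContext(Configuration conf) {
      AzureFileSystemInstrumentation instrumentation =
          new AzureFileSystemInstrumentation(conf);
      BandwidthGaugeUpdater bandwidthGaugeUpdater =
          new BandwidthGaugeUpdater(instrumentation);

      OperationContext operationContext = new OperationContext();
      // Hooks response callbacks that (inferred from the class name) feed the
      // bandwidth gauge updater and request counters.
      ResponseReceivedMetricUpdater.hook(
          operationContext, instrumentation, bandwidthGaugeUpdater);
      // Error responses are tallied separately.
      ErrorMetricUpdater.hook(operationContext, instrumentation);
      return operationContext;
    }
  }
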
Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java Mon Jul  7 20:43:56 2014
@@ -31,12 +31,13 @@ import java.util.Date;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,12 +46,14 @@ import org.apache.hadoop.fs.FSInputStrea
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 
-
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.windowsazure.storage.core.Utility;
 
@@ -193,7 +196,7 @@ public class NativeAzureFileSystem exten
     }
 
     @Override
-    public void close() throws IOException {
+    public synchronized void close() throws IOException {
       in.close();
     }
 
@@ -369,8 +372,12 @@ public class NativeAzureFileSystem exten
   private AzureNativeFileSystemStore actualStore;
   private Path workingDir;
   private long blockSize = MAX_AZURE_BLOCK_SIZE;
+  private AzureFileSystemInstrumentation instrumentation;
   private static boolean suppressRetryPolicy = false;
+  // A counter to create unique (within-process) names for my metrics sources.
+  private static AtomicInteger metricsSourceNameCounter = new AtomicInteger();
 
+  
   public NativeAzureFileSystem() {
     // set store in initialize()
   }
@@ -397,6 +404,20 @@ public class NativeAzureFileSystem exten
   }
 
   /**
+   * Creates a new metrics source name that's unique within this process.
+   */
+  @VisibleForTesting
+  public static String newMetricsSourceName() {
+    int number = metricsSourceNameCounter.incrementAndGet();
+    final String baseName = "AzureFileSystemMetrics";
+    if (number == 1) { // No need for a suffix for the first one
+      return baseName;
+    } else {
+      return baseName + number;
+    }
+  }
+  
+  /**
    * Checks if the given URI scheme is a scheme that's affiliated with the Azure
    * File System.
    * 
@@ -459,7 +480,16 @@ public class NativeAzureFileSystem exten
       store = createDefaultStore(conf);
     }
 
-    store.initialize(uri, conf);
+    // Make sure the metrics system is available before interacting with Azure
+    AzureFileSystemMetricsSystem.fileSystemStarted();
+    String sourceName = newMetricsSourceName(),
+        sourceDesc = "Azure Storage Volume File System metrics";
+    instrumentation = DefaultMetricsSystem.instance().register(sourceName,
+        sourceDesc, new AzureFileSystemInstrumentation(conf));
+    AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc,
+        instrumentation);
+
+    store.initialize(uri, conf, instrumentation);
     setConf(conf);
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
     this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
@@ -535,9 +565,19 @@ public class NativeAzureFileSystem exten
    * @return The store object.
    */
   @VisibleForTesting
-  AzureNativeFileSystemStore getStore() {
+  public AzureNativeFileSystemStore getStore() {
     return actualStore;
   }
+  
+  /**
+   * Gets the metrics source for this file system.
+   * This is mainly here for unit testing purposes.
+   *
+   * @return the metrics source.
+   */
+  public AzureFileSystemInstrumentation getInstrumentation() {
+    return instrumentation;
+  }
 
   /** This optional operation is not yet supported. */
   @Override
@@ -622,6 +662,10 @@ public class NativeAzureFileSystem exten
     // Construct the data output stream from the buffered output stream.
     FSDataOutputStream fsOut = new FSDataOutputStream(bufOutStream, statistics);
 
+    
+    // Increment the counter
+    instrumentation.fileCreated();
+    
     // Return data output stream to caller.
     return fsOut;
   }
@@ -682,6 +726,7 @@ public class NativeAzureFileSystem exten
           store.updateFolderLastModifiedTime(parentKey);
         }
       }
+      instrumentation.fileDeleted();
       store.delete(key);
     } else {
       // The path specifies a folder. Recursively delete all entries under the
@@ -724,6 +769,7 @@ public class NativeAzureFileSystem exten
             p.getKey().lastIndexOf(PATH_DELIMITER));
         if (!p.isDir()) {
           store.delete(key + suffix);
+          instrumentation.fileDeleted();
         } else {
           // Recursively delete contents of the sub-folders. Notice this also
           // deletes the blob for the directory.
@@ -740,6 +786,7 @@ public class NativeAzureFileSystem exten
         String parentKey = pathToKey(parent);
         store.updateFolderLastModifiedTime(parentKey);
       }
+      instrumentation.directoryDeleted();
     }
 
     // File or directory was successfully deleted.
@@ -972,6 +1019,8 @@ public class NativeAzureFileSystem exten
       store.updateFolderLastModifiedTime(key, lastModified);
     }
 
+    instrumentation.directoryCreated();
+    
     // otherwise throws exception
     return true;
   }
@@ -1293,6 +1342,19 @@ public class NativeAzureFileSystem exten
     super.close();
     // Close the store
     store.close();
+    
+    // Notify the metrics system that this file system is closed, which may
+    // trigger one final metrics push to get the accurate final file system
+    // metrics out.
+
+    long startTime = System.currentTimeMillis();
+
+    AzureFileSystemMetricsSystem.fileSystemClosed();
+
+    if (LOG.isDebugEnabled()) {
+        LOG.debug("Submitting metrics when file system closed took "
+                + (System.currentTimeMillis() - startTime) + " ms.");
+    }
   }
 
   /**

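Condensing the file-system-level hunks above, the metrics lifecycle this change introduces looks roughly as follows. Every call is taken from the hunks; the wrapping class and methods are assumptions.

  // Lifecycle sketch assembled from the hunks above; not a drop-in implementation.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
  import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
  import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
  import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

  public class MetricsLifecycleSketch {
    static AzureFileSystemInstrumentation start(Configuration conf) {
      AzureFileSystemMetricsSystem.fileSystemStarted();
      String sourceName = NativeAzureFileSystem.newMetricsSourceName(); // unique per instance
      String sourceDesc = "Azure Storage Volume File System metrics";
      AzureFileSystemInstrumentation instrumentation =
          DefaultMetricsSystem.instance().register(sourceName, sourceDesc,
              new AzureFileSystemInstrumentation(conf));
      AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc, instrumentation);
      return instrumentation;
    }

    static void stop() {
      // Mirrors NativeAzureFileSystem.close(): may trigger one final metrics push.
      AzureFileSystemMetricsSystem.fileSystemClosed();
    }
  }
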
Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java Mon Jul  7 20:43:56 2014
@@ -26,6 +26,7 @@ import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -38,7 +39,7 @@ import com.google.common.annotations.Vis
 @InterfaceAudience.Private
 interface NativeFileSystemStore {
 
-  void initialize(URI uri, Configuration conf) throws IOException;
+  void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation) throws IOException;
 
   void storeEmptyFolder(String key, PermissionStatus permissionStatus)
       throws AzureException;

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java Mon Jul  7 20:43:56 2014
@@ -27,9 +27,18 @@ import java.util.Date;
 import java.util.EnumSet;
 import java.util.GregorianCalendar;
 import java.util.TimeZone;
+import java.util.concurrent.ConcurrentLinkedQueue;
 
+import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 
 import com.microsoft.windowsazure.storage.AccessCondition;
 import com.microsoft.windowsazure.storage.CloudStorageAccount;
@@ -76,7 +85,10 @@ public final class AzureBlobStorageTestA
   private NativeAzureFileSystem fs;
   private AzureNativeFileSystemStore storage;
   private MockStorageInterface mockStorage;
-
+  private static final ConcurrentLinkedQueue<MetricsRecord> allMetrics =
+      new ConcurrentLinkedQueue<MetricsRecord>();
+  
+  
   private AzureBlobStorageTestAccount(NativeAzureFileSystem fs,
       CloudStorageAccount account, CloudBlobContainer container) {
     this.account = account;
@@ -124,6 +136,10 @@ public final class AzureBlobStorageTestA
     this.fs = fs;
     this.mockStorage = mockStorage;
   }
+  
+  private static void addRecord(MetricsRecord record) {
+    allMetrics.add(record);
+  }
 
   public static String getMockContainerUri() {
     return String.format("http://%s/%s",
@@ -141,6 +157,47 @@ public final class AzureBlobStorageTestA
     // Remove the first SEPARATOR
     return toMockUri(path.toUri().getRawPath().substring(1)); 
   }
+  
+  public Number getLatestMetricValue(String metricName, Number defaultValue)
+      throws IndexOutOfBoundsException{
+    boolean found = false;
+    Number ret = null;
+    for (MetricsRecord currentRecord : allMetrics) {
+      // First check if this record is coming for my file system.
+      if (wasGeneratedByMe(currentRecord)) {
+        for (AbstractMetric currentMetric : currentRecord.metrics()) {
+          if (currentMetric.name().equalsIgnoreCase(metricName)) {
+            found = true;
+            ret = currentMetric.value();
+            break;
+          }
+        }
+      }
+    }
+    if (!found) {
+      if (defaultValue != null) {
+        return defaultValue;
+      }
+      throw new IndexOutOfBoundsException(metricName);
+    }
+    return ret;
+  }
+
+  /**
+   * Checks if the given record was generated by my WASB file system instance.
+   * @param currentRecord The metrics record to check.
+   * @return
+   */
+  private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
+    String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
+    for (MetricsTag currentTag : currentRecord.tags()) {
+      if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
+        return currentTag.value().equals(myFsId);
+      }
+    }
+    return false;
+  }
+
 
   /**
    * Gets the blob reference to the given blob key.
@@ -236,7 +293,6 @@ public final class AzureBlobStorageTestA
 
   public static AzureBlobStorageTestAccount createOutOfBandStore(
       int uploadBlockSize, int downloadBlockSize) throws Exception {
-
     CloudBlobContainer container = null;
     Configuration conf = createTestConfiguration();
     CloudStorageAccount account = createTestAccount(conf);
@@ -262,11 +318,25 @@ public final class AzureBlobStorageTestA
     // Set account URI and initialize Azure file system.
     URI accountUri = createAccountUri(accountName, containerName);
 
+    // Set up instrumentation.
+    //
+    AzureFileSystemMetricsSystem.fileSystemStarted();
+    String sourceName = NativeAzureFileSystem.newMetricsSourceName();
+    String sourceDesc = "Azure Storage Volume File System metrics";
+
+    AzureFileSystemInstrumentation instrumentation =
+        DefaultMetricsSystem.instance().register(sourceName,
+                sourceDesc, new AzureFileSystemInstrumentation(conf));
+
+    AzureFileSystemMetricsSystem.registerSource(
+        sourceName, sourceDesc, instrumentation);
+    
+    
     // Create a new AzureNativeFileSystemStore object.
     AzureNativeFileSystemStore testStorage = new AzureNativeFileSystemStore();
 
     // Initialize the store with the throttling feedback interfaces.
-    testStorage.initialize(accountUri, conf);
+    testStorage.initialize(accountUri, conf, instrumentation);
 
     // Create test account initializing the appropriate member variables.
     AzureBlobStorageTestAccount testAcct = new AzureBlobStorageTestAccount(
@@ -722,5 +792,20 @@ public final class AzureBlobStorageTestA
   public MockStorageInterface getMockStorage() {
     return mockStorage;
   }
+  
+  public static class StandardCollector implements MetricsSink {
+    @Override
+    public void init(SubsetConfiguration conf) {
+    }
+
+    @Override
+    public void putMetrics(MetricsRecord record) {
+      addRecord(record);
+    }
+
+    @Override
+    public void flush() {
+    }
+  }
  
 }

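The test-account changes above make the metrics observable from tests: records flow through the StandardCollector sink into a shared queue, and getLatestMetricValue() filters them by the per-instance wasbFileSystemId tag. A hypothetical assertion built on top of that might look like the following; getLatestMetricValue() comes from the hunks, while the accessor getFileSystem() and the metric name "filesCreated" are assumptions.

  // Hypothetical test fragment; not part of the commit.
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
  import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
  import static org.junit.Assert.assertTrue;

  public class MetricsAssertionSketch {
    public void checkFileCreatedMetric(AzureBlobStorageTestAccount testAccount)
        throws Exception {
      NativeAzureFileSystem fs = testAccount.getFileSystem();  // assumed accessor
      fs.create(new Path("/metricsProbe")).close();
      fs.close();  // close() pushes any pending metrics (see NativeAzureFileSystem above)
      long created =
          testAccount.getLatestMetricValue("filesCreated", 0).longValue();
      assertTrue("expected a file-created metric", created >= 1);
    }
  }
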
Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java Mon Jul  7 20:43:56 2014
@@ -33,11 +33,9 @@ import java.util.HashMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.Test;
 
 import com.microsoft.windowsazure.storage.OperationContext;

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml Mon Jul  7 20:43:56 2014
@@ -26,6 +26,7 @@
     <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value> 
   </property> 
  
+ 
   
   <!-- For tests against live azure, provide the following account information -->
   <!--

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/StrictBufferedFSInputStream.java Mon Jul  7 20:43:56 2014
@@ -19,9 +19,11 @@
 package org.apache.hadoop.fs.swift.snative;
 
 import org.apache.hadoop.fs.BufferedFSInputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
 
+import java.io.EOFException;
 import java.io.IOException;
 
 /**
@@ -37,10 +39,10 @@ public class StrictBufferedFSInputStream
   @Override
   public void seek(long pos) throws IOException {
     if (pos < 0) {
-      throw new IOException("Negative position");
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     if (in == null) {
-      throw new SwiftConnectionClosedException("Stream closed");
+      throw new SwiftConnectionClosedException(FSExceptionMessages.STREAM_IS_CLOSED);
     }
     super.seek(pos);
   }

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java Mon Jul  7 20:43:56 2014
@@ -25,14 +25,14 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
-import org.apache.hadoop.fs.swift.exceptions.SwiftNotDirectoryException;
 import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
-import org.apache.hadoop.fs.swift.exceptions.SwiftPathExistsException;
 import org.apache.hadoop.fs.swift.exceptions.SwiftUnsupportedFeatureException;
 import org.apache.hadoop.fs.swift.http.SwiftProtocolConstants;
 import org.apache.hadoop.fs.swift.util.DurationStats;
@@ -373,7 +373,7 @@ public class SwiftNativeFileSystem exten
    * @param directory path to query
    * @return true iff the directory should be created
    * @throws IOException IO problems
-   * @throws SwiftNotDirectoryException if the path references a file
+   * @throws ParentNotDirectoryException if the path references a file
    */
   private boolean shouldCreate(Path directory) throws IOException {
     FileStatus fileStatus;
@@ -388,9 +388,9 @@ public class SwiftNativeFileSystem exten
 
       if (!SwiftUtils.isDirectory(fileStatus)) {
         //if it's a file, raise an error
-        throw new SwiftNotDirectoryException(directory,
-                String.format(": can't mkdir since it exists and is not a directory: %s",
-                        fileStatus));
+        throw new ParentNotDirectoryException(
+                String.format("%s: can't mkdir since it exists and is not a directory: %s",
+                    directory, fileStatus));
       } else {
         //path exists, and it is a directory
         if (LOG.isDebugEnabled()) {
@@ -488,7 +488,7 @@ public class SwiftNativeFileSystem exten
         //overwrite set -> delete the object.
         store.delete(absolutePath, true);
       } else {
-        throw new SwiftPathExistsException("Path exists: " + file);
+        throw new FileAlreadyExistsException("Path exists: " + file);
       }
     } else {
       // destination does not exist -trigger creation of the parent
@@ -580,6 +580,9 @@ public class SwiftNativeFileSystem exten
     } catch (SwiftOperationFailedException e) {
       //downgrade to a failure
       return false;
+    } catch (FileAlreadyExistsException e) {
+      //downgrade to a failure
+      return false;
     } catch (FileNotFoundException e) {
       //downgrade to a failure
       return false;

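The Swift hunks above retire the filesystem-specific exceptions in favour of the shared Hadoop types (ParentNotDirectoryException, FileAlreadyExistsException), and rename() downgrades the latter to a plain boolean false. A caller-side sketch of that contract, with paths and setup as assumptions:

  // Caller-side sketch of the exception/return contract after the change above.
  import java.io.IOException;
  import org.apache.hadoop.fs.FileAlreadyExistsException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.ParentNotDirectoryException;
  import org.apache.hadoop.fs.Path;

  public class SwiftContractSketch {
    static void exercise(FileSystem fs) throws IOException {
      try {
        fs.create(new Path("/existing/file"), false /* overwrite */).close();
      } catch (FileAlreadyExistsException e) {
        // create() without overwrite now fails with the standard FS exception
      }
      try {
        fs.mkdirs(new Path("/existing/file/child"));
      } catch (ParentNotDirectoryException e) {
        // mkdirs under a file now raises the standard FS exception
      }
      // rename onto an existing file is reported as false rather than an exception
      boolean renamed = fs.rename(new Path("/a"), new Path("/existing/file"));
    }
  }
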
Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java Mon Jul  7 20:43:56 2014
@@ -22,6 +22,7 @@ import org.apache.commons.httpclient.Htt
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConfigurationException;
@@ -590,7 +591,7 @@ public class SwiftNativeFileSystemStore 
         } else {
           //outcome #1 dest it's a file: fail if differeent
           if (!renamingOnToSelf) {
-            throw new SwiftOperationFailedException(
+            throw new FileAlreadyExistsException(
                     "cannot rename a file over one that already exists");
           } else {
             //is mv self self where self is a file. this becomes a no-op
@@ -633,7 +634,7 @@ public class SwiftNativeFileSystemStore 
 
       if (destExists && !destIsDir) {
         // #1 destination is a file: fail
-        throw new SwiftOperationFailedException(
+        throw new FileAlreadyExistsException(
                 "the source is a directory, but not the destination");
       }
       Path targetPath;

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java Mon Jul  7 20:43:56 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.swift.snati
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -298,7 +299,8 @@ class SwiftNativeInputStream extends FSI
   @Override
   public synchronized void seek(long targetPos) throws IOException {
     if (targetPos < 0) {
-      throw new IOException("Negative Seek offset not supported");
+      throw new EOFException(
+          FSExceptionMessages.NEGATIVE_SEEK);
     }
     //there's some special handling of near-local data
     //as the seek can be omitted if it is in/adjacent

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeOutputStream.java Mon Jul  7 20:43:56 2014
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
 import org.apache.hadoop.fs.swift.exceptions.SwiftException;
 import org.apache.hadoop.fs.swift.exceptions.SwiftInternalStateException;
 import org.apache.hadoop.fs.swift.util.SwiftUtils;
@@ -109,7 +110,7 @@ class SwiftNativeOutputStream extends Ou
    */
   private synchronized void verifyOpen() throws SwiftException {
     if (closed) {
-      throw new SwiftException("Output stream is closed");
+      throw new SwiftConnectionClosedException();
     }
   }
 

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java Mon Jul  7 20:43:56 2014
@@ -21,9 +21,9 @@ package org.apache.hadoop.fs.swift;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.swift.exceptions.SwiftBadRequestException;
-import org.apache.hadoop.fs.swift.exceptions.SwiftNotDirectoryException;
 import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
 import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
 import org.junit.Test;
@@ -245,7 +245,7 @@ public class TestSwiftFileSystemBasicOps
       writeTextFile(fs, path, "parent", true);
       try {
         fs.mkdirs(child);
-      } catch (SwiftNotDirectoryException expected) {
+      } catch (ParentNotDirectoryException expected) {
         LOG.debug("Expected Exception", expected);
       }
     } finally {

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemContract.java Mon Jul  7 20:43:56 2014
@@ -23,8 +23,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.swift.exceptions.SwiftNotDirectoryException;
 import org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem;
 import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
 
@@ -47,6 +47,14 @@ public class TestSwiftFileSystemContract
   private static final Log LOG =
           LogFactory.getLog(TestSwiftFileSystemContract.class);
 
+  /**
+   * Override this if the filesystem is not case sensitive
+   * @return true if the case detection/preservation tests should run
+   */
+  protected boolean filesystemIsCaseSensitive() {
+    return false;
+  }
+
   @Override
   protected void setUp() throws Exception {
     final URI uri = getFilesystemURI();
@@ -89,9 +97,8 @@ public class TestSwiftFileSystemContract
     try {
       fs.mkdirs(testSubDir);
       fail("Should throw IOException.");
-    } catch (SwiftNotDirectoryException e) {
+    } catch (ParentNotDirectoryException e) {
       // expected
-      assertEquals(filepath,e.getPath());
     }
     //now verify that the subdir path does not exist
     SwiftTestUtils.assertPathDoesNotExist(fs, "subdir after mkdir", testSubDir);
@@ -100,7 +107,7 @@ public class TestSwiftFileSystemContract
     try {
       fs.mkdirs(testDeepSubDir);
       fail("Should throw IOException.");
-    } catch (SwiftNotDirectoryException e) {
+    } catch (ParentNotDirectoryException e) {
       // expected
     }
     SwiftTestUtils.assertPathDoesNotExist(fs, "testDeepSubDir  after mkdir",

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemRename.java Mon Jul  7 20:43:56 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.swift;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException;
 import org.apache.hadoop.fs.swift.util.SwiftTestUtils;
 import org.junit.Test;
 
@@ -220,7 +221,11 @@ public class TestSwiftFileSystemRename e
     fs.mkdirs(testdir);
     Path parent = testdir.getParent();
     //the outcome here is ambiguous, so is not checked
-    fs.rename(testdir, parent);
+    try {
+      fs.rename(testdir, parent);
+    } catch (SwiftOperationFailedException e) {
+      // allowed
+    }
     assertExists("Source directory has been deleted ", testdir);
   }
 

Modified: hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/hdfs2/TestV2LsOperations.java Mon Jul  7 20:43:56 2014
@@ -111,7 +111,7 @@ public class TestV2LsOperations extends 
   @Test(timeout = SWIFT_TEST_TIMEOUT)
   public void testListFilesSubDir() throws Throwable {
     createTestSubdirs();
-    Path dir = path("/test");
+    Path dir = path("/test/subdir");
     Path child = new Path(dir, "text.txt");
     SwiftTestUtils.writeTextFile(fs, child, "text", false);
     assertListFilesFinds(fs, dir, child, false);
@@ -120,7 +120,7 @@ public class TestV2LsOperations extends 
   @Test(timeout = SWIFT_TEST_TIMEOUT)
   public void testListFilesRecursive() throws Throwable {
     createTestSubdirs();
-    Path dir = path("/test");
+    Path dir = path("/test/recursive");
     Path child = new Path(dir, "hadoop/a/a.txt");
     SwiftTestUtils.writeTextFile(fs, child, "text", false);
     assertListFilesFinds(fs, dir, child, true);