Posted to common-commits@hadoop.apache.org by st...@apache.org on 2019/11/12 18:19:18 UTC

[hadoop] branch trunk updated: HADOOP-16665. Filesystems to be closed if they failed during initialize().

This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 990063d  HADOOP-16665. Filesystems to be closed if they failed during initialize().
990063d is described below

commit 990063d2af0a37e9474949f33128805e34c3f016
Author: Steve Loughran <st...@cloudera.com>
AuthorDate: Tue Nov 12 18:17:02 2019 +0000

    HADOOP-16665. Filesystems to be closed if they failed during initialize().
    
    Contributed by Steve Loughran.
    
    This changes FileSystem instantiation so that if an IOException or
    RuntimeException is raised in the invocation of FileSystem.initialize()
    then a best-effort attempt is made to close the FS instance; exceptions
    raised during that close are swallowed.
    
    The S3AFileSystem is also modified to do its own cleanup if an
    IOException is raised during its initialize() process, as it is the
    FS we know has the "potential" to leak threads, especially in
    extension points (e.g. AWS Authenticators) which spawn threads.
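    
    In effect, FileSystem.createFileSystem() now applies the pattern below
    (a minimal sketch of the change; the full code in the diff also logs
    the failure and runs inside a trace scope):
    
        FileSystem fs = ReflectionUtils.newInstance(clazz, conf);
        try {
          fs.initialize(uri, conf);
        } catch (IOException | RuntimeException e) {
          // best-effort close of the half-initialized instance;
          // anything raised by close() is logged and swallowed.
          IOUtils.cleanupWithLogger(LOGGER, fs);
          throw e;
        }
        return fs;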
    
    Change-Id: Ib84073a606c9d53bf53cbfca4629876a03894f04
---
 .../main/java/org/apache/hadoop/fs/FileSystem.java |  20 +++-
 .../hadoop/fs/TestFileSystemInitialization.java    | 125 +++++++++++++++++++++
 ...opTestBase.java => AbstractHadoopTestBase.java} |  12 +-
 .../org/apache/hadoop/test/HadoopTestBase.java     |   4 +-
 .../apache/hadoop/fs/s3a/S3ABlockOutputStream.java |   9 +-
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java    |   4 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java    |  87 +++++++++-----
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java    |  20 +---
 .../apache/hadoop/fs/s3a/MockS3AFileSystem.java    |   6 +
 .../apache/hadoop/fs/s3a/auth/ITestAssumeRole.java |   4 +-
 .../s3a/auth/ITestAssumedRoleCommitOperations.java |   4 +-
 .../fs/s3a/auth/ITestRestrictedReadAccess.java     |   4 +-
 12 files changed, 234 insertions(+), 65 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2376c05..d472591 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -3391,9 +3392,22 @@ public abstract class FileSystem extends Configured
     Tracer tracer = FsTracer.get(conf);
     try(TraceScope scope = tracer.newScope("FileSystem#createFileSystem")) {
       scope.addKVAnnotation("scheme", uri.getScheme());
-      Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
-      FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
-      fs.initialize(uri, conf);
+      Class<? extends FileSystem> clazz =
+          getFileSystemClass(uri.getScheme(), conf);
+      FileSystem fs = ReflectionUtils.newInstance(clazz, conf);
+      try {
+        fs.initialize(uri, conf);
+      } catch (IOException | RuntimeException e) {
+        // exception raised during initialization.
+        // log summary at warn and full stack at debug
+        LOGGER.warn("Failed to initialize filesystem {}: {}",
+            uri, e.toString());
+        LOGGER.debug("Failed to initialize filesystem", e);
+        // then (robustly) close the FS, so as to invoke any
+        // cleanup code.
+        IOUtils.cleanupWithLogger(LOGGER, fs);
+        throw e;
+      }
       return fs;
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
index 4d627a5..10ad8a1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
@@ -18,14 +18,24 @@
 package org.apache.hadoop.fs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
 import java.net.URL;
 import java.util.ServiceConfigurationError;
 
 import org.junit.Test;
+
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.*;
 
+/**
+ * Tests related to filesystem creation and lifecycle.
+ */
 public class TestFileSystemInitialization {
 
  /**
@@ -55,4 +65,119 @@ public class TestFileSystemInitialization {
     } catch (Exception | ServiceConfigurationError expected) {
     }
   }
+
+  @Test
+  public void testNewInstanceFailure() throws Throwable {
+    intercept(IOException.class, FailingFileSystem.INITIALIZE, () ->
+        FileSystem.newInstance(new URI("failing://localhost"), FailingFileSystem
+            .failingConf()));
+    assertThat(FailingFileSystem.initCount).describedAs("init count")
+        .isEqualTo(1);
+    assertThat(FailingFileSystem.closeCount).describedAs("close count")
+        .isEqualTo(1);
+  }
+
+  /**
+   * An FS which will fail on both init and close, and update
+   * counters of invocations as it does so.
+   */
+  public static class FailingFileSystem extends FileSystem {
+
+    public static final String INITIALIZE = "initialize()";
+
+    public static final String CLOSE = "close()";
+
+    private static int initCount;
+
+    private static int closeCount;
+
+    private static Configuration failingConf() {
+      final Configuration conf = new Configuration(false);
+      conf.setClass("fs.failing.impl", FailingFileSystem.class,
+          FileSystem.class);
+      return conf;
+    }
+
+    @Override
+    public void initialize(final URI name, final Configuration conf)
+        throws IOException {
+      super.initialize(name, conf);
+      initCount++;
+      throw new IOException(INITIALIZE);
+    }
+
+    @Override
+    public void close() throws IOException {
+      closeCount++;
+      throw new IOException(CLOSE);
+    }
+
+    @Override
+    public URI getUri() {
+      return null;
+    }
+
+    @Override
+    public FSDataInputStream open(final Path f, final int bufferSize)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public FSDataOutputStream create(final Path f,
+        final FsPermission permission,
+        final boolean overwrite,
+        final int bufferSize,
+        final short replication,
+        final long blockSize,
+        final Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public FSDataOutputStream append(final Path f,
+        final int bufferSize,
+        final Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean rename(final Path src, final Path dst) throws IOException {
+      return false;
+    }
+
+    @Override
+    public boolean delete(final Path f, final boolean recursive)
+        throws IOException {
+      return false;
+    }
+
+    @Override
+    public FileStatus[] listStatus(final Path f)
+        throws FileNotFoundException, IOException {
+      return new FileStatus[0];
+    }
+
+    @Override
+    public void setWorkingDirectory(final Path new_dir) {
+
+    }
+
+    @Override
+    public Path getWorkingDirectory() {
+      return null;
+    }
+
+    @Override
+    public boolean mkdirs(final Path f, final FsPermission permission)
+        throws IOException {
+      return false;
+    }
+
+    @Override
+    public FileStatus getFileStatus(final Path f) throws IOException {
+      return null;
+    }
+  }
+
 }
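
For callers nothing changes on the success path, and on failure the
original exception still surfaces; by then, though, close() has already
been attempted on the failed instance. A minimal sketch (inside a test
method declared throws Exception, with the failing:// scheme mapped to
FailingFileSystem as above):

    Configuration conf = new Configuration(false);
    conf.setClass("fs.failing.impl", FailingFileSystem.class,
        FileSystem.class);
    try {
      FileSystem.newInstance(new URI("failing://localhost"), conf);
    } catch (IOException expected) {
      // the failed instance was closed (best-effort) before the
      // exception reached this handler.
    }
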
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java
similarity index 90%
copy from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
copy to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java
index cb7df4b..e18119c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.test;
 
-import org.junit.Assert;
+import java.util.concurrent.TimeUnit;
+
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -25,13 +26,16 @@ import org.junit.rules.TestName;
 import org.junit.rules.Timeout;
 
 /**
  * A base class for JUnit4 tests that sets a default timeout for all tests
  * that subclass this test.
  *
  * Threads are named to the method being executed, for ease of diagnostics
  * in logs and thread dumps.
+ *
+ * Unlike {@link HadoopTestBase} this class does not extend JUnit Assert
+ * so is easier to use with AssertJ.
  */
-public abstract class HadoopTestBase extends Assert {
+public abstract class AbstractHadoopTestBase {
 
   /**
    * System property name to set the test timeout: {@value}.
@@ -70,7 +74,7 @@ public abstract class HadoopTestBase extends Assert {
       //fall back to the default value, as the property cannot be parsed
       millis = TEST_DEFAULT_TIMEOUT_VALUE;
     }
-    return new Timeout(millis);
+    return new Timeout(millis, TimeUnit.MILLISECONDS);
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
index cb7df4b..23f3531 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.test;
 
+import java.util.concurrent.TimeUnit;
+
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -70,7 +72,7 @@ public abstract class HadoopTestBase extends Assert {
       //fall back to the default value, as the property cannot be parsed
       millis = TEST_DEFAULT_TIMEOUT_VALUE;
     }
-    return new Timeout(millis);
+    return new Timeout(millis, TimeUnit.MILLISECONDS);
   }
 
   /**
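
Both copies of the timeout rule now use the two-argument constructor; the
single-argument Timeout(int) has been deprecated since JUnit 4.12. An
equivalent form, assuming JUnit 4.12+ on the classpath, is the factory
method:

    return Timeout.millis(millis);
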
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index a60f9af..66cac99 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * Upload files/parts directly via different buffering mechanisms:
@@ -396,9 +397,9 @@ class S3ABlockOutputStream extends OutputStream implements
       writeOperationHelper.writeFailed(ioe);
       throw ioe;
     } finally {
-      closeAll(LOG, block, blockFactory);
+      cleanupWithLogger(LOG, block, blockFactory);
       LOG.debug("Statistics: {}", statistics);
-      closeAll(LOG, statistics);
+      cleanupWithLogger(LOG, statistics);
       clearActiveBlock();
     }
     // Note end of write. This does not change the state of the remote FS.
@@ -437,7 +438,7 @@ class S3ABlockOutputStream extends OutputStream implements
             // stream afterwards.
             return writeOperationHelper.putObject(putObjectRequest);
           } finally {
-            closeAll(LOG, uploadData, block);
+            cleanupWithLogger(LOG, uploadData, block);
           }
         });
     clearActiveBlock();
@@ -614,7 +615,7 @@ class S3ABlockOutputStream extends OutputStream implements
               return partETag;
             } finally {
               // close the stream and block
-              closeAll(LOG, uploadData, block);
+              cleanupWithLogger(LOG, uploadData, block);
             }
           });
       partETagsFutures.add(partETagFuture);
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
index 0e3bca5..156defb 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.util.DirectBufferPool;
 
 import static org.apache.hadoop.fs.s3a.S3ADataBlocks.DataBlock.DestState.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.closeAll;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * Set of classes to support output streaming into blocks which are then
@@ -155,7 +155,7 @@ final class S3ADataBlocks {
      */
     @Override
     public void close() throws IOException {
-      closeAll(LOG, uploadStream);
+      cleanupWithLogger(LOG, uploadStream);
     }
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f0ddf44..832e066 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -306,22 +306,22 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       throws IOException {
     // get the host; this is guaranteed to be non-null, non-empty
     bucket = name.getHost();
-    LOG.debug("Initializing S3AFileSystem for {}", bucket);
-    // clone the configuration into one with propagated bucket options
-    Configuration conf = propagateBucketOptions(originalConf, bucket);
-    // patch the Hadoop security providers
-    patchSecurityCredentialProviders(conf);
-    // look for delegation token support early.
-    boolean delegationTokensEnabled = hasDelegationTokenBinding(conf);
-    if (delegationTokensEnabled) {
-      LOG.debug("Using delegation tokens");
-    }
-    // set the URI, this will do any fixup of the URI to remove secrets,
-    // canonicalize.
-    setUri(name, delegationTokensEnabled);
-    super.initialize(uri, conf);
-    setConf(conf);
     try {
+      LOG.debug("Initializing S3AFileSystem for {}", bucket);
+      // clone the configuration into one with propagated bucket options
+      Configuration conf = propagateBucketOptions(originalConf, bucket);
+      // patch the Hadoop security providers
+      patchSecurityCredentialProviders(conf);
+      // look for delegation token support early.
+      boolean delegationTokensEnabled = hasDelegationTokenBinding(conf);
+      if (delegationTokensEnabled) {
+        LOG.debug("Using delegation tokens");
+      }
+      // set the URI, this will do any fixup of the URI to remove secrets,
+      // canonicalize.
+      setUri(name, delegationTokensEnabled);
+      super.initialize(uri, conf);
+      setConf(conf);
 
       // look for encryption data
       // DT Bindings may override this
@@ -381,6 +381,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 
       initCannedAcls(conf);
 
+      // This initiates a probe against S3 for the bucket existing.
+      // It is where all network and authentication configuration issues
+      // surface, and is potentially slow.
       verifyBucketExists();
 
       inputPolicy = S3AInputPolicy.getPolicy(
@@ -436,7 +439,13 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 
       initMultipartUploads(conf);
     } catch (AmazonClientException e) {
+      // amazon client exception: stop all services then throw the translation
+      stopAllServices();
       throw translateException("initializing ", new Path(name), e);
+    } catch (IOException | RuntimeException e) {
+      // other exceptions: stop the services.
+      stopAllServices();
+      throw e;
     }
 
   }
@@ -3118,25 +3127,41 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     try {
       super.close();
     } finally {
-      if (transfers != null) {
+      stopAllServices();
+    }
+  }
+
+  /**
+   * Stop all services.
+   * This is invoked in close() and during failures of initialize();
+   * make sure that all operations here are robust both to a partially
+   * initialized FS state and to failures while being stopped.
+   */
+  protected synchronized void stopAllServices() {
+    if (transfers != null) {
+      try {
         transfers.shutdownNow(true);
-        transfers = null;
+      } catch (RuntimeException e) {
+        // catch and swallow for resilience.
+        LOG.debug("When shutting down", e);
       }
-      HadoopExecutors.shutdown(boundedThreadPool, LOG,
-          THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
-      boundedThreadPool = null;
-      HadoopExecutors.shutdown(unboundedThreadPool, LOG,
-          THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
-      unboundedThreadPool = null;
-      S3AUtils.closeAll(LOG, metadataStore, instrumentation);
-      metadataStore = null;
-      instrumentation = null;
-      closeAutocloseables(LOG, credentials);
-      cleanupWithLogger(LOG, delegationTokens.orElse(null));
-      cleanupWithLogger(LOG, signerManager);
-      signerManager = null;
-      credentials = null;
+      transfers = null;
     }
+    HadoopExecutors.shutdown(boundedThreadPool, LOG,
+        THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
+    boundedThreadPool = null;
+    HadoopExecutors.shutdown(unboundedThreadPool, LOG,
+        THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
+    unboundedThreadPool = null;
+    closeAutocloseables(LOG, credentials);
+    cleanupWithLogger(LOG,
+        metadataStore,
+        instrumentation,
+        delegationTokens.orElse(null),
+        signerManager);
+    delegationTokens = Optional.empty();
+    signerManager = null;
+    credentials = null;
   }
 
   /**
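
The class of leak the initialize() changes address is an extension point
which spawns a thread as soon as the filesystem wires it up, before the
bucket probe runs. A hypothetical illustration (this class and all of its
names are invented for the example; real bindings implement the AWS
credential provider contracts):

    import java.io.Closeable;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class RefreshingCredentials implements Closeable {

      private final ScheduledExecutorService refresher =
          Executors.newSingleThreadScheduledExecutor();

      public RefreshingCredentials() {
        // the refresh thread starts while initialize() is still
        // building the credential chain.
        refresher.scheduleAtFixedRate(this::refresh, 0, 5,
            TimeUnit.MINUTES);
      }

      private void refresh() {
        // fetch fresh session credentials
      }

      @Override
      public void close() {
        // before this patch nothing called close() when a later step
        // such as verifyBucketExists() threw, so this thread outlived
        // the half-initialized filesystem.
        refresher.shutdownNow();
      }
    }
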
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 7e3c5e6..0250881 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -86,6 +86,7 @@ import java.util.concurrent.ExecutionException;
 import static org.apache.commons.lang3.StringUtils.isEmpty;
 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * Utility methods for S3A code.
@@ -1613,26 +1614,17 @@ public final class S3AUtils {
   /**
    * Close the Closeable objects and <b>ignore</b> any Exception or
    * null pointers.
-   * (This is the SLF4J equivalent of that in {@code IOUtils}).
+   * This is obsolete: use
+   * {@link org.apache.hadoop.io.IOUtils#cleanupWithLogger(Logger, Closeable...)}
    * @param log the log to log at debug level. Can be null.
    * @param closeables the objects to close
    */
+  @Deprecated
   public static void closeAll(Logger log,
       Closeable... closeables) {
-    if (log == null) {
-      log = LOG;
-    }
-    for (Closeable c : closeables) {
-      if (c != null) {
-        try {
-          log.debug("Closing {}", c);
-          c.close();
-        } catch (Exception e) {
-          log.debug("Exception in closing {}", c, e);
-        }
-      }
-    }
+    cleanupWithLogger(log, closeables);
   }
+
   /**
    * Close the Closeable objects and <b>ignore</b> any Exception or
    * null pointers.
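
Migration off the deprecated helper is mechanical, as the test changes
below show: callers swap the S3A-private method for the hadoop-common one.

    // before
    S3AUtils.closeAll(LOG, roleFS);
    // after: same semantics, shared implementation
    cleanupWithLogger(LOG, roleFS);
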
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
index 8686e54..e302692 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState;
 import org.apache.hadoop.util.Progressable;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * Relays FS calls to the mocked FS, allows for some extra logging with
@@ -149,6 +150,11 @@ public class MockS3AFileSystem extends S3AFileSystem {
   }
 
   @Override
+  public void close() {
+    cleanupWithLogger(LOG, instrumentation);
+  }
+
+  @Override
   public WriteOperationHelper getWriteOperationHelper() {
     return writeHelper;
   }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
index f155aa0..82589fa 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.MultipartUtils;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestConstants;
-import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
 import org.apache.hadoop.fs.s3a.commit.CommitConstants;
 import org.apache.hadoop.fs.s3a.commit.CommitOperations;
@@ -63,6 +62,7 @@ import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.exec;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.LambdaTestUtils.*;
 
@@ -106,7 +106,7 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
 
   @Override
   public void teardown() throws Exception {
-    S3AUtils.closeAll(LOG, roleFS);
+    cleanupWithLogger(LOG, roleFS);
     super.teardown();
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
index 6b55b1b..8538106 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
@@ -26,7 +26,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.fs.s3a.commit.ITestCommitOperations;
 
 import static org.apache.hadoop.fs.s3a.Constants.ASSUMED_ROLE_ARN;
@@ -34,6 +33,7 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.*;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * Verify that the commit operations work with a restricted set of operations.
@@ -84,7 +84,7 @@ public class ITestAssumedRoleCommitOperations extends ITestCommitOperations {
 
   @Override
   public void teardown() throws Exception {
-    S3AUtils.closeAll(LOG, roleFS);
+    cleanupWithLogger(LOG, roleFS);
     // switches getFileSystem() back to the full FS.
     roleFS = null;
     super.teardown();
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
index 0b03219..050bd6e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.fs.s3a.Statistic;
 import org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore;
 import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
@@ -71,6 +70,7 @@ import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 import static org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.GenericTestUtils.failif;
@@ -255,7 +255,7 @@ public class ITestRestrictedReadAccess extends AbstractS3ATestBase {
     try {
       super.teardown();
     } finally {
-      S3AUtils.closeAll(LOG, readonlyFS);
+      cleanupWithLogger(LOG, readonlyFS);
     }
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org