Posted to common-commits@hadoop.apache.org by jh...@apache.org on 2020/04/16 18:32:32 UTC

[hadoop] branch branch-2.10 updated (7945ed4 -> 8f60971)

This is an automated email from the ASF dual-hosted git repository.

jhung pushed a change to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from 7945ed4  HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on branch-2. Contributed by Jim Brennan via eyang
     new 70c2a29  HDFS-15036. Active NameNode should not silently fail the image transfer. Contributed by Chen Liang.
     new 17a0bf8  HADOOP-16734. Backport HADOOP-16455 "ABFS: Implement FileSystem.access() method" to branch-2. Contributed by Bilahari T H.
     new 3bc2e26  HADOOP-16778. ABFS: Backport HADOOP-16660 ABFS: Make RetryCount in ExponentialRetryPolicy Configurable to Branch-2. Contributed by Sneha Vijayarajan.
     new 8f60971  HADOOP-16933. Backport HADOOP-16890 and HADOOP-16825 to branch-2

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
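
(For example, in a local clone of the repository above, "git log 7945ed4..8f60971" walks exactly these four commits, and "git diff 7945ed4 8f60971" reproduces the summary of changes below.)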


Summary of changes:
 .../hadoop/hdfs/server/namenode/ImageServlet.java  |   6 +
 .../server/namenode/ha/StandbyCheckpointer.java    |  12 +-
 .../org/apache/hadoop/hdfs/TestRollingUpgrade.java |  20 ++
 hadoop-project/pom.xml                             |   5 +
 hadoop-tools/hadoop-azure/pom.xml                  |   6 +
 .../hadoop/fs/azurebfs/AbfsConfiguration.java      |   8 +
 .../hadoop/fs/azurebfs/AzureBlobFileSystem.java    |  23 +-
 .../fs/azurebfs/AzureBlobFileSystemStore.java      |  23 +-
 .../fs/azurebfs/constants/AbfsHttpConstants.java   |   1 +
 .../fs/azurebfs/constants/ConfigurationKeys.java   |   4 +
 .../constants/FileSystemConfigurations.java        |   4 +-
 .../fs/azurebfs/constants/HttpQueryParams.java     |   1 +
 .../fs/azurebfs/oauth2/AccessTokenProvider.java    |   2 +-
 .../fs/azurebfs/oauth2/AzureADAuthenticator.java   |  57 +++-
 .../fs/azurebfs/oauth2/MsiTokenProvider.java       |  34 +++
 .../hadoop/fs/azurebfs/services/AbfsClient.java    |  22 ++
 .../fs/azurebfs/services/AbfsRestOperation.java    |   3 +
 .../azurebfs/services/AbfsRestOperationType.java   |   3 +-
 .../azurebfs/services/ExponentialRetryPolicy.java  |  11 +-
 .../hadoop-azure/src/site/markdown/abfs.md         |   4 +
 .../fs/azurebfs/ITestAbfsMsiTokenProvider.java     |  93 ++++++
 .../azurebfs/ITestAbfsRestOperationException.java  |  43 ++-
 .../ITestAzureBlobFileSystemCheckAccess.java       | 333 +++++++++++++++++++++
 .../TestAbfsConfigurationFieldsValidation.java     |   2 +-
 .../azurebfs/constants/TestConfigurationKeys.java  |   8 +
 .../fs/azurebfs/oauth2/RetryTestTokenProvider.java |  67 +++++
 26 files changed, 764 insertions(+), 31 deletions(-)
 create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
 create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
 create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/RetryTestTokenProvider.java




[hadoop] 02/04: HADOOP-16734. Backport HADOOP-16455 "ABFS: Implement FileSystem.access() method" to branch-2. Contributed by Bilahari T H.

Posted by jh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 17a0bf8d78dfb3d9ce4203a56e327a28f8ddbcba
Author: Bilahari T H <Bi...@microsoft.com>
AuthorDate: Sat Jan 4 15:29:59 2020 -0800

    HADOOP-16734. Backport HADOOP-16455 "ABFS: Implement FileSystem.access() method" to branch-2.
    Contributed by Bilahari T H.
---
 .../hadoop/fs/azurebfs/AbfsConfiguration.java      |   8 +
 .../hadoop/fs/azurebfs/AzureBlobFileSystem.java    |  23 +-
 .../fs/azurebfs/AzureBlobFileSystemStore.java      |  19 ++
 .../fs/azurebfs/constants/AbfsHttpConstants.java   |   1 +
 .../fs/azurebfs/constants/ConfigurationKeys.java   |   4 +
 .../constants/FileSystemConfigurations.java        |   4 +-
 .../fs/azurebfs/constants/HttpQueryParams.java     |   1 +
 .../hadoop/fs/azurebfs/services/AbfsClient.java    |  22 ++
 .../azurebfs/services/AbfsRestOperationType.java   |   3 +-
 .../hadoop-azure/src/site/markdown/abfs.md         |   4 +
 .../ITestAzureBlobFileSystemCheckAccess.java       | 320 +++++++++++++++++++++
 .../azurebfs/constants/TestConfigurationKeys.java  |   8 +
 12 files changed, 412 insertions(+), 5 deletions(-)
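
For orientation, here is a minimal caller-side sketch of what this backport
enables. The abfs URI and the path are illustrative, and it assumes
fs.azure.enable.check.access has been set to true (see the documentation
hunk further down):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class CheckAccessExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative account/container; any abfs:// URI behaves the same.
        FileSystem fs = FileSystem.get(
            URI.create("abfs://container@account.dfs.core.windows.net/"), conf);
        Path path = new Path("/data/report.csv"); // illustrative path
        try {
          // Returns normally when the caller holds READ on the path; an
          // HTTP 403 surfaces as AccessControlException (see
          // checkCheckAccessException below).
          fs.access(path, FsAction.READ);
          System.out.println("read permitted on " + path);
        } catch (AccessControlException ace) {
          System.out.println("read denied on " + path);
        }
      }
    }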

diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
index ffddc45..ccc5463 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
@@ -178,6 +178,10 @@ public class AbfsConfiguration{
       DefaultValue = DEFAULT_USE_UPN)
   private boolean useUpn;
 
+  @BooleanConfigurationValidatorAnnotation(ConfigurationKey =
+      FS_AZURE_ENABLE_CHECK_ACCESS, DefaultValue = DEFAULT_ENABLE_CHECK_ACCESS)
+  private boolean isCheckAccessEnabled;
+
   private Map<String, String> storageAccountKeys;
 
   public AbfsConfiguration(final Configuration rawConfig, String accountName)
@@ -399,6 +403,10 @@ public class AbfsConfiguration{
     return this.azureBlockSize;
   }
 
+  public boolean isCheckAccessEnabled() {
+    return this.isCheckAccessEnabled;
+  }
+
   public String getAzureBlockLocationHost() {
     return this.azureBlockLocationHost;
   }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
index 82a38a2..c3791fb 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
@@ -854,9 +854,14 @@ public class AzureBlobFileSystem extends FileSystem {
    * @throws IOException                   see specific implementation
    */
   @Override
-  public void access(final Path path, FsAction mode) throws IOException {
-    // TODO: make it no-op to unblock hive permission issue for now.
-    // Will add a long term fix similar to the implementation in AdlFileSystem.
+  public void access(final Path path, final FsAction mode) throws IOException {
+    LOG.debug("AzureBlobFileSystem.access path : {}, mode : {}", path, mode);
+    Path qualifiedPath = makeQualified(path);
+    try {
+      this.abfsStore.access(qualifiedPath, mode);
+    } catch (AzureBlobFileSystemException ex) {
+      checkCheckAccessException(path, ex);
+    }
   }
 
   private FileStatus tryGetFileStatus(final Path f) {
@@ -969,6 +974,18 @@ public class AzureBlobFileSystem extends FileSystem {
     }
   }
 
+  private void checkCheckAccessException(final Path path,
+      final AzureBlobFileSystemException exception) throws IOException {
+    if (exception instanceof AbfsRestOperationException) {
+      AbfsRestOperationException ere = (AbfsRestOperationException) exception;
+      if (ere.getStatusCode() == HttpURLConnection.HTTP_FORBIDDEN) {
+        throw (IOException) new AccessControlException(ere.getMessage())
+            .initCause(exception);
+      }
+    }
+    checkException(path, exception);
+  }
+
   /**
    * Given a path and exception, choose which IOException subclass
    * to create.
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 6710275..158f834 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -884,6 +884,25 @@ public class AzureBlobFileSystemStore {
     return aclStatusBuilder.build();
   }
 
+  public void access(final Path path, final FsAction mode)
+      throws AzureBlobFileSystemException {
+    LOG.debug("access for filesystem: {}, path: {}, mode: {}",
+        this.client.getFileSystem(), path, mode);
+    if (!this.abfsConfiguration.isCheckAccessEnabled()
+        || !getIsNamespaceEnabled()) {
+      LOG.debug("Returning; either check access is not enabled or the account"
+          + " used is not namespace enabled");
+      return;
+    }
+    //try (AbfsPerfInfo perfInfo = startTracking("access", "checkAccess")) {
+      String relativePath =
+          AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true);
+      final AbfsRestOperation op = this.client
+          .checkAccess(relativePath, mode.SYMBOL);
+    // perfInfo.registerResult(op.getResult()).registerSuccess(true);
+    //}
+  }
+
   public boolean isAtomicRenameKey(String key) {
     return isKeyForDirectorySet(key, azureAtomicRenameDirSet);
   }
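
Worth noting in the hunk above: access() is double-gated, so unless
fs.azure.enable.check.access is true and the account is namespace (HNS)
enabled, the store returns without a server round trip and callers keep the
earlier no-op behavior.
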
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
index e85c7f0..c6ade9c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
@@ -37,6 +37,7 @@ public final class AbfsHttpConstants {
   public static final String SET_PROPERTIES_ACTION = "setProperties";
   public static final String SET_ACCESS_CONTROL = "setAccessControl";
   public static final String GET_ACCESS_CONTROL = "getAccessControl";
+  public static final String CHECK_ACCESS = "checkAccess";
   public static final String GET_STATUS = "getStatus";
   public static final String DEFAULT_TIMEOUT = "90";
   public static final String TOKEN_VERSION = "2";
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
index eb4605b..b9e8fad 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
@@ -62,6 +62,10 @@ public final class ConfigurationKeys {
   public static final String FS_AZURE_DISABLE_OUTPUTSTREAM_FLUSH = "fs.azure.disable.outputstream.flush";
   public static final String FS_AZURE_USER_AGENT_PREFIX_KEY = "fs.azure.user.agent.prefix";
   public static final String FS_AZURE_SSL_CHANNEL_MODE_KEY = "fs.azure.ssl.channel.mode";
+  /** Provides a config to enable/disable the checkAccess API.
+   *  By default this will be
+   *  FileSystemConfigurations.DEFAULT_ENABLE_CHECK_ACCESS. **/
+  public static final String FS_AZURE_ENABLE_CHECK_ACCESS = "fs.azure.enable.check.access";
   public static final String FS_AZURE_USE_UPN = "fs.azure.use.upn";
   /** User principal names (UPNs) have the format “{alias}@{domain}”. If true,
    *  only {alias} is included when a UPN would otherwise appear in the output
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
index 29367eb..0a0ad37 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
@@ -68,5 +68,7 @@ public final class FileSystemConfigurations {
 
   public static final boolean DEFAULT_USE_UPN = false;
 
+  public static final boolean DEFAULT_ENABLE_CHECK_ACCESS = false;
+
   private FileSystemConfigurations() {}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
index 87e074f..9f735f7 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
@@ -32,6 +32,7 @@ public final class HttpQueryParams {
   public static final String QUERY_PARAM_RECURSIVE = "recursive";
   public static final String QUERY_PARAM_MAXRESULTS = "maxResults";
   public static final String QUERY_PARAM_ACTION = "action";
+  public static final String QUERY_FS_ACTION = "fsAction";
   public static final String QUERY_PARAM_POSITION = "position";
   public static final String QUERY_PARAM_TIMEOUT = "timeout";
   public static final String QUERY_PARAM_RETAIN_UNCOMMITTED_DATA = "retainUncommittedData";
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
index 0b9ad7a..b961b5c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
@@ -507,6 +507,28 @@ public class AbfsClient {
     return op;
   }
 
+  /**
+   * Talks to the server to check whether the permission specified in
+   * the rwx parameter is present for the path specified in the path parameter.
+   *
+   * @param path  Path for which access check needs to be performed
+   * @param rwx   The permission to be checked on the path
+   * @return      The {@link AbfsRestOperation} object for the operation
+   * @throws AzureBlobFileSystemException in case of bad requests
+   */
+  public AbfsRestOperation checkAccess(String path, String rwx)
+      throws AzureBlobFileSystemException {
+    AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
+    abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, CHECK_ACCESS);
+    abfsUriQueryBuilder.addQuery(QUERY_FS_ACTION, rwx);
+    URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
+    AbfsRestOperation op = new AbfsRestOperation(
+        AbfsRestOperationType.CheckAccess, this,
+        AbfsHttpConstants.HTTP_METHOD_HEAD, url, createDefaultHeaders());
+    op.execute();
+    return op;
+  }
+
   private URL createRequestUrl(final String query) throws AzureBlobFileSystemException {
     return createRequestUrl(EMPTY_STRING, query);
   }
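
On the wire, the checkAccess() call above amounts to an authenticated HEAD
request of the form
https://<account>.dfs.core.windows.net/<filesystem>/<path>?action=checkAccess&fsAction=<rwx>,
where the fsAction value is the rwx symbol of the requested FsAction
(e.g. r-- for READ), as passed in from mode.SYMBOL in the store.
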
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java
index 374bfdf..d303186 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java
@@ -39,5 +39,6 @@ public enum AbfsRestOperationType {
     Append,
     Flush,
     ReadFile,
-    DeletePath
+    DeletePath,
+    CheckAccess
 }
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
index d8452a2..02807a0 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
@@ -92,6 +92,10 @@ performance issues.
 
 ## Testing ABFS
 
+### <a name="flushconfigoptions"></a> Access Options
+Config `fs.azure.enable.check.access` needs to be set true to enable
+ the AzureBlobFileSystem.access().
+
 See the relevant section in [Testing Azure](testing_azure.html).
 
 ## References
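
Since the check is off by default (DEFAULT_ENABLE_CHECK_ACCESS is false,
above), here is a minimal sketch of enabling it programmatically; setting
the same key in core-site.xml is equivalent:

    Configuration conf = new Configuration();
    conf.setBoolean("fs.azure.enable.check.access", true); // default: false
    FileSystem fs = FileSystem.newInstance(conf); // picks up the toggle
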
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
new file mode 100644
index 0000000..d1f9ec7
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
@@ -0,0 +1,320 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.azurebfs;
+
+import com.google.common.collect.Lists;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+
+import org.junit.Assume;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.security.AccessControlException;
+
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENABLE_CHECK_ACCESS;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_ID;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_SECRET;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT;
+
+/**
+ * Test cases for AzureBlobFileSystem.access()
+ */
+public class ITestAzureBlobFileSystemCheckAccess
+    extends AbstractAbfsIntegrationTest {
+
+  private static final String TEST_FOLDER_PATH = "CheckAccessTestFolder";
+  private final FileSystem superUserFs;
+  private final FileSystem testUserFs;
+  private final String testUserGuid;
+  private final boolean isCheckAccessEnabled;
+  private final boolean isHNSEnabled;
+
+  public ITestAzureBlobFileSystemCheckAccess() throws Exception {
+    super.setup();
+    this.superUserFs = getFileSystem();
+    testUserGuid = getConfiguration()
+        .get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID);
+    this.testUserFs = getTestUserFs();
+    this.isCheckAccessEnabled = getConfiguration().isCheckAccessEnabled();
+    this.isHNSEnabled = getConfiguration()
+        .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false);
+  }
+
+  private FileSystem getTestUserFs() throws Exception {
+    String orgClientId = getConfiguration().get(FS_AZURE_BLOB_FS_CLIENT_ID);
+    String orgClientSecret = getConfiguration()
+        .get(FS_AZURE_BLOB_FS_CLIENT_SECRET);
+    Boolean orgCreateFileSystemDurungInit = getConfiguration()
+        .getBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true);
+    getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_ID,
+        getConfiguration().get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID));
+    getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_SECRET, getConfiguration()
+        .get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET));
+    getRawConfiguration()
+        .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION,
+            false);
+    FileSystem fs = FileSystem.newInstance(getRawConfiguration());
+    getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_ID, orgClientId);
+    getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_SECRET, orgClientSecret);
+    getRawConfiguration()
+        .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION,
+            orgCreateFileSystemDurungInit);
+    return fs;
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  @Ignore
+  /*  Ignoring as the FileSystem.checkPath does not check for
+      Preconditions.checkArgument(path != null, "null path");
+      hence results in NPE
+   */
+  public void testCheckAccessWithNullPath() throws IOException {
+    superUserFs.access(null, FsAction.READ);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testCheckAccessForFileWithNullFsAction() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    //  NPE when trying to convert null FsAction enum
+    superUserFs.access(new Path("test.txt"), null);
+  }
+
+  @Test(expected = FileNotFoundException.class)
+  public void testCheckAccessForNonExistentFile() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path nonExistentFile = setupTestDirectoryAndUserAccess(
+        "/nonExistentFile1.txt", FsAction.ALL);
+    superUserFs.delete(nonExistentFile, true);
+    testUserFs.access(nonExistentFile, FsAction.READ);
+  }
+
+  @Test
+  public void testWhenCheckAccessConfigIsOff() throws Exception {
+    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
+        isHNSEnabled);
+    Configuration conf = getRawConfiguration();
+    conf.setBoolean(FS_AZURE_ENABLE_CHECK_ACCESS, false);
+    FileSystem fs = FileSystem.newInstance(conf);
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test1.txt",
+        FsAction.NONE);
+    fs.access(testFilePath, FsAction.EXECUTE);
+    fs.access(testFilePath, FsAction.READ);
+    fs.access(testFilePath, FsAction.WRITE);
+    fs.access(testFilePath, FsAction.READ_EXECUTE);
+    fs.access(testFilePath, FsAction.WRITE_EXECUTE);
+    fs.access(testFilePath, FsAction.READ_WRITE);
+    fs.access(testFilePath, FsAction.ALL);
+    testFilePath = setupTestDirectoryAndUserAccess("/test1.txt", FsAction.ALL);
+    fs.access(testFilePath, FsAction.EXECUTE);
+    fs.access(testFilePath, FsAction.READ);
+    fs.access(testFilePath, FsAction.WRITE);
+    fs.access(testFilePath, FsAction.READ_EXECUTE);
+    fs.access(testFilePath, FsAction.WRITE_EXECUTE);
+    fs.access(testFilePath, FsAction.READ_WRITE);
+    fs.access(testFilePath, FsAction.ALL);
+    fs.access(testFilePath, null);
+
+    Path nonExistentFile = setupTestDirectoryAndUserAccess(
+        "/nonExistentFile2" + ".txt", FsAction.NONE);
+    superUserFs.delete(nonExistentFile, true);
+    fs.access(nonExistentFile, FsAction.READ);
+  }
+
+  @Test
+  public void testCheckAccessForAccountWithoutNS() throws Exception {
+    Assume.assumeFalse(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is true",
+        getConfiguration()
+            .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, true));
+    testUserFs.access(new Path("/"), FsAction.READ);
+  }
+
+  @Test
+  public void testFsActionNONE() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test2.txt",
+        FsAction.NONE);
+    assertInaccessible(testFilePath, FsAction.EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ);
+    assertInaccessible(testFilePath, FsAction.WRITE);
+    assertInaccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionEXECUTE() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test3.txt",
+        FsAction.EXECUTE);
+    assertAccessible(testFilePath, FsAction.EXECUTE);
+
+    assertInaccessible(testFilePath, FsAction.READ);
+    assertInaccessible(testFilePath, FsAction.WRITE);
+    assertInaccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionREAD() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test4.txt",
+        FsAction.READ);
+    assertAccessible(testFilePath, FsAction.READ);
+
+    assertInaccessible(testFilePath, FsAction.EXECUTE);
+    assertInaccessible(testFilePath, FsAction.WRITE);
+    assertInaccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionWRITE() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test5.txt",
+        FsAction.WRITE);
+    assertAccessible(testFilePath, FsAction.WRITE);
+
+    assertInaccessible(testFilePath, FsAction.EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ);
+    assertInaccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionREADEXECUTE() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test6.txt",
+        FsAction.READ_EXECUTE);
+    assertAccessible(testFilePath, FsAction.EXECUTE);
+    assertAccessible(testFilePath, FsAction.READ);
+    assertAccessible(testFilePath, FsAction.READ_EXECUTE);
+
+    assertInaccessible(testFilePath, FsAction.WRITE);
+    assertInaccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionWRITEEXECUTE() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test7.txt",
+        FsAction.WRITE_EXECUTE);
+    assertAccessible(testFilePath, FsAction.EXECUTE);
+    assertAccessible(testFilePath, FsAction.WRITE);
+    assertAccessible(testFilePath, FsAction.WRITE_EXECUTE);
+
+    assertInaccessible(testFilePath, FsAction.READ);
+    assertInaccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertInaccessible(testFilePath, FsAction.READ_WRITE);
+    assertInaccessible(testFilePath, FsAction.ALL);
+  }
+
+  @Test
+  public void testFsActionALL() throws Exception {
+    assumeHNSAndCheckAccessEnabled();
+    Path testFilePath = setupTestDirectoryAndUserAccess("/test8.txt",
+        FsAction.ALL);
+    assertAccessible(testFilePath, FsAction.EXECUTE);
+    assertAccessible(testFilePath, FsAction.WRITE);
+    assertAccessible(testFilePath, FsAction.WRITE_EXECUTE);
+    assertAccessible(testFilePath, FsAction.READ);
+    assertAccessible(testFilePath, FsAction.READ_EXECUTE);
+    assertAccessible(testFilePath, FsAction.READ_WRITE);
+    assertAccessible(testFilePath, FsAction.ALL);
+  }
+
+  private void assumeHNSAndCheckAccessEnabled() {
+    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
+        isHNSEnabled);
+    Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false",
+        isCheckAccessEnabled);
+  }
+
+  private void assertAccessible(Path testFilePath, FsAction fsAction)
+      throws IOException {
+    assertTrue(
+        "Should have been given access  " + fsAction + " on " + testFilePath,
+        isAccessible(testUserFs, testFilePath, fsAction));
+  }
+
+  private void assertInaccessible(Path testFilePath, FsAction fsAction)
+      throws IOException {
+    assertFalse(
+        "Should have been denied access  " + fsAction + " on " + testFilePath,
+        isAccessible(testUserFs, testFilePath, fsAction));
+  }
+
+  private void setExecuteAccessForParentDirs(Path dir) throws IOException {
+    dir = dir.getParent();
+    while (dir != null) {
+      modifyAcl(dir, testUserGuid, FsAction.EXECUTE);
+      dir = dir.getParent();
+    }
+  }
+
+  private void modifyAcl(Path file, String uid, FsAction fsAction)
+      throws IOException {
+    List<AclEntry> aclSpec = Lists.newArrayList(AclTestHelpers
+        .aclEntry(AclEntryScope.ACCESS, AclEntryType.USER, uid, fsAction));
+    this.superUserFs.modifyAclEntries(file, aclSpec);
+  }
+
+  private Path setupTestDirectoryAndUserAccess(String testFileName,
+      FsAction fsAction) throws Exception {
+    Path file = new Path(TEST_FOLDER_PATH + testFileName);
+    file = this.superUserFs.makeQualified(file);
+    this.superUserFs.delete(file, true);
+    this.superUserFs.create(file);
+    modifyAcl(file, testUserGuid, fsAction);
+    setExecuteAccessForParentDirs(file);
+    return file;
+  }
+
+  private boolean isAccessible(FileSystem fs, Path path, FsAction fsAction)
+      throws IOException {
+    try {
+      fs.access(path, fsAction);
+    } catch (AccessControlException ace) {
+      return false;
+    }
+    return true;
+  }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java
index fbd13fe..579e76f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java
@@ -34,6 +34,14 @@ public final class TestConfigurationKeys {
   public static final String FS_AZURE_BLOB_DATA_READER_CLIENT_ID = "fs.azure.account.oauth2.reader.client.id";
   public static final String FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET = "fs.azure.account.oauth2.reader.client.secret";
 
+  public static final String FS_AZURE_BLOB_FS_CLIENT_ID = "fs.azure.account.oauth2.client.id";
+  public static final String FS_AZURE_BLOB_FS_CLIENT_SECRET = "fs.azure.account.oauth2.client.secret";
+
+  public static final String FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID = "fs.azure.account.test.oauth2.client.id";
+  public static final String FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET = "fs.azure.account.test.oauth2.client.secret";
+
+  public static final String FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID = "fs.azure.check.access.testuser.guid";
+
   public static final String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
   public static final String TEST_CONTAINER_PREFIX = "abfs-testcontainer-";
   public static final int TEST_TIMEOUT = 15 * 60 * 1000;
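
In practice the new ITestAzureBlobFileSystemCheckAccess suite needs
azure-test.xml (TEST_CONFIGURATION_FILE_NAME above) to supply the three
check-access keys just added, i.e. the test client id, secret and test user
GUID, in addition to the regular OAuth credentials, and the target account
must be namespace enabled; otherwise the Assume guards skip the tests.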




[hadoop] 01/04: HDFS-15036. Active NameNode should not silently fail the image transfer. Contributed by Chen Liang.

Posted by jh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 70c2a2992f4488a76351701af2faf7f6e5b05678
Author: Chen Liang <cl...@apache.org>
AuthorDate: Thu Dec 12 12:12:44 2019 -0800

    HDFS-15036. Active NameNode should not silently fail the image transfer. Contributed by Chen Liang.
---
 .../hadoop/hdfs/server/namenode/ImageServlet.java    |  6 ++++++
 .../hdfs/server/namenode/ha/StandbyCheckpointer.java | 12 +++++++++++-
 .../org/apache/hadoop/hdfs/TestRollingUpgrade.java   | 20 ++++++++++++++++++++
 3 files changed, 37 insertions(+), 1 deletion(-)
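
The gist of the three hunks below: an image upload explicitly rejected by
the remote NameNode used to be handled silently; the standby now logs the
rejection, and the active applies its "recent enough image" throttle only
to periodic NameNodeFile.IMAGE uploads, so rollback images and checkpoint
files are no longer refused by that check.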

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index ad8b159..3dcc168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -573,7 +573,13 @@ public class ImageServlet extends HttpServlet {
               long timeDelta = TimeUnit.MILLISECONDS.toSeconds(
                   now - lastCheckpointTime);
 
+              // Since the goal of the check below is to prevent overly
+              // frequent upload from Standby, the check should only be done
+              // for the periodical upload from Standby. For the other
+              // scenarios such as rollback image and ckpt file, they skip
+              // this check, see HDFS-15036 for more info.
               if (checkRecentImageEnable &&
+                  NameNodeFile.IMAGE.equals(parsedParams.getNameNodeFile()) &&
                   timeDelta < checkpointPeriod &&
                   txid - lastCheckpointTxid < checkpointTxnCount) {
                 // only when at least one of two conditions are met we accept
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index c05a0da..5cac972 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -289,10 +289,20 @@ public class StandbyCheckpointer {
         // TODO should there be some smarts here about retries nodes that
         //  are not the active NN?
         CheckpointReceiverEntry receiverEntry = checkpointReceivers.get(url);
-        if (upload.get() == TransferFsImage.TransferResult.SUCCESS) {
+        TransferFsImage.TransferResult uploadResult = upload.get();
+        if (uploadResult == TransferFsImage.TransferResult.SUCCESS) {
           receiverEntry.setLastUploadTime(monotonicNow());
           receiverEntry.setIsPrimary(true);
         } else {
+          // Getting here means image upload is explicitly rejected
+          // by the other node. This could happen if:
+          // 1. the other is also a standby, or
+          // 2. the other is active, but already accepted another
+          // newer image, or
+          // 3. the other is active but has a recent enough image.
+          // All these are valid cases, just log for information.
+          LOG.info("Image upload rejected by the other NameNode: " +
+              uploadResult);
           receiverEntry.setIsPrimary(false);
         }
       } catch (ExecutionException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 749ec96..579c5ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -57,6 +57,7 @@ import org.junit.Assert;
 import static org.junit.Assert.assertTrue;
 import org.junit.Test;
 
+import static org.apache.hadoop.hdfs.server.namenode.ImageServlet.RECENT_IMAGE_CHECK_ENABLED;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
@@ -430,7 +431,22 @@ public class TestRollingUpgrade {
     testFinalize(3);
   }
 
+  @Test(timeout = 300000)
+  public void testFinalizeWithDeltaCheck() throws Exception {
+    testFinalize(2, true);
+  }
+
+  @Test(timeout = 300000)
+  public void testFinalizeWithMultipleNNDeltaCheck() throws Exception {
+    testFinalize(3, true);
+  }
+
   private void testFinalize(int nnCount) throws Exception {
+    testFinalize(nnCount, false);
+  }
+
+  private void testFinalize(int nnCount, boolean skipImageDeltaCheck)
+      throws Exception {
     final Configuration conf = new HdfsConfiguration();
     MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
@@ -449,6 +465,10 @@ public class TestRollingUpgrade {
       dfsCluster.restartNameNodes();
 
       dfsCluster.transitionToActive(0);
+
+      dfsCluster.getNameNode(0).getHttpServer()
+          .setAttribute(RECENT_IMAGE_CHECK_ENABLED, skipImageDeltaCheck);
+
       DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
       dfs.mkdirs(foo);
 




[hadoop] 04/04: HADOOP-16933. Backport HADOOP-16890 and HADOOP-16825 to branch-2

Posted by jh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8f6097113857223861d40f82d10bb4dd231b4aa3
Author: bilaharith <52...@users.noreply.github.com>
AuthorDate: Tue Apr 7 08:40:14 2020 +0530

    HADOOP-16933. Backport HADOOP-16890 and HADOOP-16825 to branch-2
    
    HADOOP-16890. Change in expiry calculation for MSI token provider. Contributed by Bilahari T H
    (cherry picked from commit 0b931f36ec83dc72729a9e84a0d313f471061c64)
    
    HADOOP-16825: ITestAzureBlobFileSystemCheckAccess failing. Contributed by Bilahari T H
    (cherry picked from commit 5944d28130925fe1452f545e96b5e44f064bc69e)
---
 hadoop-project/pom.xml                             |  5 ++
 hadoop-tools/hadoop-azure/pom.xml                  |  6 ++
 .../fs/azurebfs/oauth2/AccessTokenProvider.java    |  2 +-
 .../fs/azurebfs/oauth2/AzureADAuthenticator.java   | 57 +++++++++----
 .../fs/azurebfs/oauth2/MsiTokenProvider.java       | 34 ++++++++
 .../fs/azurebfs/ITestAbfsMsiTokenProvider.java     | 93 ++++++++++++++++++++++
 .../ITestAzureBlobFileSystemCheckAccess.java       | 21 ++++-
 7 files changed, 199 insertions(+), 19 deletions(-)
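
The functional core of HADOOP-16890 is in the AzureADAuthenticator hunk
below: token expiry is now derived from the absolute expires_on field
(epoch seconds) when the response carries one, and the relative expires_in
fallback is rejected for the MSI flow because MSI is known not to refresh
expires_in on renewal.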

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 564009e..5c9fbf6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1315,6 +1315,11 @@
           </exclusion>
         </exclusions>
       </dependency>
+      <dependency>
+        <groupId>org.hamcrest</groupId>
+        <artifactId>hamcrest-library</artifactId>
+        <version>1.3</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
 
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index ed2d530..15507f8 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -249,6 +249,12 @@
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.hamcrest</groupId>
+      <artifactId>hamcrest-library</artifactId>
+      <scope>test</scope>
+    </dependency>
+
   </dependencies>
 
   <profiles>
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AccessTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AccessTokenProvider.java
index 72f37a1..a20e6df 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AccessTokenProvider.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AccessTokenProvider.java
@@ -72,7 +72,7 @@ public abstract class AccessTokenProvider {
    *
    * @return true if the token is expiring in next 5 minutes
    */
-  private boolean isTokenAboutToExpire() {
+  protected boolean isTokenAboutToExpire() {
     if (token == null) {
       LOG.debug("AADToken: no token. Returning expiring=true");
       return true;   // no token should have same response as expired token
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
index 0eb379c..03be1c0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
@@ -136,7 +136,7 @@ public final class AzureADAuthenticator {
     headers.put("Metadata", "true");
 
     LOG.debug("AADToken: starting to fetch token using MSI");
-    return getTokenCall(authEndpoint, qp.serialize(), headers, "GET");
+    return getTokenCall(authEndpoint, qp.serialize(), headers, "GET", true);
   }
 
   /**
@@ -197,8 +197,13 @@ public final class AzureADAuthenticator {
   }
 
   private static AzureADToken getTokenCall(String authEndpoint, String body,
-                                           Hashtable<String, String> headers, String httpMethod)
-          throws IOException {
+      Hashtable<String, String> headers, String httpMethod) throws IOException {
+    return getTokenCall(authEndpoint, body, headers, httpMethod, false);
+  }
+
+  private static AzureADToken getTokenCall(String authEndpoint, String body,
+      Hashtable<String, String> headers, String httpMethod, boolean isMsi)
+      throws IOException {
     AzureADToken token = null;
     ExponentialRetryPolicy retryPolicy
             = new ExponentialRetryPolicy(3, 0, 1000, 2);
@@ -211,7 +216,7 @@ public final class AzureADAuthenticator {
       httperror = 0;
       ex = null;
       try {
-        token = getTokenSingleCall(authEndpoint, body, headers, httpMethod);
+        token = getTokenSingleCall(authEndpoint, body, headers, httpMethod, isMsi);
       } catch (HttpException e) {
         httperror = e.httpErrorCode;
         ex = e;
@@ -227,8 +232,9 @@ public final class AzureADAuthenticator {
     return token;
   }
 
-  private static AzureADToken getTokenSingleCall(
-          String authEndpoint, String payload, Hashtable<String, String> headers, String httpMethod)
+  private static AzureADToken getTokenSingleCall(String authEndpoint,
+      String payload, Hashtable<String, String> headers, String httpMethod,
+      boolean isMsi)
           throws IOException {
 
     AzureADToken token = null;
@@ -268,7 +274,7 @@ public final class AzureADAuthenticator {
       if (httpResponseCode == HttpURLConnection.HTTP_OK
               && responseContentType.startsWith("application/json") && responseContentLength > 0) {
         InputStream httpResponseStream = conn.getInputStream();
-        token = parseTokenFromStream(httpResponseStream);
+        token = parseTokenFromStream(httpResponseStream, isMsi);
       } else {
         String responseBody = consumeInputStream(conn.getErrorStream(), 1024);
         String proxies = "none";
@@ -296,10 +302,12 @@ public final class AzureADAuthenticator {
     return token;
   }
 
-  private static AzureADToken parseTokenFromStream(InputStream httpResponseStream) throws IOException {
+  private static AzureADToken parseTokenFromStream(
+      InputStream httpResponseStream, boolean isMsi) throws IOException {
     AzureADToken token = new AzureADToken();
     try {
-      int expiryPeriod = 0;
+      int expiryPeriodInSecs = 0;
+      long expiresOnInSecs = -1;
 
       JsonFactory jf = new JsonFactory();
       JsonParser jp = jf.createJsonParser(httpResponseStream);
@@ -314,17 +322,38 @@ public final class AzureADAuthenticator {
           if (fieldName.equals("access_token")) {
             token.setAccessToken(fieldValue);
           }
+
           if (fieldName.equals("expires_in")) {
-            expiryPeriod = Integer.parseInt(fieldValue);
+            expiryPeriodInSecs = Integer.parseInt(fieldValue);
+          }
+
+          if (fieldName.equals("expires_on")) {
+            expiresOnInSecs = Long.parseLong(fieldValue);
           }
+
         }
         jp.nextToken();
       }
       jp.close();
-      long expiry = System.currentTimeMillis();
-      expiry = expiry + expiryPeriod * 1000L; // convert expiryPeriod to milliseconds and add
-      token.setExpiry(new Date(expiry));
-      LOG.debug("AADToken: fetched token with expiry " + token.getExpiry().toString());
+      if (expiresOnInSecs > 0) {
+        LOG.debug("Expiry based on expires_on: {}", expiresOnInSecs);
+        token.setExpiry(new Date(expiresOnInSecs * 1000));
+      } else {
+        if (isMsi) {
+          // Currently there is a known issue that MSI does not update expires_in
+          // for refresh and will have the value from first AAD token fetch request.
+          // Due to this known limitation, expires_in is not supported for MSI token fetch flow.
+          throw new UnsupportedOperationException("MSI Responded with invalid expires_on");
+        }
+
+        LOG.debug("Expiry based on expires_in: {}", expiryPeriodInSecs);
+        long expiry = System.currentTimeMillis();
+        expiry = expiry + expiryPeriodInSecs * 1000L; // convert expiryPeriod to milliseconds and add
+        token.setExpiry(new Date(expiry));
+      }
+
+      LOG.debug("AADToken: fetched token with expiry {}, expiresOn passed: {}",
+          token.getExpiry().toString(), expiresOnInSecs);
     } catch (Exception ex) {
       LOG.debug("AADToken: got exception when parsing json token " + ex.toString());
       throw ex;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/MsiTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/MsiTokenProvider.java
index 38f3045..784365b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/MsiTokenProvider.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/MsiTokenProvider.java
@@ -36,6 +36,10 @@ public class MsiTokenProvider extends AccessTokenProvider {
 
   private final String clientId;
 
+  private long tokenFetchTime = -1;
+
+  private static final long ONE_HOUR = 3600 * 1000;
+
   private static final Logger LOG = LoggerFactory.getLogger(AccessTokenProvider.class);
 
   public MsiTokenProvider(final String authEndpoint, final String tenantGuid,
@@ -51,6 +55,36 @@ public class MsiTokenProvider extends AccessTokenProvider {
     LOG.debug("AADToken: refreshing token from MSI");
     AzureADToken token = AzureADAuthenticator
         .getTokenFromMsi(authEndpoint, tenantGuid, clientId, authority, false);
+    tokenFetchTime = System.currentTimeMillis();
     return token;
   }
+
+  /**
+   * Checks if the token is about to expire as per base expiry logic.
+   * Otherwise try to expire every 1 hour
+   *
+   * @return true if the token is expiring in next 1 hour or if a token has
+   * never been fetched
+   */
+  @Override
+  protected boolean isTokenAboutToExpire() {
+    if (tokenFetchTime == -1 || super.isTokenAboutToExpire()) {
+      return true;
+    }
+
+    boolean expiring = false;
+    long elapsedTimeSinceLastTokenRefreshInMillis =
+        System.currentTimeMillis() - tokenFetchTime;
+    expiring = elapsedTimeSinceLastTokenRefreshInMillis >= ONE_HOUR
+        || elapsedTimeSinceLastTokenRefreshInMillis < 0;
+    // In case of, Token is not refreshed for 1 hr or any clock skew issues,
+    // refresh token.
+    if (expiring) {
+      LOG.debug("MSIToken: token renewing. Time elapsed since last token fetch:"
+          + " {} milli seconds", elapsedTimeSinceLastTokenRefreshInMillis);
+    }
+
+    return expiring;
+  }
+
 }
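
Concretely, with the one-hour cap above: a token fetched at 10:00 forces a
refresh on the first request at or after 11:00 even if its AAD expiry is
much later, and a negative elapsed time (clock skew) forces a refresh as
well.
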
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
new file mode 100644
index 0000000..d871bef
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.io.IOException;
+import java.util.Date;
+
+import org.junit.Test;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
+import org.apache.hadoop.fs.azurebfs.oauth2.AzureADToken;
+import org.apache.hadoop.fs.azurebfs.oauth2.MsiTokenProvider;
+
+import static org.junit.Assume.assumeThat;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.Matchers.isEmptyOrNullString;
+import static org.hamcrest.Matchers.isEmptyString;
+
+import static org.apache.hadoop.fs.azurebfs.constants.AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY;
+import static org.apache.hadoop.fs.azurebfs.constants.AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT;
+
+/**
+ * Test MsiTokenProvider.
+ */
+public final class ITestAbfsMsiTokenProvider
+    extends AbstractAbfsIntegrationTest {
+
+  public ITestAbfsMsiTokenProvider() throws Exception {
+    super();
+  }
+
+  @Test
+  public void test() throws IOException {
+    AbfsConfiguration conf = getConfiguration();
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT),
+        not(isEmptyOrNullString()));
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT),
+        not(isEmptyOrNullString()));
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID),
+        not(isEmptyOrNullString()));
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY),
+        not(isEmptyOrNullString()));
+
+    String tenantGuid = conf
+        .getPasswordString(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT);
+    String clientId = conf.getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID);
+    String authEndpoint = getTrimmedPasswordString(conf,
+        FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT,
+        DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT);
+    String authority = getTrimmedPasswordString(conf,
+        FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY,
+        DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY);
+    AccessTokenProvider tokenProvider = new MsiTokenProvider(authEndpoint,
+        tenantGuid, clientId, authority);
+
+    AzureADToken token = null;
+    token = tokenProvider.getToken();
+    assertThat(token.getAccessToken(), not(isEmptyString()));
+    assertThat(token.getExpiry().after(new Date()), is(true));
+  }
+
+  private String getTrimmedPasswordString(AbfsConfiguration conf, String key,
+      String defaultValue) throws IOException {
+    String value = conf.getPasswordString(key);
+    if (StringUtils.isBlank(value)) {
+      value = defaultValue;
+    }
+    return value.trim();
+  }
+
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
index d1f9ec7..393eff4 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java
@@ -54,7 +54,7 @@ public class ITestAzureBlobFileSystemCheckAccess
 
   private static final String TEST_FOLDER_PATH = "CheckAccessTestFolder";
   private final FileSystem superUserFs;
-  private final FileSystem testUserFs;
+  private FileSystem testUserFs;
   private final String testUserGuid;
   private final boolean isCheckAccessEnabled;
   private final boolean isHNSEnabled;
@@ -64,13 +64,15 @@ public class ITestAzureBlobFileSystemCheckAccess
     this.superUserFs = getFileSystem();
     testUserGuid = getConfiguration()
         .get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID);
-    this.testUserFs = getTestUserFs();
     this.isCheckAccessEnabled = getConfiguration().isCheckAccessEnabled();
     this.isHNSEnabled = getConfiguration()
         .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false);
   }
 
-  private FileSystem getTestUserFs() throws Exception {
+  private void setTestUserFs() throws Exception {
+    if (this.testUserFs != null) {
+      return;
+    }
     String orgClientId = getConfiguration().get(FS_AZURE_BLOB_FS_CLIENT_ID);
     String orgClientSecret = getConfiguration()
         .get(FS_AZURE_BLOB_FS_CLIENT_SECRET);
@@ -89,7 +91,7 @@ public class ITestAzureBlobFileSystemCheckAccess
     getRawConfiguration()
         .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION,
             orgCreateFileSystemDurungInit);
-    return fs;
+    this.testUserFs = fs;
   }
 
   @Test(expected = IllegalArgumentException.class)
@@ -112,6 +114,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test(expected = FileNotFoundException.class)
   public void testCheckAccessForNonExistentFile() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path nonExistentFile = setupTestDirectoryAndUserAccess(
         "/nonExistentFile1.txt", FsAction.ALL);
     superUserFs.delete(nonExistentFile, true);
@@ -155,12 +158,16 @@ public class ITestAzureBlobFileSystemCheckAccess
     Assume.assumeFalse(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is true",
         getConfiguration()
             .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, true));
+    Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false",
+            isCheckAccessEnabled);
+    setTestUserFs();
     testUserFs.access(new Path("/"), FsAction.READ);
   }
 
   @Test
   public void testFsActionNONE() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test2.txt",
         FsAction.NONE);
     assertInaccessible(testFilePath, FsAction.EXECUTE);
@@ -175,6 +182,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionEXECUTE() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test3.txt",
         FsAction.EXECUTE);
     assertAccessible(testFilePath, FsAction.EXECUTE);
@@ -190,6 +198,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionREAD() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test4.txt",
         FsAction.READ);
     assertAccessible(testFilePath, FsAction.READ);
@@ -205,6 +214,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionWRITE() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test5.txt",
         FsAction.WRITE);
     assertAccessible(testFilePath, FsAction.WRITE);
@@ -220,6 +230,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionREADEXECUTE() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test6.txt",
         FsAction.READ_EXECUTE);
     assertAccessible(testFilePath, FsAction.EXECUTE);
@@ -235,6 +246,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionWRITEEXECUTE() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test7.txt",
         FsAction.WRITE_EXECUTE);
     assertAccessible(testFilePath, FsAction.EXECUTE);
@@ -250,6 +262,7 @@ public class ITestAzureBlobFileSystemCheckAccess
   @Test
   public void testFsActionALL() throws Exception {
     assumeHNSAndCheckAccessEnabled();
+    setTestUserFs();
     Path testFilePath = setupTestDirectoryAndUserAccess("/test8.txt",
         FsAction.ALL);
     assertAccessible(testFilePath, FsAction.EXECUTE);


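The change above defers creation of the test-user FileSystem: instead of building it in the constructor, where a missing check-access test setting would fail every test before its Assume guards could run, each test now calls an idempotent setTestUserFs() once its preconditions pass. A minimal sketch of the idiom, with the patch's client-id/secret swapping replaced by a hypothetical createTestUserFs() helper:

    import org.apache.hadoop.fs.FileSystem;

    public class LazyTestUserFsSketch {
      private FileSystem testUserFs;

      // Idempotent: safe to call at the top of every test method once its
      // Assume-based preconditions have passed.
      private void setTestUserFs() throws Exception {
        if (testUserFs != null) {
          return;  // already created by an earlier test
        }
        testUserFs = createTestUserFs();
      }

      // Hypothetical stand-in for the credential setup done in the patch.
      private FileSystem createTestUserFs() throws Exception {
        throw new UnsupportedOperationException("sketch only");
      }
    }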

[hadoop] 03/04: HADOOP-16778. ABFS: Backport HADOOP-16660 ABFS: Make RetryCount in ExponentialRetryPolicy Configurable to Branch-2. Contributed by Sneha Vijayarajan.

Posted by jh...@apache.org.

jhung pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3bc2e26cd72342fbf294e9d30c1a55301ea10d69
Author: Sneha Vijayarajan <sn...@gmail.com>
AuthorDate: Sat Jan 4 15:39:11 2020 -0800

    HADOOP-16778. ABFS: Backport HADOOP-16660 ABFS: Make RetryCount in ExponentialRetryPolicy Configurable to Branch-2.
    Contributed by Sneha Vijayarajan.
---
 .../fs/azurebfs/AzureBlobFileSystemStore.java      |  4 +-
 .../fs/azurebfs/services/AbfsRestOperation.java    |  3 +
 .../azurebfs/services/ExponentialRetryPolicy.java  | 11 ++--
 .../azurebfs/ITestAbfsRestOperationException.java  | 43 +++++++++++++-
 .../TestAbfsConfigurationFieldsValidation.java     |  2 +-
 .../fs/azurebfs/oauth2/RetryTestTokenProvider.java | 67 ++++++++++++++++++++++
 6 files changed, 120 insertions(+), 10 deletions(-)

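With this patch the retry count flows from fs.azure.io.retry.max.retries (the key exercised by the new test below) into ExponentialRetryPolicy, replacing the hard-coded default of 30. A minimal sketch of tuning it from client code, with the container URI as a placeholder:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class RetryTuningSketch {
      public static FileSystem open() throws Exception {
        Configuration conf = new Configuration();
        // Lower the ABFS retry budget from the default of 30 to 3.
        conf.setInt("fs.azure.io.retry.max.retries", 3);
        // Placeholder URI; substitute a real container and account.
        return FileSystem.newInstance(
            new URI("abfs://container@account.dfs.core.windows.net/"), conf);
      }
    }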
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 158f834..cb12645 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -938,7 +938,9 @@ public class AzureBlobFileSystemStore {
       tokenProvider = abfsConfiguration.getTokenProvider();
     }
 
-    this.client =  new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(), tokenProvider);
+    this.client = new AbfsClient(baseUrl, creds, abfsConfiguration,
+        new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries()),
+        tokenProvider);
   }
 
   private String getOctalNotation(FsPermission fsPermission) {
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
index fa8f742..54fe14a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
@@ -122,8 +122,11 @@ public class AbfsRestOperation {
    */
   void execute() throws AzureBlobFileSystemException {
     int retryCount = 0;
+    LOG.debug("First execution of REST operation - {}", operationType);
     while (!executeHttpOperation(retryCount++)) {
       try {
+        LOG.debug("Retrying REST operation {}. RetryCount = {}",
+            operationType, retryCount);
         Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount));
       } catch (InterruptedException ex) {
         Thread.currentThread().interrupt();
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
index 5eb7a66..b272cf2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
@@ -26,11 +26,6 @@ import java.net.HttpURLConnection;
  * */
 public class ExponentialRetryPolicy {
   /**
-   * Represents the default number of retry attempts.
-   */
-  private static final int DEFAULT_CLIENT_RETRY_COUNT = 30;
-
-  /**
    * Represents the default amount of time used when calculating a random delta in the exponential
    * delay between retries.
    */
@@ -86,8 +81,9 @@
   /**
    * Initializes a new instance of the {@link ExponentialRetryPolicy} class.
    */
-  public ExponentialRetryPolicy() {
-    this(DEFAULT_CLIENT_RETRY_COUNT, DEFAULT_MIN_BACKOFF, DEFAULT_MAX_BACKOFF, DEFAULT_CLIENT_BACKOFF);
+  public ExponentialRetryPolicy(final int maxIoRetries) {
+    this(maxIoRetries, DEFAULT_MIN_BACKOFF, DEFAULT_MAX_BACKOFF,
+        DEFAULT_CLIENT_BACKOFF);
   }
 
   /**
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
index ff88b02..cc66cca 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
@@ -20,12 +20,17 @@ package org.apache.hadoop.fs.azurebfs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
 /**
  * Verify the AbfsRestOperationException error message format.
  * */
@@ -72,4 +77,40 @@ public class ITestAbfsRestOperationException extends AbstractAbfsIntegrationTest
               && errorFields[5].contains("Time"));
     }
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testRequestRetryConfig() throws Exception {
+    testRetryLogic(0);
+    testRetryLogic(3);
+  }
+
+  private void testRetryLogic(int numOfRetries) throws Exception {
+    AzureBlobFileSystem fs = this.getFileSystem();
+
+    Configuration config = new Configuration(this.getRawConfiguration());
+    String accountName = config.get("fs.azure.abfs.account.name");
+    // Set up the custom token provider.
+    config.set("fs.azure.account.auth.type." + accountName, "Custom");
+    config.set("fs.azure.account.oauth.provider.type." + accountName,
+        "org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider");
+    config.set("fs.azure.io.retry.max.retries", Integer.toString(numOfRetries));
+    // Skip remote filesystem creation during initialization, as it would call the store.
+    config.set("fs.azure.createRemoteFileSystemDuringInitialization", "false");
+
+    final AzureBlobFileSystem fs1 =
+        (AzureBlobFileSystem) FileSystem.newInstance(fs.getUri(),
+            config);
+    RetryTestTokenProvider.resetStatusToFirstTokenFetch();
+    try {
+      fs1.getFileStatus(new Path("/"));
+    } catch (Exception ex) {
+      // Expected to fail: the test token provider always throws when fetching a token.
+    }
+
+    // The number of retries done should match the configured maximum.
+    Assert.assertEquals(
+        "Number of token fetch retries (" + RetryTestTokenProvider.retryCount
+            + ") does not match the configured max retry count (" + numOfRetries + ")",
+        numOfRetries, RetryTestTokenProvider.retryCount);
+  }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java
index 2a65263..c15532e 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java
@@ -182,4 +182,4 @@ public class TestAbfsConfigurationFieldsValidation {
     assertEquals(SSLSocketFactoryEx.SSLChannelMode.OpenSSL, localAbfsConfiguration.getPreferredSSLFactoryOption());
   }
 
-}
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/RetryTestTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/RetryTestTokenProvider.java
new file mode 100644
index 0000000..3566ebb
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/RetryTestTokenProvider.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.azurebfs.oauth2;
+
+import java.io.IOException;
+import java.util.Date;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azurebfs.extensions.CustomTokenProviderAdaptee;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test token provider that always throws an exception when fetching a token, to trigger and count retries.
+ */
+public class RetryTestTokenProvider implements CustomTokenProviderAdaptee {
+
+  // Track the first token fetch; otherwise it would be counted as a retry too.
+  private static boolean isThisFirstTokenFetch = true;
+  public static int retryCount = 0;
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(RetryTestTokenProvider.class);
+
+  @Override
+  public void initialize(Configuration configuration, String accountName)
+      throws IOException {
+
+  }
+
+  public static void resetStatusToFirstTokenFetch() {
+    isThisFirstTokenFetch = true;
+    retryCount = 0;
+  }
+
+  @Override
+  public String getAccessToken() throws IOException {
+    if (isThisFirstTokenFetch) {
+      isThisFirstTokenFetch = false;
+    } else {
+      retryCount++;
+    }
+
+    LOG.debug("RetryTestTokenProvider: Throw an exception in fetching tokens");
+    throw new IOException("test exception");
+  }
+
+  @Override
+  public Date getExpiryTime() {
+    return new Date();
+  }
+}

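RetryTestTokenProvider is plugged in through the standard custom-provider configuration keys, exactly as the test above does. A minimal sketch of the wiring, with the account name as a placeholder:

    import org.apache.hadoop.conf.Configuration;

    public class ProviderWiringSketch {
      public static Configuration wire(Configuration base) {
        String account = "myaccount.dfs.core.windows.net";  // placeholder
        Configuration conf = new Configuration(base);
        // Select "Custom" auth and point it at the retry-test provider.
        conf.set("fs.azure.account.auth.type." + account, "Custom");
        conf.set("fs.azure.account.oauth.provider.type." + account,
            "org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider");
        return conf;
      }
    }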

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org