Posted to commits@ozone.apache.org by pi...@apache.org on 2023/04/28 13:19:03 UTC

[ozone] branch HDDS-7733-Symmetric-Tokens updated: HDDS-8003. E2E integration test cases for block tokens (#4547)

This is an automated email from the ASF dual-hosted git repository.

pifta pushed a commit to branch HDDS-7733-Symmetric-Tokens
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-7733-Symmetric-Tokens by this push:
     new 124345cfe7 HDDS-8003. E2E integration test cases for block tokens (#4547)
124345cfe7 is described below

commit 124345cfe7b269495997b20b96841cd1ca53cc6d
Author: Duong Nguyen <du...@gmail.com>
AuthorDate: Fri Apr 28 06:18:56 2023 -0700

    HDDS-8003. E2E integration test cases for block tokens (#4547)
---
 .../symmetric/DefaultSecretKeyVerifierClient.java  |  30 +-
 .../symmetric/SecretKeyVerifierClient.java         |   2 +
 .../security/token/ShortLivedTokenVerifier.java    |   2 +-
 .../hdds/security/token/TokenVerifierTests.java    |   2 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |  17 +-
 .../org/apache/hadoop/ozone/TestBlockTokens.java   | 395 +++++++++++++++++++++
 .../org/apache/hadoop/ozone/TestSecretKeysApi.java |   5 +-
 7 files changed, 434 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java
index 56478793cb..c79ae6ef8b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.time.Duration;
+import java.util.Optional;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -38,27 +39,38 @@ import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient;
 
 /**
  * Default implementation of {@link SecretKeyVerifierClient} that fetches
- * SecretKeys remotely via {@link SCMSecurityProtocol}.
+ * SecretKeys remotely via {@link SCMSecurityProtocol} and caches them locally.
  */
 public class DefaultSecretKeyVerifierClient implements SecretKeyVerifierClient {
   private static final Logger LOG =
       LoggerFactory.getLogger(DefaultSecretKeyVerifierClient.class);
 
-  private final LoadingCache<UUID, ManagedSecretKey> cache;
+  private final LoadingCache<UUID, Optional<ManagedSecretKey>> cache;
 
   DefaultSecretKeyVerifierClient(SCMSecurityProtocol scmSecurityProtocol,
                                  ConfigurationSource conf) {
     Duration expiryDuration = parseExpiryDuration(conf);
     Duration rotateDuration = parseRotateDuration(conf);
-    long cacheSize = expiryDuration.toMillis() / rotateDuration.toMillis() + 1;
 
-    CacheLoader<UUID, ManagedSecretKey> loader =
-        new CacheLoader<UUID, ManagedSecretKey>() {
+    // If rotation is 1d and each key is valid for 7d before expiring,
+    // the expected number of valid keys at any time is 7.
+    final long expectedValidKeys =
+        expiryDuration.toMillis() / rotateDuration.toMillis() + 1;
+    // However, we want to cache some expired keys as well, to avoid asking
+    // SCM for recently expired secret keys. It makes sense to double the
+    // secret key cache (e.g. 7 valid keys and 7 recently expired ones).
+    final int secretKeyCacheMultiplier = 2;
+    long cacheSize = expectedValidKeys * secretKeyCacheMultiplier;
+    Duration cacheExpiry = expiryDuration.multipliedBy(
+        secretKeyCacheMultiplier);
+
+    CacheLoader<UUID, Optional<ManagedSecretKey>> loader =
+        new CacheLoader<UUID, Optional<ManagedSecretKey>>() {
           @Override
-          public ManagedSecretKey load(UUID id) throws Exception {
+          public Optional<ManagedSecretKey> load(UUID id) throws Exception {
             ManagedSecretKey secretKey = scmSecurityProtocol.getSecretKey(id);
             LOG.info("Secret key fetched from SCM: {}", secretKey);
-            return secretKey;
+            return Optional.ofNullable(secretKey);
           }
         };
 
@@ -66,7 +78,7 @@ public class DefaultSecretKeyVerifierClient implements SecretKeyVerifierClient {
         cacheSize, expiryDuration);
     cache = CacheBuilder.newBuilder()
         .maximumSize(cacheSize)
-        .expireAfterWrite(expiryDuration.toMillis(), TimeUnit.MILLISECONDS)
+        .expireAfterWrite(cacheExpiry.toMillis(), TimeUnit.MILLISECONDS)
         .recordStats()
         .build(loader);
   }
@@ -74,7 +86,7 @@ public class DefaultSecretKeyVerifierClient implements SecretKeyVerifierClient {
   @Override
   public ManagedSecretKey getSecretKey(UUID id) throws SCMSecurityException {
     try {
-      return cache.get(id);
+      return cache.get(id).orElse(null);
     } catch (ExecutionException e) {
       // handle cache load exception.
       if (e.getCause() instanceof IOException) {
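
The caching pattern in the hunk above, in isolation: values are wrapped in Optional so that "key not found" answers from SCM are cached alongside real keys, and both the size bound and the write-expiry are doubled so recently expired keys stay local. A minimal standalone sketch, assuming Guava's CacheBuilder/CacheLoader; KeySource and fetchFromScm are hypothetical stand-ins for SCMSecurityProtocol.getSecretKey:

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    import java.time.Duration;
    import java.util.Optional;
    import java.util.UUID;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;

    class CachedKeyLookupSketch {
      /** Hypothetical stand-in for the remote SCM lookup; may return null. */
      interface KeySource {
        String fetchFromScm(UUID id);
      }

      private final LoadingCache<UUID, Optional<String>> cache;

      CachedKeyLookupSketch(KeySource source, Duration expiry, Duration rotate) {
        // e.g. 1d rotation and 7d expiry: about 7 keys valid at any time,
        // plus one as margin.
        long expectedValidKeys = expiry.toMillis() / rotate.toMillis() + 1;
        // Keep roughly as many recently expired keys so a token signed by an
        // old key does not trigger a fresh SCM call on every verification.
        long cacheSize = expectedValidKeys * 2;
        Duration cacheExpiry = expiry.multipliedBy(2);
        cache = CacheBuilder.newBuilder()
            .maximumSize(cacheSize)
            .expireAfterWrite(cacheExpiry.toMillis(), TimeUnit.MILLISECONDS)
            .build(new CacheLoader<UUID, Optional<String>>() {
              @Override
              public Optional<String> load(UUID id) {
                // Optional.empty() is cached too, so repeated lookups of an
                // unknown id stay local instead of hitting SCM every time.
                return Optional.ofNullable(source.fetchFromScm(id));
              }
            });
      }

      String getSecretKey(UUID id) throws ExecutionException {
        return cache.get(id).orElse(null);
      }
    }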
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
index 59f49f72f1..08ed39d7f4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.security.symmetric;
 
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 
+import javax.annotation.Nullable;
 import java.util.UUID;
 
 /**
@@ -26,5 +27,6 @@ import java.util.UUID;
  * retrieve the relevant secret key to validate token authority.
  */
 public interface SecretKeyVerifierClient {
+  @Nullable
   ManagedSecretKey getSecretKey(UUID id) throws SCMSecurityException;
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
index 4731f9149c..ae18305f9e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
@@ -111,7 +111,7 @@ public abstract class
     ManagedSecretKey secretKey = secretKeyClient.getSecretKey(
         tokenId.getSecretKeyId());
     if (secretKey == null) {
-      throw new BlockTokenException("Can't find the signer secret key " +
+      throw new BlockTokenException("Can't find the signing secret key " +
           tokenId.getSecretKeyId() + " of the token for user: " +
           tokenId.getUser());
     }
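
The interface change that follows below makes the null return explicit, and the verifier hunk above shows the intended handling: a missing signing key is a token-verification failure, not an NPE. A compact sketch of that contract under simplified, hypothetical types (KeyLookup/Key stand in for SecretKeyVerifierClient/ManagedSecretKey; the real code throws BlockTokenException):

    import java.util.UUID;

    final class NullableKeyContractSketch {
      interface KeyLookup {
        Key getSecretKey(UUID id);  // may return null, per @Nullable
      }

      interface Key {
        boolean isExpired();
      }

      static Key requireUsableKey(KeyLookup client, UUID id, String user) {
        Key key = client.getSecretKey(id);
        if (key == null) {
          // Mirrors the verifier's message when the signing key is unknown.
          throw new IllegalStateException("Can't find the signing secret key "
              + id + " of the token for user: " + user);
        }
        if (key.isExpired()) {
          // Mirrors the message asserted in TestBlockTokens.
          throw new IllegalStateException(
              "Token can't be verified due to expired secret key " + id);
        }
        return key;
      }
    }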
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
index 1c7085b35b..1ff9bee053 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
@@ -153,7 +153,7 @@ public abstract class TokenVerifierTests<T extends ShortLivedTokenIdentifier> {
     BlockTokenException ex = assertThrows(BlockTokenException.class, () ->
         subject.verify("anyUser", token, cmd));
     assertThat(ex.getMessage(),
-        containsString("Can't find the signer secret key"));
+        containsString("Can't find the signing secret key"));
   }
 
   @Test
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
index 91d4b94404..4843c1c45e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
@@ -74,6 +74,17 @@ public class KeyInputStream extends MultipartInputStream {
       // BlockInputStream is only created here and not initialized. The
       // BlockInputStream is initialized when a read operation is performed on
       // the block for the first time.
+      Function<BlockID, BlockLocationInfo> retry;
+      if (retryFunction != null) {
+        retry = keyBlockID -> {
+          OmKeyInfo newKeyInfo = retryFunction.apply(keyInfo);
+          return getBlockLocationInfo(newKeyInfo,
+              omKeyLocationInfo.getBlockID());
+        };
+      } else {
+        retry = null;
+      }
+
       BlockExtendedInputStream stream =
           blockStreamFactory.create(
               keyInfo.getReplicationConfig(),
@@ -82,11 +93,7 @@ public class KeyInputStream extends MultipartInputStream {
               omKeyLocationInfo.getToken(),
               verifyChecksum,
               xceiverClientFactory,
-              keyBlockID -> {
-                OmKeyInfo newKeyInfo = retryFunction.apply(keyInfo);
-                return getBlockLocationInfo(newKeyInfo,
-                    omKeyLocationInfo.getBlockID());
-              });
+              retry);
       partStreams.add(stream);
     }
     return partStreams;
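
The hunk above guards the per-block retry callback: previously the lambda always captured retryFunction, so creating a stream without a retry function could only fail later, with a NullPointerException inside the callback; now the stream factory receives null up front. The same guard in isolation, with simplified types (refreshKey and lookupBlockLocation are hypothetical stand-ins for retryFunction and getBlockLocationInfo):

    import java.util.function.Function;

    final class RetryGuardSketch {
      // Types simplified: String stands in for OmKeyInfo/BlockLocationInfo,
      // Long for BlockID.
      static Function<Long, String> buildRetry(
          Function<String, String> refreshKey,  // null when retries are disabled
          String keyInfo,
          Long blockId) {
        if (refreshKey == null) {
          return null;  // downstream treats a null callback as "no retry"
        }
        return ignoredBlockId -> {
          // Re-resolve the key info, then look the block up again in it.
          String refreshedKeyInfo = refreshKey.apply(keyInfo);
          return lookupBlockLocation(refreshedKeyInfo, blockId);
        };
      }

      private static String lookupBlockLocation(String keyInfo, Long blockId) {
        return keyInfo + "/" + blockId;  // placeholder for the real lookup
      }
    }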
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
new file mode 100644
index 0000000000..e24abf9cd4
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.conf.DefaultConfigManager;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
+import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager;
+import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory;
+import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl;
+import org.apache.hadoop.ozone.client.io.KeyInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ratis.util.ExitUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Function;
+
+import static java.util.Objects.requireNonNull;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION;
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED;
+import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.apache.ozone.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.ozone.test.GenericTestUtils.waitFor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration test to verify block tokens in a secure cluster.
+ */
+@InterfaceAudience.Private
+public final class TestBlockTokens {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestBlockTokens.class);
+  private static final String TEST_VOLUME = "testvolume";
+  private static final String TEST_BUCKET = "testbucket";
+  private static final String TEST_FILE = "testfile";
+  private static final int ROTATE_DURATION_IN_MS = 3000;
+  private static final int EXPIRY_DURATION_IN_MS = 10000;
+  private static final int ROTATION_CHECK_DURATION_IN_MS = 100;
+
+  @Rule
+  public Timeout timeout = Timeout.seconds(180);
+
+  private static MiniKdc miniKdc;
+  private static OzoneConfiguration conf;
+  private static File workDir;
+  private static File ozoneKeytab;
+  private static File spnegoKeytab;
+  private static File testUserKeytab;
+  private static String testUserPrincipal;
+  private static String host;
+  private static String clusterId;
+  private static String scmId;
+  private static MiniOzoneHAClusterImpl cluster;
+  private static OzoneClient client;
+  private static BlockInputStreamFactory blockInputStreamFactory =
+      new BlockInputStreamFactoryImpl();
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+    ExitUtils.disableSystemExit();
+
+    workDir =
+        GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName());
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+
+    startMiniKdc();
+    setSecureConfig();
+    createCredentialsInKDC();
+    setSecretKeysConfig();
+    startCluster();
+    client = cluster.getClient();
+    createTestData();
+  }
+
+  private static void createTestData() throws IOException {
+    client.getProxy().createVolume(TEST_VOLUME);
+    client.getProxy().createBucket(TEST_VOLUME, TEST_BUCKET);
+    byte[] data = string2Bytes(RandomStringUtils.randomAlphanumeric(1024));
+    OzoneBucket bucket = client.getObjectStore().getVolume(TEST_VOLUME)
+        .getBucket(TEST_BUCKET);
+    try (OzoneOutputStream out = bucket.createKey(TEST_FILE, data.length)) {
+      org.apache.commons.io.IOUtils.write(data, out);
+    }
+  }
+
+  @AfterClass
+  public static void stop() {
+    miniKdc.stop();
+    IOUtils.close(LOG, client);
+    if (cluster != null) {
+      cluster.stop();
+    }
+    DefaultConfigManager.clearDefaultConfigs();
+  }
+
+  @Test
+  public void blockTokensHappyCase() throws Exception {
+    ManagedSecretKey currentScmKey =
+        getScmSecretKeyManager().getCurrentSecretKey();
+    OmKeyInfo keyInfo = getTestKeyInfo();
+
+    // assert block token points to the current SCM key.
+    assertEquals(currentScmKey.getId(), extractSecretKeyId(keyInfo));
+
+    // and the keyInfo can be used to read from datanodes.
+    readDataWithoutRetry(keyInfo);
+
+    // after the rotation passes, the old token is still usable.
+    waitFor(
+        () -> !Objects.equals(getScmSecretKeyManager().getCurrentSecretKey(),
+            currentScmKey),
+        ROTATION_CHECK_DURATION_IN_MS,
+        ROTATE_DURATION_IN_MS + ROTATION_CHECK_DURATION_IN_MS);
+    readDataWithoutRetry(keyInfo);
+  }
+
+  @Test
+  public void blockTokenFailsOnExpiredSecretKey() throws Exception {
+    OmKeyInfo keyInfo = getTestKeyInfo();
+    UUID secretKeyId = extractSecretKeyId(keyInfo);
+    readDataWithoutRetry(keyInfo);
+
+    // wait until the secret key expires.
+    ManagedSecretKey secretKey =
+        requireNonNull(getScmSecretKeyManager().getSecretKey(secretKeyId));
+    waitFor(secretKey::isExpired, ROTATION_CHECK_DURATION_IN_MS,
+        EXPIRY_DURATION_IN_MS);
+    assertTrue(secretKey.isExpired());
+    // verify that the read is denied because of the expired secret key.
+    StorageContainerException ex = assertThrows(StorageContainerException.class,
+        () -> readDataWithoutRetry(keyInfo));
+    assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
+    assertExceptionContains(
+        "Token can't be verified due to expired secret key", ex);
+  }
+
+  @Test
+  public void blockTokenOnExpiredSecretKeyRetrySuccessful() throws Exception {
+    OmKeyInfo keyInfo = getTestKeyInfo();
+    UUID secretKeyId = extractSecretKeyId(keyInfo);
+    readDataWithoutRetry(keyInfo);
+
+    // wait until the secret key expires.
+    ManagedSecretKey secretKey =
+        requireNonNull(getScmSecretKeyManager().getSecretKey(secretKeyId));
+    waitFor(secretKey::isExpired, ROTATION_CHECK_DURATION_IN_MS,
+        EXPIRY_DURATION_IN_MS);
+    assertTrue(secretKey.isExpired());
+    // verify that the read still succeeds, thanks to the retry function.
+    readData(keyInfo, k -> {
+      try {
+        return getTestKeyInfo();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  @Test
+  public void blockTokenFailsOnWrongSecretKeyId() throws Exception {
+    OmKeyInfo keyInfo = getTestKeyInfo();
+    // replace block token secret key id with wrong id.
+    for (OmKeyLocationInfoGroup v : keyInfo.getKeyLocationVersions()) {
+      for (OmKeyLocationInfo l : v.getLocationList()) {
+        Token<OzoneBlockTokenIdentifier> token = l.getToken();
+        OzoneBlockTokenIdentifier tokenId = token.decodeIdentifier();
+        tokenId.setSecretKeyId(UUID.randomUUID());
+        token.setID(tokenId.getBytes());
+      }
+    }
+
+    // verify that the read is denied because of the unknown secret key.
+    StorageContainerException ex =
+        assertThrows(StorageContainerException.class,
+            () -> readDataWithoutRetry(keyInfo));
+    assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
+    assertExceptionContains("Can't find the signing secret key", ex);
+  }
+
+  @Test
+  public void blockTokenFailsOnWrongPassword() throws Exception {
+    OmKeyInfo keyInfo = getTestKeyInfo();
+    // replace the block token password with a random one.
+    for (OmKeyLocationInfoGroup v : keyInfo.getKeyLocationVersions()) {
+      for (OmKeyLocationInfo l : v.getLocationList()) {
+        Token<OzoneBlockTokenIdentifier> token = l.getToken();
+        token.setPassword(RandomUtils.nextBytes(100));
+      }
+    }
+
+    // verify that the read is denied because of the invalid token password.
+    StorageContainerException ex =
+        assertThrows(StorageContainerException.class,
+            () -> readDataWithoutRetry(keyInfo));
+    assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
+    assertExceptionContains("Invalid token for user", ex);
+  }
+
+
+  private UUID extractSecretKeyId(OmKeyInfo keyInfo) throws IOException {
+    OmKeyLocationInfo locationInfo =
+        keyInfo.getKeyLocationVersions().get(0).getLocationList().get(0);
+    Token<OzoneBlockTokenIdentifier> token = locationInfo.getToken();
+    return token.decodeIdentifier().getSecretKeyId();
+  }
+
+  private OmKeyInfo getTestKeyInfo() throws IOException {
+    OmKeyArgs arg = new OmKeyArgs.Builder()
+        .setVolumeName(TEST_VOLUME)
+        .setBucketName(TEST_BUCKET)
+        .setKeyName(TEST_FILE)
+        .build();
+    return cluster.getOzoneManager()
+        .getKeyInfo(arg, false).getKeyInfo();
+  }
+
+  private void readDataWithoutRetry(OmKeyInfo keyInfo) throws IOException {
+    readData(keyInfo, null);
+  }
+
+  private void readData(OmKeyInfo keyInfo,
+      Function<OmKeyInfo, OmKeyInfo> retryFunc) throws IOException {
+    XceiverClientFactory xceiverClientManager =
+        ((RpcClient) client.getProxy()).getXceiverClientManager();
+    try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo,
+        xceiverClientManager,
+        false, retryFunc, blockInputStreamFactory)) {
+      byte[] buf = new byte[100];
+      int readBytes = is.read(buf, 0, 100);
+      assertEquals(100, readBytes);
+    }
+  }
+
+  private SecretKeyManager getScmSecretKeyManager() {
+    return cluster.getActiveSCM().getSecretKeyManager();
+  }
+
+  private static void setSecretKeysConfig() {
+    // Secret key lifecycle configs.
+    conf.set(HDDS_SECRET_KEY_ROTATE_CHECK_DURATION,
+        ROTATION_CHECK_DURATION_IN_MS + "ms");
+    conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_IN_MS + "ms");
+    conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_IN_MS + "ms");
+
+    // enable tokens
+    conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
+    conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);
+  }
+
+  private static void createCredentialsInKDC() throws Exception {
+    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+    SCMHTTPServerConfig httpServerConfig =
+        conf.getObject(SCMHTTPServerConfig.class);
+    createPrincipal(ozoneKeytab, scmConfig.getKerberosPrincipal());
+    createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal());
+    createPrincipal(testUserKeytab, testUserPrincipal);
+  }
+
+  private static void createPrincipal(File keytab, String... principal)
+      throws Exception {
+    miniKdc.createPrincipal(keytab, principal);
+  }
+
+  private static void startMiniKdc() throws Exception {
+    Properties securityProperties = MiniKdc.createConf();
+    miniKdc = new MiniKdc(securityProperties, workDir);
+    miniKdc.start();
+  }
+
+  private static void setSecureConfig() throws IOException {
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    host = InetAddress.getLocalHost().getCanonicalHostName()
+        .toLowerCase();
+
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
+
+    String curUser = UserGroupInformation.getCurrentUser().getUserName();
+    conf.set(OZONE_ADMINISTRATORS, curUser);
+
+    String realm = miniKdc.getRealm();
+    String hostAndRealm = host + "@" + realm;
+    conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+    conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm);
+    conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+    conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm);
+    conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+
+    ozoneKeytab = new File(workDir, "scm.keytab");
+    spnegoKeytab = new File(workDir, "http.keytab");
+    testUserKeytab = new File(workDir, "testuser.keytab");
+    testUserPrincipal = "test@" + realm;
+
+    conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+    conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
+        spnegoKeytab.getAbsolutePath());
+    conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+    conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
+        spnegoKeytab.getAbsolutePath());
+    conf.set(DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+  }
+
+  private static void startCluster()
+      throws IOException, TimeoutException, InterruptedException {
+    OzoneManager.setTestSecureOmFlag(true);
+    MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setSCMServiceId("TestSecretKey")
+        .setScmId(scmId)
+        .setNumDatanodes(3)
+        .setNumOfStorageContainerManagers(3)
+        .setNumOfOzoneManagers(1);
+
+    cluster = (MiniOzoneHAClusterImpl) builder.build();
+    cluster.waitForClusterToBeReady();
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java
index 84423cabac..217b08e728 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java
@@ -85,7 +85,7 @@ public final class TestSecretKeysApi {
       .getLogger(TestSecretKeysApi.class);
 
   @Rule
-  public Timeout timeout = Timeout.seconds(1600);
+  public Timeout timeout = Timeout.seconds(180);
 
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
@@ -94,7 +94,6 @@ public final class TestSecretKeysApi {
   private File spnegoKeytab;
   private File testUserKeytab;
   private String testUserPrincipal;
-  private String host;
   private String clusterId;
   private String scmId;
   private MiniOzoneHAClusterImpl cluster;
@@ -146,7 +145,7 @@ public final class TestSecretKeysApi {
 
   private void setSecureConfig() throws IOException {
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    host = InetAddress.getLocalHost().getCanonicalHostName()
+    String host = InetAddress.getLocalHost().getCanonicalHostName()
         .toLowerCase();
 
     conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org