Posted to commits@ozone.apache.org by ms...@apache.org on 2021/02/23 11:24:53 UTC

[ozone] branch HDDS-2823 updated: HDDS-4107. replace scmID with clusterID for container and volume at Datanode side. (#1947)

This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-2823 by this push:
     new d17ff92  HDDS-4107. replace scmID with clusterID for container and volume at Datanode side. (#1947)
d17ff92 is described below

commit d17ff927b1b729f02c8250661cce2daa057b9a8c
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Tue Feb 23 16:54:37 2021 +0530

    HDDS-4107. replace scmID with clusterID for container and volume at Datanode side. (#1947)
---
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  2 +-
 .../container/common/impl/HddsDispatcher.java      | 12 +++++------
 .../common/interfaces/ContainerDispatcher.java     |  2 +-
 .../ozone/container/common/interfaces/Handler.java |  6 +++---
 .../states/endpoint/VersionEndpointTask.java       |  2 +-
 .../container/common/utils/HddsVolumeUtil.java     | 23 ++++++++++++++++------
 .../ozone/container/common/volume/HddsVolume.java  |  4 ++--
 .../container/keyvalue/KeyValueContainer.java      | 16 +++++++--------
 .../ozone/container/keyvalue/KeyValueHandler.java  |  4 ++--
 .../helpers/KeyValueContainerLocationUtil.java     | 23 +++++++++++-----------
 .../ozone/container/ozoneimpl/ContainerReader.java | 20 +++++++++----------
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  4 ++--
 .../hadoop/ozone/container/common/ScmTestMock.java | 16 ++++-----------
 .../container/common/impl/TestHddsDispatcher.java  |  4 ++--
 .../container/keyvalue/TestKeyValueHandler.java    |  2 +-
 .../ozone/container/common/TestEndPoint.java       | 12 +++++------
 .../main/compose/upgrade/{test.sh => todo_test.sh} |  0
 .../client/rpc/TestOzoneAtRestEncryption.java      |  4 +++-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  9 ++++++---
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |  4 +++-
 .../transport/server/ratis/TestCSMMetrics.java     |  2 +-
 .../container/metrics/TestContainerMetrics.java    |  2 +-
 .../container/server/TestContainerServer.java      |  4 ++--
 .../server/TestSecureContainerServer.java          |  2 +-
 .../ozone/freon/ClosedContainerReplicator.java     |  2 +-
 .../ozone/genesis/BenchMarkDatanodeDispatcher.java |  2 +-
 26 files changed, 96 insertions(+), 87 deletions(-)
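
The net effect of this patch is that per-container paths under each HDDS volume are keyed by the cluster ID instead of the SCM ID. Below is a minimal, self-contained sketch of that path construction; the class name, sub-directory sharding and example values are illustrative assumptions, not the actual helpers in KeyValueContainerLocationUtil.

import java.io.File;

public final class ContainerPathSketch {
  // <hddsVolumeDir>/<clusterId>/current/<containerSubDir>/<containerId>,
  // mirroring the on-disk layout documented in HddsVolume and ContainerReader.
  static String baseContainerLocation(String hddsVolumeDir, String clusterId,
      long containerId) {
    // Simplified sharding; the real helper derives the sub-directory differently.
    String containerSubDirectory = Long.toString(containerId >> 10);
    return hddsVolumeDir + File.separator + clusterId
        + File.separator + "current"          // Storage.STORAGE_DIR_CURRENT
        + File.separator + containerSubDirectory
        + File.separator + containerId;
  }

  public static void main(String[] args) {
    // prints /data/hdds/CID-example/current/0/42
    System.out.println(baseContainerLocation("/data/hdds", "CID-example", 42L));
  }
}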

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 0de14e6..b5f2a3f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -282,7 +282,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
   private void startRatisForTest() throws IOException {
     String scmId = "scm-01";
     String clusterId = "clusterId";
-    datanodeStateMachine.getContainer().start(scmId);
+    datanodeStateMachine.getContainer().start(clusterId);
     MutableVolumeSet volumeSet =
         getDatanodeStateMachine().getContainer().getVolumeSet();
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index cfbec1c..55002c4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -87,7 +87,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
   private final ProtocolMessageMetrics<ProtocolMessageEnum> protocolMetrics;
   private OzoneProtocolMessageDispatcher<ContainerCommandRequestProto,
       ContainerCommandResponseProto, ProtocolMessageEnum> dispatcher;
-  private String scmID;
+  private String clusterId;
   private ContainerMetrics metrics;
   private final TokenVerifier tokenVerifier;
   private final boolean isBlockTokenEnabled;
@@ -551,12 +551,12 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
   }
 
   @Override
-  public void setScmId(String scmId) {
-    Preconditions.checkNotNull(scmId, "scmId Cannot be null");
-    if (this.scmID == null) {
-      this.scmID = scmId;
+  public void setClusterId(String clusterId) {
+    Preconditions.checkNotNull(clusterId, "clusterId Cannot be null");
+    if (this.clusterId == null) {
+      this.clusterId = clusterId;
       for (Map.Entry<ContainerType, Handler> handlerMap : handlers.entrySet()) {
-        handlerMap.getValue().setScmID(scmID);
+        handlerMap.getValue().setClusterID(clusterId);
       }
     }
   }
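
The cluster ID is applied to the dispatcher exactly once and fanned out to every container Handler, as the setClusterId() hunk above shows. A condensed, self-contained sketch of that set-once fan-out follows; the types are simplified placeholders, not the real ContainerDispatcher and Handler interfaces.

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

final class ClusterIdFanOutSketch {
  interface Handler { void setClusterID(String clusterId); }

  private final List<Handler> handlers = new ArrayList<>();
  private String clusterId;

  void setClusterId(String clusterId) {
    Objects.requireNonNull(clusterId, "clusterId cannot be null");
    if (this.clusterId == null) {            // set-once, as in HddsDispatcher
      this.clusterId = clusterId;
      handlers.forEach(h -> h.setClusterID(clusterId));
    }
  }

  public static void main(String[] args) {
    ClusterIdFanOutSketch dispatcher = new ClusterIdFanOutSketch();
    dispatcher.handlers.add(id -> System.out.println("handler received " + id));
    dispatcher.setClusterId("CID-example");
    dispatcher.setClusterId("CID-ignored");  // no-op: cluster ID already set
  }
}
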
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index ee0b6bc..77145ca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -82,5 +82,5 @@ public interface ContainerDispatcher {
    * If scmId is not set, this will set scmId, otherwise it is a no-op.
    * @param scmId
    */
-  void setScmId(String scmId);
+  void setClusterId(String scmId);
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 4ba7572..e585234 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -48,7 +48,7 @@ public abstract class Handler {
   protected final ConfigurationSource conf;
   protected final ContainerSet containerSet;
   protected final VolumeSet volumeSet;
-  protected String scmID;
+  protected String clusterId;
   protected final ContainerMetrics metrics;
   protected String datanodeId;
   private Consumer<ContainerReplicaProto> icrSender;
@@ -186,8 +186,8 @@ public abstract class Handler {
   public abstract void deleteBlock(Container container, BlockData blockData)
       throws IOException;
 
-  public void setScmID(String scmId) {
-    this.scmID = scmId;
+  public void setClusterID(String clusterID) {
+    this.clusterId = clusterID;
   }
 
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 6c53756..4762c78 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -109,7 +109,7 @@ public class VersionEndpointTask implements
           }
 
           // Start the container services after getting the version information
-          ozoneContainer.start(scmId);
+          ozoneContainer.start(clusterId);
         }
         EndpointStateMachine.EndPointStates nextState =
             rpcEndPoint.getState().getNextState();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 2d68d07..41b8788 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 
@@ -175,6 +176,7 @@ public final class HddsVolumeUtil {
       clusterId, Logger logger) {
     File hddsRoot = hddsVolume.getHddsRootDir();
     String volumeRoot = hddsRoot.getPath();
+    File clusterDir = new File(hddsRoot, clusterId);
     File scmDir = new File(hddsRoot, scmId);
 
     try {
@@ -194,18 +196,27 @@ public final class HddsVolumeUtil {
     } else if (hddsFiles.length == 1) {
       // DN started for first time or this is a newly added volume.
       // So we create scm directory.
-      if (!scmDir.mkdir()) {
-        logger.error("Unable to create scmDir {}", scmDir);
+      if (!clusterDir.mkdir()) {
+        logger.error("Unable to create scmDir {}", clusterDir);
         return false;
       }
       return true;
     } else if(hddsFiles.length == 2) {
-      // The files should be Version and SCM directory
       if (scmDir.exists()) {
+        String msg = "Volume " + volumeRoot +
+            " is in Inconsistent state, and contains the" +
+            "SCM Directory:" + scmDir.getAbsolutePath() +
+            " which is a older format, please upgrade the volume.";
+        logger.error(msg);
+        ExitUtil.terminate(-2, msg);
+        return false;
+      }
+      // The files should be Version and SCM directory
+      if (clusterDir.exists()) {
         return true;
       } else {
-        logger.error("Volume {} is in Inconsistent state, expected scm " +
-                "directory {} does not exist", volumeRoot, scmDir
+        logger.error("Volume {} is in Inconsistent state, expected cluster " +
+                "directory {} does not exist", volumeRoot, clusterDir
             .getAbsolutePath());
         return false;
       }
@@ -213,7 +224,7 @@ public final class HddsVolumeUtil {
       // The hdds root dir should always have 2 files. One is Version file
       // and other is SCM directory.
       logger.error("The hdds root dir {} should always have 2 files. " +
-              "One is Version file and other is SCM directory. " +
+              "One is Version file and other is Cluster directory. " +
               "Please remove any other extra files from the directory " +
               "so that DataNode startup can proceed.",
               hddsRoot.getAbsolutePath());
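
The new check in HddsVolumeUtil treats a pre-existing scmID-named directory as the old layout and aborts startup, while a clusterID-named directory is the expected layout. A rough, self-contained sketch of that decision follows; the class and method names are stand-ins, and the real code terminates the datanode through ExitUtil.terminate(-2, msg) instead of merely returning false.

import java.io.File;

final class VolumeLayoutCheckSketch {
  static boolean checkVolume(File hddsRoot, String scmId, String clusterId) {
    File scmDir = new File(hddsRoot, scmId);
    File clusterDir = new File(hddsRoot, clusterId);
    if (scmDir.exists()) {
      // Old per-SCM layout: the real code logs this and exits the process.
      System.err.println("Volume " + hddsRoot + " still uses the old scmID layout: "
          + scmDir.getAbsolutePath() + "; please upgrade the volume.");
      return false;
    }
    if (clusterDir.exists()) {
      return true;                            // expected clusterID layout
    }
    System.err.println("Volume " + hddsRoot + " is inconsistent: expected cluster"
        + " directory " + clusterDir.getAbsolutePath() + " does not exist.");
    return false;
  }

  public static void main(String[] args) {
    System.out.println(checkVolume(new File("/data/hdds"), "scm-01", "CID-example"));
  }
}
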
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 1dee1ba..afc7e62 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -52,9 +52,9 @@ import org.slf4j.LoggerFactory;
  * <p>
  * The disk layout per volume is as follows:
  * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >>/metadata}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >>/<<dataDir>>}
  * <p>
  * Each hdds volume has its own VERSION file. The hdds volume will have one
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 53d6162..4b46abb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -97,11 +97,11 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
 
   @Override
   public void create(VolumeSet volumeSet, VolumeChoosingPolicy
-      volumeChoosingPolicy, String scmId) throws StorageContainerException {
+      volumeChoosingPolicy, String clusterId) throws StorageContainerException {
     Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " +
         "cannot be null");
     Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null");
-    Preconditions.checkNotNull(scmId, "scmId cannot be null");
+    Preconditions.checkNotNull(clusterId, "clusterId cannot be null");
 
     File containerMetaDataPath = null;
     //acquiring volumeset read lock
@@ -115,11 +115,11 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
       long containerID = containerData.getContainerID();
 
       containerMetaDataPath = KeyValueContainerLocationUtil
-          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID);
+          .getContainerMetaDataPath(hddsVolumeDir, clusterId, containerID);
       containerData.setMetadataPath(containerMetaDataPath.getPath());
 
       File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-          hddsVolumeDir, scmId, containerID);
+          hddsVolumeDir, clusterId, containerID);
 
       // Check if it is new Container.
       ContainerUtils.verifyIsNewContainer(containerMetaDataPath);
@@ -173,20 +173,20 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
    * Set all of the path realted container data fields based on the name
    * conventions.
    *
-   * @param scmId
+   * @param clusterId
    * @param containerVolume
    * @param hddsVolumeDir
    */
-  public void populatePathFields(String scmId,
+  public void populatePathFields(String clusterId,
       HddsVolume containerVolume, String hddsVolumeDir) {
 
     long containerId = containerData.getContainerID();
 
     File containerMetaDataPath = KeyValueContainerLocationUtil
-        .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId);
+        .getContainerMetaDataPath(hddsVolumeDir, clusterId, containerId);
 
     File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-        hddsVolumeDir, scmId, containerId);
+        hddsVolumeDir, clusterId, containerId);
     File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
         containerMetaDataPath, containerId);
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index dbc2a97..40e86ce 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -255,7 +255,7 @@ public class KeyValueHandler extends Handler {
     boolean created = false;
     try (AutoCloseableLock l = containerCreationLock.acquire()) {
       if (containerSet.getContainer(containerID) == null) {
-        newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
+        newContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
         created = containerSet.addContainer(newContainer);
       } else {
         // The create container request for an already existing container can
@@ -284,7 +284,7 @@ public class KeyValueHandler extends Handler {
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
           .getVolumesList(), container.getContainerData().getMaxSize());
       String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-      container.populatePathFields(scmID, containerVolume, hddsVolumeDir);
+      container.populatePathFields(clusterId, containerVolume, hddsVolumeDir);
     } finally {
       volumeSet.readUnlock();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index 0c7a04e..ad1673a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -36,16 +36,16 @@ public final class KeyValueContainerLocationUtil {
    * Returns Container Metadata Location.
    * @param hddsVolumeDir base dir of the hdds volume where scm directories
    *                      are stored
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return containerMetadata Path to container metadata location where
    * .container file will be stored.
    */
   public static File getContainerMetaDataPath(String hddsVolumeDir,
-                                              String scmId,
+                                              String clusterId,
                                               long containerId) {
     String containerMetaDataPath =
-        getBaseContainerLocation(hddsVolumeDir, scmId,
+        getBaseContainerLocation(hddsVolumeDir, clusterId,
             containerId);
     containerMetaDataPath = containerMetaDataPath + File.separator +
         OzoneConsts.CONTAINER_META_PATH;
@@ -56,35 +56,36 @@ public final class KeyValueContainerLocationUtil {
   /**
    * Returns Container Chunks Location.
    * @param baseDir
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return chunksPath
    */
-  public static File getChunksLocationPath(String baseDir, String scmId,
+  public static File getChunksLocationPath(String baseDir, String clusterId,
                                            long containerId) {
-    String chunksPath = getBaseContainerLocation(baseDir, scmId, containerId)
-        + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS;
+    String chunksPath =
+        getBaseContainerLocation(baseDir, clusterId, containerId)
+            + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS;
     return new File(chunksPath);
   }
 
   /**
    * Returns base directory for specified container.
    * @param hddsVolumeDir
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return base directory for container.
    */
   private static String getBaseContainerLocation(String hddsVolumeDir,
-                                                 String scmId,
+                                                 String clusterId,
                                                  long containerId) {
     Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
-    Preconditions.checkNotNull(scmId, "scmUuid cannot be null");
+    Preconditions.checkNotNull(clusterId, "scmUuid cannot be null");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
 
     String containerSubDirectory = getContainerSubDirectory(containerId);
 
-    String containerMetaDataPath = hddsVolumeDir  + File.separator + scmId +
+    String containerMetaDataPath = hddsVolumeDir  + File.separator + clusterId +
         File.separator + Storage.STORAGE_DIR_CURRENT + File.separator +
         containerSubDirectory + File.separator + containerId;
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index fa63cf1..0b3af00 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -46,22 +46,22 @@ import org.slf4j.LoggerFactory;
  * Layout of the container directory on disk is as follows:
  *
  * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >/metadata/<<containerID>>.container}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >/<<dataPath>>}
  * <p>
  * Some ContainerTypes will have extra metadata other than the .container
  * file. For example, KeyValueContainer will have a .db file. This .db file
  * will also be stored in the metadata folder along with the .container file.
  * <p>
- * {@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
+ * {@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<KVcontainerID
  * >/metadata/<<KVcontainerID>>.db}
  * <p>
  * Note that the {@literal <<dataPath>>} is dependent on the ContainerType.
  * For KeyValueContainers, the data is stored in a "chunks" folder. As such,
  * the {@literal <<dataPath>>} layout for KeyValueContainers is:
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<KVcontainerID
  * >/chunks/<<chunksFile>>}
  *
  */
@@ -99,30 +99,30 @@ public class ContainerReader implements Runnable {
     Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
         "cannot be null");
 
-    //filtering scm directory
-    File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() {
+    //filtering storage directory
+    File[] storageDir = hddsVolumeRootDir.listFiles(new FileFilter() {
       @Override
       public boolean accept(File pathname) {
         return pathname.isDirectory();
       }
     });
 
-    if (scmDir == null) {
+    if (storageDir == null) {
       LOG.error("IO error for the volume {}, skipped loading",
           hddsVolumeRootDir);
       volumeSet.failVolume(hddsVolumeRootDir.getPath());
       return;
     }
 
-    if (scmDir.length > 1) {
+    if (storageDir.length > 1) {
       LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir);
       volumeSet.failVolume(hddsVolumeRootDir.getPath());
       return;
     }
 
     LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir);
-    for (File scmLoc : scmDir) {
-      File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT);
+    for (File storageLoc : storageDir) {
+      File currentDir = new File(storageLoc, Storage.STORAGE_DIR_CURRENT);
       File[] containerTopDirs = currentDir.listFiles();
       if (containerTopDirs != null) {
         for (File containerTopDir : containerTopDirs) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index e4fe60b..35664e1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -255,7 +255,7 @@ public class OzoneContainer {
    *
    * @throws IOException
    */
-  public void start(String scmId) throws IOException {
+  public void start(String clusterId) throws IOException {
     if (!isStarted.compareAndSet(false, true)) {
       LOG.info("Ignore. OzoneContainer already started.");
       return;
@@ -269,7 +269,7 @@ public class OzoneContainer {
     writeChannel.start();
     readChannel.start();
     hddsDispatcher.init();
-    hddsDispatcher.setScmId(scmId);
+    hddsDispatcher.setClusterId(clusterId);
     blockDeletingService.start();
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 534f9ef..3e969ac 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -342,19 +342,11 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     scmCommandRequests.add(scmCmd);
   }
 
-  /**
-   * Set scmId.
-   * @param id
-   */
-  public void setScmId(String id) {
-    this.scmId = id;
+  public String getClusterId() {
+    return clusterId;
   }
 
-  /**
-   * Set scmId.
-   * @return scmId
-   */
-  public String getScmId() {
-    return scmId;
+  public void setClusterId(String clusterId) {
+    this.clusterId = clusterId;
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index e6998fe..32bf23a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -129,7 +129,7 @@ public class TestHddsDispatcher {
       }
       HddsDispatcher hddsDispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
-      hddsDispatcher.setScmId(scmId.toString());
+      hddsDispatcher.setClusterId(scmId.toString());
       ContainerCommandResponseProto responseOne = hddsDispatcher
           .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
@@ -291,7 +291,7 @@ public class TestHddsDispatcher {
 
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
         conf, containerSet, volumeSet, handlers, context, metrics, null);
-    hddsDispatcher.setScmId(scmId.toString());
+    hddsDispatcher.setClusterId(scmId.toString());
     return hddsDispatcher;
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index a89bd17..7457874 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -372,7 +372,7 @@ public class TestKeyValueHandler {
       final KeyValueHandler kvHandler = new KeyValueHandler(conf,
           UUID.randomUUID().toString(), containerSet, volumeSet, metrics,
           c -> icrReceived.incrementAndGet());
-      kvHandler.setScmID(UUID.randomUUID().toString());
+      kvHandler.setClusterID(UUID.randomUUID().toString());
 
       final ContainerCommandRequestProto createContainer =
           ContainerCommandRequestProto.newBuilder()
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 7a71c80..0bee9b8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -186,8 +186,8 @@ public class TestEndPoint {
 
       // Now change server scmId, so datanode scmId  will be
       // different from SCM server response scmId
-      String newScmId = UUID.randomUUID().toString();
-      scmServerImpl.setScmId(newScmId);
+      String newClusterId = UUID.randomUUID().toString();
+      scmServerImpl.setClusterId(newClusterId);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       newState = versionTask.call();
       Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
@@ -195,11 +195,9 @@ public class TestEndPoint {
       List<HddsVolume> volumesList = ozoneContainer.getVolumeSet()
           .getFailedVolumesList();
       Assert.assertTrue(volumesList.size() == 1);
-      File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(),
-          scmServerImpl.getScmId());
-      Assert.assertTrue(logCapturer.getOutput().contains("expected scm " +
-          "directory " + expectedScmDir.getAbsolutePath() + " does not " +
-          "exist"));
+      Assert.assertTrue(logCapturer.getOutput()
+          .contains("org.apache.hadoop.ozone.common" +
+              ".InconsistentStorageStateException: Mismatched ClusterIDs"));
       Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size()
           == 0);
       Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList()
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/todo_test.sh
similarity index 100%
rename from hadoop-ozone/dist/src/main/compose/upgrade/test.sh
rename to hadoop-ozone/dist/src/main/compose/upgrade/todo_test.sh
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index 7aced89..6970bc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -80,6 +80,7 @@ public class TestOzoneAtRestEncryption extends TestOzoneRpcClient {
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static File testDir;
   private static OzoneConfiguration conf;
   private static final String TEST_KEY = "key1";
@@ -115,6 +116,7 @@ public class TestOzoneAtRestEncryption extends TestOzoneRpcClient {
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(10)
         .setScmId(SCM_ID)
+        .setClusterId(CLUSTER_ID)
         .setCertificateClient(certificateClientTest)
         .build();
     cluster.getOzoneManager().startSecretManager();
@@ -130,7 +132,7 @@ public class TestOzoneAtRestEncryption extends TestOzoneRpcClient {
     TestOzoneRpcClient.setStorageContainerLocationClient(
         storageContainerLocationClient);
     TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
+    TestOzoneRpcClient.setClusterId(CLUSTER_ID);
 
     // create test key
     createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 8198c9e..9319f13 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -162,6 +162,8 @@ public abstract class TestOzoneRpcClientAbstract {
       remoteGroupName, READ, ACCESS);
 
   private static String scmId = UUID.randomUUID().toString();
+  private static String clusterId = UUID.randomUUID().toString();
+
 
   /**
    * Create a MiniOzoneCluster for testing.
@@ -173,6 +175,7 @@ public abstract class TestOzoneRpcClientAbstract {
         .setNumDatanodes(3)
         .setTotalPipelineNumLimit(10)
         .setScmId(scmId)
+        .setClusterId(clusterId)
         .build();
     cluster.waitForClusterToBeReady();
     ozClient = OzoneClientFactory.getRpcClient(conf);
@@ -226,8 +229,8 @@ public abstract class TestOzoneRpcClientAbstract {
     return TestOzoneRpcClientAbstract.store;
   }
 
-  public static void setScmId(String scmId) {
-    TestOzoneRpcClientAbstract.scmId = scmId;
+  public static void setClusterId(String clusterId) {
+    TestOzoneRpcClientAbstract.clusterId = clusterId;
   }
 
   /**
@@ -1594,7 +1597,7 @@ public abstract class TestOzoneRpcClientAbstract {
       String containreBaseDir =
           container.getContainerData().getVolume().getHddsRootDir().getPath();
       File chunksLocationPath = KeyValueContainerLocationUtil
-          .getChunksLocationPath(containreBaseDir, scmId, containerID);
+          .getChunksLocationPath(containreBaseDir, clusterId, containerID);
       byte[] corruptData = "corrupted data".getBytes(UTF_8);
       // Corrupt the contents of chunk files
       for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index f3ff90a..a0257e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -78,6 +78,7 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static File testDir;
   private static OzoneConfiguration conf;
   private static OzoneBlockTokenSecretManager secretManager;
@@ -104,6 +105,7 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(10)
         .setScmId(SCM_ID)
+        .setClusterId(CLUSTER_ID)
         .setCertificateClient(certificateClientTest)
         .build();
     String user = UserGroupInformation.getCurrentUser().getShortUserName();
@@ -127,7 +129,7 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
     TestOzoneRpcClient.setStorageContainerLocationClient(
         storageContainerLocationClient);
     TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
+    TestOzoneRpcClient.setClusterId(CLUSTER_ID);
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index 4911f95..011ce2d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -223,7 +223,7 @@ public class TestCSMMetrics {
     }
 
     @Override
-    public void setScmId(String scmId) {
+    public void setClusterId(String scmId) {
 
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index b25d4b0..31bb216 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -112,7 +112,7 @@ public class TestContainerMetrics {
       }
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
           volumeSet, handlers, context, metrics, null);
-      dispatcher.setScmId(UUID.randomUUID().toString());
+      dispatcher.setClusterId(UUID.randomUUID().toString());
 
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
       client = new XceiverClientGrpc(pipeline, conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index af67853..4c031d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -217,7 +217,7 @@ public class TestContainerServer {
       }
       HddsDispatcher dispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
-      dispatcher.setScmId(scmId.toString());
+      dispatcher.setClusterId(scmId.toString());
       dispatcher.init();
 
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher,
@@ -274,7 +274,7 @@ public class TestContainerServer {
     }
 
     @Override
-    public void setScmId(String scmId) {
+    public void setClusterId(String scmId) {
 
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 2fdcf39..13faacc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -167,7 +167,7 @@ public class TestSecureContainerServer {
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
         conf, containerSet, volumeSet, handlers, context, metrics,
         new BlockTokenVerifier(new SecurityConfig((conf)), caClient));
-    hddsDispatcher.setScmId(scmId.toString());
+    hddsDispatcher.setClusterId(scmId.toString());
     return hddsDispatcher;
   }
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
index ad2810a..6f03af1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
@@ -186,7 +186,7 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements
               metrics,
               containerReplicaProto -> {
               });
-      handler.setScmID(UUID.randomUUID().toString());
+      handler.setClusterID(UUID.randomUUID().toString());
       handlers.put(containerType, handler);
     }
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 7ffebec..0afe54e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -113,7 +113,7 @@ public class BenchMarkDatanodeDispatcher {
           containerType, conf, "datanodeid",
           containerSet, volumeSet, metrics,
           c -> {});
-      handler.setScmID("scm");
+      handler.setClusterID("scm");
       handlers.put(containerType, handler);
     }
     dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org