You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ck...@apache.org on 2022/11/01 03:44:21 UTC

[ozone] 01/02: HDDS-7320. Enable Container SchemaV3 by default. (#3841)

This is an automated email from the ASF dual-hosted git repository.

ckj pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 5fe8351e4af3b5c88ce928c0cbcbcd3bd3a90ecb
Author: Sammi Chen <sa...@apache.org>
AuthorDate: Tue Nov 1 10:25:14 2022 +0800

    HDDS-7320. Enable Container SchemaV3 by default. (#3841)
    
    * HDDS-7320. Enable Container SchemaV3 by default.
---
 .../common/statemachine/DatanodeConfiguration.java |  4 +--
 .../container/common/utils/DatanodeStoreCache.java | 29 +++++++++++++++--
 .../container/keyvalue/helpers/BlockUtils.java     |  2 +-
 .../metadata/DatanodeSchemaThreeDBDefinition.java  |  4 +++
 .../container/common/TestDatanodeStoreCache.java   |  2 +-
 .../container/common/impl/TestHddsDispatcher.java  |  8 +++++
 .../container/common/volume/TestHddsVolume.java    | 10 +++---
 .../container/keyvalue/TestKeyValueContainer.java  | 14 +++++++--
 .../TestKeyValueContainerMarkUnhealthy.java        |  2 ++
 .../container/keyvalue/TestKeyValueHandler.java    | 10 ++++--
 .../keyvalue/impl/AbstractTestChunkManager.java    |  5 ++-
 .../smoketest/compatibility/dn-one-rocksdb.robot   |  2 +-
 .../ozone/TestStorageContainerManagerHelper.java   | 19 +++++++-----
 .../TestDatanodeHddsVolumeFailureDetection.java    | 36 ++++++++++++++++++++--
 14 files changed, 118 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 75c34477de..642298af08 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -72,7 +72,7 @@ public class DatanodeConfiguration {
   static final long DISK_CHECK_TIMEOUT_DEFAULT =
       Duration.ofMinutes(10).toMillis();
 
-  static final boolean CONTAINER_SCHEMA_V3_ENABLED_DEFAULT = false;
+  static final boolean CONTAINER_SCHEMA_V3_ENABLED_DEFAULT = true;
   static final long ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT = 32 * 1024 * 1024;
   static final int ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT = 64;
   // one hour
@@ -294,7 +294,7 @@ public class DatanodeConfiguration {
   }
 
   @Config(key = "container.schema.v3.enabled",
-      defaultValue = "false",
+      defaultValue = "true",
       type = ConfigType.BOOLEAN,
       tags = { DATANODE },
       description = "Enable use of container schema v3(one rocksdb per disk)."
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
index 0f7baa6317..64ac2f72ea 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.ozone.container.common.utils;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -51,10 +55,30 @@ public final class DatanodeStoreCache {
 
   public void addDB(String containerDBPath, RawDB db) {
     datanodeStoreMap.putIfAbsent(containerDBPath, db);
+    LOG.info("Added db {} to cache", containerDBPath);
   }
 
-  public RawDB getDB(String containerDBPath) {
-    return datanodeStoreMap.get(containerDBPath);
+  public RawDB getDB(String containerDBPath, ConfigurationSource conf)
+      throws IOException {
+    RawDB db = datanodeStoreMap.get(containerDBPath);
+    if (db == null) {
+      synchronized (this) {
+        db = datanodeStoreMap.get(containerDBPath);
+        if (db == null) {
+          try {
+            DatanodeStore store = new DatanodeStoreSchemaThreeImpl(
+                conf, containerDBPath, false);
+            db = new RawDB(store, containerDBPath);
+            datanodeStoreMap.put(containerDBPath, db);
+          } catch (IOException e) {
+            LOG.error("Failed to get DB store {}", containerDBPath, e);
+            throw new IOException("Failed to get DB store " +
+                containerDBPath, e);
+          }
+        }
+      }
+    }
+    return db;
   }
 
   public void removeDB(String containerDBPath) {
@@ -69,6 +93,7 @@ public final class DatanodeStoreCache {
     } catch (Exception e) {
       LOG.error("Stop DatanodeStore: {} failed", containerDBPath, e);
     }
+    LOG.info("Removed db {} from cache", containerDBPath);
   }
 
   public void shutdownCache() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 73881f3a99..98eea76919 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -129,7 +129,7 @@ public final class BlockUtils {
       if (containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
         DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
         Preconditions.checkNotNull(cache);
-        return cache.getDB(containerDBPath);
+        return cache.getDB(containerDBPath, conf);
       } else {
         ContainerCache cache = ContainerCache.getInstance(conf);
         Preconditions.checkNotNull(cache);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
index f69484f2a4..65f2ffe0aa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
@@ -153,6 +153,10 @@ public class DatanodeSchemaThreeDBDefinition
         getContainerKeyPrefix(0L)).length;
   }
 
+  public static String getKeyWithoutPrefix(String keyWithPrefix) {
+    return keyWithPrefix.substring(keyWithPrefix.indexOf(separator) + 1);
+  }
+
   private void setSeparator(String keySeparator) {
     separator = keySeparator;
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
index b26ed68009..8be137c6e1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -58,7 +58,7 @@ public class TestDatanodeStoreCache {
     Assert.assertEquals(2, cache.size());
 
     // test get, test reference the same object using ==
-    Assert.assertTrue(store1 == cache.getDB(dbPath1).getStore());
+    Assert.assertTrue(store1 == cache.getDB(dbPath1, conf).getStore());
 
     // test remove
     cache.removeDB(dbPath1);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 8a6dedcc03..0d6b3b79bd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -305,6 +305,14 @@ public class TestHddsDispatcher {
     ContainerSet containerSet = new ContainerSet(1000);
     VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
+    volumeSet.getVolumesList().stream().forEach(v -> {
+      try {
+        v.format(scmId.toString());
+        v.createWorkingDir(scmId.toString(), null);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    });
     DatanodeStateMachine stateMachine = Mockito.mock(
         DatanodeStateMachine.class);
     StateContext context = Mockito.mock(StateContext.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 9f26a0b061..4559869c27 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -327,14 +327,13 @@ public class TestHddsVolume {
         CONTAINER_DB_NAME);
     assertTrue(containerDBFile.exists());
     assertNotNull(DatanodeStoreCache.getInstance().getDB(
-        containerDBFile.getAbsolutePath()));
+        containerDBFile.getAbsolutePath(), CONF));
 
     // Make it a bad volume
     volume.failVolume();
 
     // The db should be removed from cache
-    assertNull(DatanodeStoreCache.getInstance().getDB(
-        containerDBFile.getAbsolutePath()));
+    assertEquals(0, DatanodeStoreCache.getInstance().size());
   }
 
   @Test
@@ -361,14 +360,13 @@ public class TestHddsVolume {
         CONTAINER_DB_NAME);
     assertTrue(containerDBFile.exists());
     assertNotNull(DatanodeStoreCache.getInstance().getDB(
-        containerDBFile.getAbsolutePath()));
+        containerDBFile.getAbsolutePath(), CONF));
 
     // Make it a bad volume
     volume.failVolume();
 
     // The db should be removed from cache
-    assertNull(DatanodeStoreCache.getInstance().getDB(
-        containerDBFile.getAbsolutePath()));
+    assertEquals(0, DatanodeStoreCache.getInstance().size());
   }
 
   private MutableVolumeSet createDbVolumeSet() throws IOException {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 366d75af5b..439bf540f8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -545,6 +545,7 @@ public class TestKeyValueContainer {
 
     // Create Container 2 with different DBProfile in otherConf
     OzoneConfiguration otherConf = new OzoneConfiguration();
+    ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, otherConf);
     // Use a dedicated profile for test
     otherConf.setEnum(HDDS_DB_PROFILE, DBProfile.SSD);
 
@@ -563,9 +564,16 @@ public class TestKeyValueContainer {
       outProfile2 = ((AbstractDatanodeStore) store2).getDbProfile();
     }
 
-    // DBOptions should be different
-    Assert.assertNotEquals(outProfile1.getDBOptions().compactionReadaheadSize(),
-        outProfile2.getDBOptions().compactionReadaheadSize());
+    // DBOptions should be different, except SCHEMA-V3
+    if (schemaVersion.equals(OzoneConsts.SCHEMA_V3)) {
+      Assert.assertEquals(
+          outProfile1.getDBOptions().compactionReadaheadSize(),
+          outProfile2.getDBOptions().compactionReadaheadSize());
+    } else {
+      Assert.assertNotEquals(
+          outProfile1.getDBOptions().compactionReadaheadSize(),
+          outProfile2.getDBOptions().compactionReadaheadSize());
+    }
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
index d7b520aba6..a8da836835 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
@@ -97,6 +97,8 @@ public class TestKeyValueContainerMarkUnhealthy {
     HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
         .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId
         .toString()).build();
+    hddsVolume.format(scmId);
+    hddsVolume.createWorkingDir(scmId, null);
 
     volumeSet = mock(MutableVolumeSet.class);
     volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 991ab7a58a..a249f66467 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -353,9 +353,15 @@ public class TestKeyValueHandler {
       final ContainerSet containerSet = new ContainerSet(1000);
       final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
 
+      String clusterId = UUID.randomUUID().toString();
+      HddsVolume hddsVolume = new HddsVolume.Builder(testDir).conf(conf)
+          .clusterID(clusterId).datanodeUuid(UUID.randomUUID().toString())
+          .build();
+      hddsVolume.format(clusterId);
+      hddsVolume.createWorkingDir(clusterId, null);
+
       Mockito.when(volumeSet.getVolumesList())
-          .thenReturn(Collections.singletonList(
-              new HddsVolume.Builder(testDir).conf(conf).build()));
+          .thenReturn(Collections.singletonList(hddsVolume));
 
       final int[] interval = new int[1];
       interval[0] = 2;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
index 57deb5c48e..d4e1963b83 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
@@ -76,9 +76,12 @@ public abstract class AbstractTestChunkManager {
     OzoneConfiguration config = new OzoneConfiguration();
     getStrategy().updateConfig(config);
     UUID datanodeId = UUID.randomUUID();
+    UUID clusterId = UUID.randomUUID();
     hddsVolume = new HddsVolume.Builder(confDir
         .getAbsolutePath()).conf(config).datanodeUuid(datanodeId
-        .toString()).build();
+        .toString()).clusterID(clusterId.toString()).build();
+    hddsVolume.format(clusterId.toString());
+    hddsVolume.createWorkingDir(clusterId.toString(), null);
 
     VolumeSet volumeSet = mock(MutableVolumeSet.class);
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
index 1ef6d14f39..9599e31984 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot
@@ -26,4 +26,4 @@ Create a container and check container schema version
     ${output} =         Execute          ozone admin container create
                         Should not contain  ${output}       Failed
     ${output} =         Execute          ozone debug container list
-                        Should contain  ${output}    \"schemaVersion\" : \"2\"
+                        Should contain  ${output}    \"schemaVersion\" : \"3\"
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 48d152e8da..3ff6041232 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -32,8 +32,9 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -137,7 +138,8 @@ public class TestStorageContainerManagerHelper {
                   cData.containerPrefix(), cData.getUnprefixedKeyFilter());
 
       for (Table.KeyValue<String, BlockData> entry : kvs) {
-        allBlocks.add(Long.valueOf(entry.getKey()));
+        allBlocks.add(Long.valueOf(DatanodeSchemaThreeDBDefinition
+            .getKeyWithoutPrefix(entry.getKey())));
       }
     }
     return allBlocks;
@@ -149,13 +151,14 @@ public class TestStorageContainerManagerHelper {
       KeyValueContainerData cData = getContainerMetadata(entry.getKey());
       try (DBHandle db = BlockUtils.getDB(cData, conf)) {
         DatanodeStore ds = db.getStore();
-        DatanodeStoreSchemaTwoImpl dnStoreTwoImpl =
-            (DatanodeStoreSchemaTwoImpl) ds;
-        List<? extends Table.KeyValue<Long, DeletedBlocksTransaction>>
-            txnsInTxnTable = dnStoreTwoImpl.getDeleteTransactionTable()
-            .getRangeKVs(null, Integer.MAX_VALUE, null);
+        DatanodeStoreSchemaThreeImpl dnStoreImpl =
+            (DatanodeStoreSchemaThreeImpl) ds;
+        List<? extends Table.KeyValue<String, DeletedBlocksTransaction>>
+            txnsInTxnTable = dnStoreImpl.getDeleteTransactionTable()
+            .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE,
+                cData.containerPrefix());
         List<Long> conID = new ArrayList<>();
-        for (Table.KeyValue<Long, DeletedBlocksTransaction> txn :
+        for (Table.KeyValue<String, DeletedBlocksTransaction> txn :
             txnsInTxnTable) {
           conID.addAll(txn.getValue().getLocalIDList());
         }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
index 2a5873071a..408e1d0d96 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
@@ -38,11 +38,14 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.dn.DatanodeTestUtils;
 import org.junit.After;
@@ -53,12 +56,16 @@ import org.junit.Test;
 import java.io.File;
 import java.io.IOException;
 import java.time.Duration;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
 
 import org.junit.Rule;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
@@ -71,7 +78,19 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 /**
  * This class tests datanode can detect failed volumes.
  */
+@RunWith(Parameterized.class)
 public class TestDatanodeHddsVolumeFailureDetection {
+  private boolean schemaV3;
+  public TestDatanodeHddsVolumeFailureDetection(boolean enableV3) {
+    this.schemaV3 = enableV3;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+        new Object[]{false},
+        new Object[]{true});
+  }
 
   /**
    * Set a timeout for each test.
@@ -97,6 +116,9 @@ public class TestDatanodeHddsVolumeFailureDetection {
     // keep the cache size = 1, so we could trigger io exception on
     // reading on-disk db instance
     ozoneConfig.setInt(OZONE_CONTAINER_CACHE_SIZE, 1);
+    if (!schemaV3) {
+      ContainerTestUtils.disableSchemaV3(ozoneConfig);
+    }
     // set tolerated = 1
     // shorten the gap between successive checks to ease tests
     DatanodeConfiguration dnConf =
@@ -265,9 +287,19 @@ public class TestDatanodeHddsVolumeFailureDetection {
         .equals(HddsProtos.LifeCycleState.OPEN));
 
     // corrupt db by rename dir->file
-    File metadataDir = new File(c1.getContainerFile().getParent());
-    File dbDir = new File(metadataDir, "1" + OzoneConsts.DN_CONTAINER_DB);
+    File dbDir;
+    if (schemaV3) {
+      dbDir = new File(((KeyValueContainerData)(c1.getContainerData()))
+          .getDbFile().getAbsolutePath());
+    } else {
+      File metadataDir = new File(c1.getContainerFile().getParent());
+      dbDir = new File(metadataDir, "1" + OzoneConsts.DN_CONTAINER_DB);
+    }
     DatanodeTestUtils.injectDataDirFailure(dbDir);
+    if (schemaV3) {
+      // remove rocksDB from cache
+      DatanodeStoreCache.getInstance().shutdownCache();
+    }
 
     // simulate bad volume by removing write permission on root dir
     // refer to HddsVolume.check()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org