Posted to commits@ozone.apache.org by sa...@apache.org on 2022/04/07 05:21:22 UTC

[ozone] branch HDDS-3630 updated: HDDS-6486. [Merge rocksdb in datanode] Add new container schema v3 definitions. (#3253)

This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch HDDS-3630
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3630 by this push:
     new 3e91e79c82 HDDS-6486. [Merge rocksdb in datanode] Add new container schema v3 definitions. (#3253)
3e91e79c82 is described below

commit 3e91e79c8257785f93e155fb2f07b74e90caa2d7
Author: Gui Hecheng <ma...@tencent.com>
AuthorDate: Thu Apr 7 13:21:17 2022 +0800

    HDDS-6486. [Merge rocksdb in datanode] Add new container schema v3 definitions. (#3253)
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   4 +
 .../metadata/AbstractDatanodeDBDefinition.java     |  11 +-
 .../container/metadata/AbstractDatanodeStore.java  |  15 ++-
 .../metadata/DatanodeSchemaOneDBDefinition.java    |   6 +-
 .../metadata/DatanodeSchemaThreeDBDefinition.java  | 147 +++++++++++++++++++++
 .../metadata/DatanodeSchemaTwoDBDefinition.java    |   6 +-
 .../ozone/container/metadata/DatanodeStore.java    |   5 +-
 .../metadata/DatanodeStoreSchemaOneImpl.java       |   2 +-
 .../metadata/DatanodeStoreSchemaThreeImpl.java     |  77 +++++++++++
 .../metadata/DatanodeStoreSchemaTwoImpl.java       |   4 +-
 .../hdds/utils/db/FixedLengthStringCodec.java      |  50 +++++++
 .../hdds/utils/db/FixedLengthStringUtils.java      |  57 ++++++++
 .../hdds/utils/db/TestFixedLengthStringUtils.java  |  45 +++++++
 .../hadoop/ozone/debug/DBDefinitionFactory.java    |  14 +-
 .../org/apache/hadoop/ozone/debug/DBScanner.java   |   3 +-
 .../ozone/debug/TestDBDefinitionFactory.java       |  13 +-
 16 files changed, 439 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index bdc87899a4..7983f8eaf1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -271,7 +271,11 @@ public final class OzoneConsts {
   // V2: Metadata, block data, and delete transactions in their own
   // column families.
   public static final String SCHEMA_V2 = "2";
+  // V3: Column family definitions are similar to V2,
+  // but each key is prefixed with the containerID.
+  public static final String SCHEMA_V3 = "3";
 
+  // TODO(markgui): Add SCHEMA_V3 here once it is fully supported.
   public static final String[] SCHEMA_VERSIONS =
       new String[] {SCHEMA_V1, SCHEMA_V2};
 
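To illustrate what the containerID key prefix means in practice, here is a
minimal standalone sketch (illustrative only, not part of this patch) of the
V2 vs. V3 key shapes, assuming the 8-byte big-endian containerID prefix
introduced in the files below:

    import com.google.common.primitives.Longs;

    public class SchemaV3KeySketch {
      public static void main(String[] args) {
        long containerID = 7L;
        long blockID = 1001L;
        // V2: the block_data key is just the blockID.
        byte[] v2Key = Longs.toByteArray(blockID);
        // V3: the same key gains a fixed-length containerID prefix, so all
        // blocks of one container share a common 8-byte prefix.
        byte[] prefix = Longs.toByteArray(containerID);
        byte[] v3Key = new byte[prefix.length + v2Key.length];
        System.arraycopy(prefix, 0, v3Key, 0, prefix.length);
        System.arraycopy(v2Key, 0, v3Key, prefix.length, v2Key.length);
        System.out.println(v2Key.length);  // 8
        System.out.println(v3Key.length);  // 16
      }
    }
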
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java
index 2fb117441a..49bcd8edd2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java
@@ -33,12 +33,17 @@ public abstract class AbstractDatanodeDBDefinition implements DBDefinition {
 
   private File dbDir;
 
+  private ConfigurationSource config;
+
   /**
    * @param dbPath The absolute path to the .db file corresponding to this
    * {@link DBDefinition}.
+   * @param config The ozone global configuration.
    */
-  protected AbstractDatanodeDBDefinition(String dbPath) {
+  protected AbstractDatanodeDBDefinition(String dbPath,
+      ConfigurationSource config) {
     this.dbDir = new File(dbPath);
+    this.config = config;
   }
 
   @Override
@@ -57,6 +62,10 @@ public abstract class AbstractDatanodeDBDefinition implements DBDefinition {
             "No location config key available for datanode databases.");
   }
 
+  public ConfigurationSource getConfig() {
+    return config;
+  }
+
   @Override
   public DBColumnFamilyDefinition[] getColumnFamilies() {
     return new DBColumnFamilyDefinition[] {getBlockDataColumnFamily(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 9adf673352..cab08dd5cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -176,14 +176,15 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
   }
 
   @Override
-  public BlockIterator<BlockData> getBlockIterator(long containerID) {
+  public BlockIterator<BlockData> getBlockIterator(long containerID)
+      throws IOException {
     return new KeyValueBlockIterator(containerID,
             blockDataTableWithIterator.iterator());
   }
 
   @Override
   public BlockIterator<BlockData> getBlockIterator(long containerID,
-      KeyPrefixFilter filter) {
+      KeyPrefixFilter filter) throws IOException {
     return new KeyValueBlockIterator(containerID,
             blockDataTableWithIterator.iterator(), filter);
   }
@@ -208,6 +209,14 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
     return dbProfile;
   }
 
+  protected AbstractDatanodeDBDefinition getDbDef() {
+    return this.dbDef;
+  }
+
+  protected Table<String, BlockData> getBlockDataTableWithIterator() {
+    return this.blockDataTableWithIterator;
+  }
+
   private static void checkTableStatus(Table<?, ?> table, String name)
           throws IOException {
     String logMessage = "Unable to get a reference to %s table. Cannot " +
@@ -227,7 +236,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
    * {@link MetadataKeyFilters#getUnprefixedKeyFilter()}
    */
   @InterfaceAudience.Public
-  private static class KeyValueBlockIterator implements
+  public static class KeyValueBlockIterator implements
           BlockIterator<BlockData>, Closeable {
 
     private static final Logger LOG = LoggerFactory.getLogger(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java
index 1382dafbd1..f3a9196b88 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.metadata;
 
 import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.LongCodec;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -68,8 +69,9 @@ public class DatanodeSchemaOneDBDefinition
             ChunkInfoList.class,
             new SchemaOneChunkInfoListCodec());
 
-  public DatanodeSchemaOneDBDefinition(String dbPath) {
-    super(dbPath);
+  public DatanodeSchemaOneDBDefinition(String dbPath,
+      ConfigurationSource config) {
+    super(dbPath, config);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
new file mode 100644
index 0000000000..5286056119
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.metadata;
+
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
+import org.apache.hadoop.hdds.utils.db.FixedLengthStringUtils;
+import org.apache.hadoop.hdds.utils.db.LongCodec;
+import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
+import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
+import org.rocksdb.ColumnFamilyOptions;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
+import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
+
+/**
+ * This class defines the RocksDB structure for a datanode following schema
+ * version 3, where block data, metadata, and transactions to be deleted
+ * are stored in their own column families, with the containerID as a key
+ * prefix.
+ *
+ * Key format illustrations for the column families:
+ * - block_data:     containerID | blockID
+ * - metadata:       containerID | #BLOCKCOUNT
+ *                   containerID | #BYTESUSED
+ *                   ...
+ * - deleted_blocks: containerID | blockID
+ * - delete_txns:    containerID | TransactionID
+ *
+ * Keys are encoded in a fixed-length style so that the RocksDB
+ * "Prefix Seek" feature can be used to optimize seeks.
+ */
+public class DatanodeSchemaThreeDBDefinition
+    extends AbstractDatanodeDBDefinition {
+  public static final DBColumnFamilyDefinition<String, BlockData>
+      BLOCK_DATA =
+      new DBColumnFamilyDefinition<>(
+          "block_data",
+          String.class,
+          new FixedLengthStringCodec(),
+          BlockData.class,
+          new BlockDataCodec());
+
+  public static final DBColumnFamilyDefinition<String, Long>
+      METADATA =
+      new DBColumnFamilyDefinition<>(
+          "metadata",
+          String.class,
+          new FixedLengthStringCodec(),
+          Long.class,
+          new LongCodec());
+
+  public static final DBColumnFamilyDefinition<String, ChunkInfoList>
+      DELETED_BLOCKS =
+      new DBColumnFamilyDefinition<>(
+          "deleted_blocks",
+          String.class,
+          new FixedLengthStringCodec(),
+          ChunkInfoList.class,
+          new ChunkInfoListCodec());
+
+  public static final DBColumnFamilyDefinition<String, DeletedBlocksTransaction>
+      DELETE_TRANSACTION =
+      new DBColumnFamilyDefinition<>(
+          "delete_txns",
+          String.class,
+          new FixedLengthStringCodec(),
+          DeletedBlocksTransaction.class,
+          new DeletedBlocksTransactionCodec());
+
+  public DatanodeSchemaThreeDBDefinition(String dbPath,
+      ConfigurationSource config) {
+    super(dbPath, config);
+
+    // Get global ColumnFamilyOptions first.
+    DatanodeDBProfile dbProfile = DatanodeDBProfile
+        .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE));
+
+    ColumnFamilyOptions cfOptions = dbProfile.getColumnFamilyOptions(config);
+    // Use prefix seek to mitigate seek overhead.
+    // See: https://github.com/facebook/rocksdb/wiki/Prefix-Seek
+    cfOptions.useFixedLengthPrefixExtractor(getContainerKeyPrefixLength());
+
+    BLOCK_DATA.setCfOptions(cfOptions);
+    METADATA.setCfOptions(cfOptions);
+    DELETED_BLOCKS.setCfOptions(cfOptions);
+    DELETE_TRANSACTION.setCfOptions(cfOptions);
+  }
+
+  @Override
+  public DBColumnFamilyDefinition[] getColumnFamilies() {
+    return new DBColumnFamilyDefinition[] {getBlockDataColumnFamily(),
+        getMetadataColumnFamily(), getDeletedBlocksColumnFamily(),
+        getDeleteTransactionsColumnFamily()};
+  }
+
+  @Override
+  public DBColumnFamilyDefinition<String, BlockData>
+      getBlockDataColumnFamily() {
+    return BLOCK_DATA;
+  }
+
+  @Override
+  public DBColumnFamilyDefinition<String, Long> getMetadataColumnFamily() {
+    return METADATA;
+  }
+
+  @Override
+  public DBColumnFamilyDefinition<String, ChunkInfoList>
+      getDeletedBlocksColumnFamily() {
+    return DELETED_BLOCKS;
+  }
+
+  public DBColumnFamilyDefinition<String, DeletedBlocksTransaction>
+      getDeleteTransactionsColumnFamily() {
+    return DELETE_TRANSACTION;
+  }
+
+  public static String getContainerKeyPrefix(long containerID) {
+    // NOTE: RocksDB normally needs a fixed-length prefix.
+    return FixedLengthStringUtils.bytes2String(Longs.toByteArray(containerID));
+  }
+
+  private static int getContainerKeyPrefixLength() {
+    return FixedLengthStringUtils.string2Bytes(
+        getContainerKeyPrefix(0L)).length;
+  }
+}
\ No newline at end of file
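
The fixed-length prefix is what makes useFixedLengthPrefixExtractor above
effective: every key of a given container starts with the same 8 ISO-8859-1
characters. A minimal round-trip sketch using only the helpers added in this
patch (the surrounding class and main method are illustrative):

    import com.google.common.primitives.Longs;
    import org.apache.hadoop.hdds.utils.db.FixedLengthStringUtils;

    public class ContainerKeyPrefixSketch {
      public static void main(String[] args) {
        long containerID = 42L;
        // Mirrors DatanodeSchemaThreeDBDefinition#getContainerKeyPrefix:
        // 8 big-endian bytes rendered as an 8-character ISO-8859-1 string.
        String prefix = FixedLengthStringUtils.bytes2String(
            Longs.toByteArray(containerID));
        // The prefix extractor length is derived the same way and is
        // always 8, regardless of the containerID value.
        int prefixLength = FixedLengthStringUtils.string2Bytes(prefix).length;
        System.out.println(prefix.length() + " " + prefixLength);  // 8 8
      }
    }
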
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
index b72bad1ae6..8641d7857f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.container.metadata;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.LongCodec;
@@ -70,8 +71,9 @@ public class DatanodeSchemaTwoDBDefinition extends
           StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.class,
           new DeletedBlocksTransactionCodec());
 
-  public DatanodeSchemaTwoDBDefinition(String dbPath) {
-    super(dbPath);
+  public DatanodeSchemaTwoDBDefinition(String dbPath,
+      ConfigurationSource config) {
+    super(dbPath, config);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
index cc6ecaa920..d48a93232b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
@@ -87,8 +87,9 @@ public interface DatanodeStore {
 
   void compactDB() throws IOException;
 
-  BlockIterator<BlockData> getBlockIterator(long containerID);
+  BlockIterator<BlockData> getBlockIterator(long containerID)
+      throws IOException;
 
   BlockIterator<BlockData> getBlockIterator(long containerID,
-      KeyPrefixFilter filter);
+      KeyPrefixFilter filter) throws IOException;
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
index 463ec87de9..4b514c04e4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
@@ -36,7 +36,7 @@ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore {
    */
   public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath,
       boolean openReadOnly) throws IOException {
-    super(config, new DatanodeSchemaOneDBDefinition(dbPath),
+    super(config, new DatanodeSchemaOneDBDefinition(dbPath, config),
         openReadOnly);
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
new file mode 100644
index 0000000000..45b5b08c48
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.metadata;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix;
+
+/**
+ * Constructs a datanode store in accordance with schema version 3, which uses
+ * three column families/tables:
+ * 1. A block data table.
+ * 2. A metadata table.
+ * 3. A delete transaction table.
+ *
+ * It differs from schema version 2 in the following ways:
+ * - All keys are prefixed with the containerID.
+ * - Delete transaction keys are String instead of Long, to carry the prefix.
+ */
+public class DatanodeStoreSchemaThreeImpl extends AbstractDatanodeStore {
+
+  private final Table<String, DeletedBlocksTransaction> deleteTransactionTable;
+
+  public DatanodeStoreSchemaThreeImpl(ConfigurationSource config,
+      String dbPath, boolean openReadOnly) throws IOException {
+    super(config, new DatanodeSchemaThreeDBDefinition(dbPath, config),
+        openReadOnly);
+    this.deleteTransactionTable = ((DatanodeSchemaThreeDBDefinition) getDbDef())
+        .getDeleteTransactionsColumnFamily().getTable(getStore());
+  }
+
+  public Table<String, DeletedBlocksTransaction> getDeleteTransactionTable() {
+    return this.deleteTransactionTable;
+  }
+
+  @Override
+  public BlockIterator<BlockData> getBlockIterator(long containerID)
+      throws IOException {
+    // Exclude keys that follow the containerID prefix with a metadata
+    // marker such as #deleting#.
+    return new KeyValueBlockIterator(containerID,
+        getBlockDataTableWithIterator()
+            .iterator(getContainerKeyPrefix(containerID)),
+        new MetadataKeyFilters.KeyPrefixFilter().addFilter(
+            getContainerKeyPrefix(containerID) + "#", true));
+  }
+
+  @Override
+  public BlockIterator<BlockData> getBlockIterator(long containerID,
+      MetadataKeyFilters.KeyPrefixFilter filter) throws IOException {
+    return new KeyValueBlockIterator(containerID,
+        getBlockDataTableWithIterator()
+            .iterator(getContainerKeyPrefix(containerID)), filter);
+  }
+}
\ No newline at end of file
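
A hypothetical usage sketch of the new store (the driver code below is not
part of this patch; it assumes the hasNext()/nextBlock() methods of the
existing BlockIterator interface and BlockData#getLocalID):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
    import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;

    public class SchemaThreeIterationSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical db path; real stores live under the datanode
        // container metadata directory.
        DatanodeStoreSchemaThreeImpl store = new DatanodeStoreSchemaThreeImpl(
            new OzoneConfiguration(), "/tmp/container.db", true);
        // Thanks to the containerID prefix, this iterator seeks directly to
        // the keys of container 42 instead of scanning the whole table.
        BlockIterator<BlockData> iter = store.getBlockIterator(42L);
        while (iter.hasNext()) {
          BlockData block = iter.nextBlock();
          System.out.println(block.getLocalID());
        }
      }
    }
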
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
index 9669c8d981..7e42fabec2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
@@ -44,9 +44,9 @@ public class DatanodeStoreSchemaTwoImpl extends AbstractDatanodeStore {
    */
   public DatanodeStoreSchemaTwoImpl(ConfigurationSource config, String dbPath,
       boolean openReadOnly) throws IOException {
-    super(config, new DatanodeSchemaTwoDBDefinition(dbPath),
+    super(config, new DatanodeSchemaTwoDBDefinition(dbPath, config),
         openReadOnly);
-    this.deleteTransactionTable = new DatanodeSchemaTwoDBDefinition(dbPath)
+    this.deleteTransactionTable = ((DatanodeSchemaTwoDBDefinition) getDbDef())
         .getDeleteTransactionsColumnFamily().getTable(getStore());
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java
new file mode 100644
index 0000000000..a493e6a974
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import java.io.IOException;
+
+/**
+ * Codec to convert a prefixed String to/from byte array.
+ * The prefix has to be of fixed length.
+ */
+public class FixedLengthStringCodec implements Codec<String> {
+  @Override
+  public byte[] toPersistedFormat(String object) throws IOException {
+    if (object != null) {
+      return FixedLengthStringUtils.string2Bytes(object);
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public String fromPersistedFormat(byte[] rawData) throws IOException {
+    if (rawData != null) {
+      return FixedLengthStringUtils.bytes2String(rawData);
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public String copyObject(String object) {
+    return object;
+  }
+}
\ No newline at end of file
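
A quick round trip through the codec (illustrative sketch; it builds the
prefix with the FixedLengthStringUtils helper added in the next file):

    import com.google.common.primitives.Longs;

    import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
    import org.apache.hadoop.hdds.utils.db.FixedLengthStringUtils;

    public class CodecRoundTripSketch {
      public static void main(String[] args) throws Exception {
        String prefix = FixedLengthStringUtils.bytes2String(
            Longs.toByteArray(42L));
        FixedLengthStringCodec codec = new FixedLengthStringCodec();
        // toPersistedFormat/fromPersistedFormat are inverses; the persisted
        // form of a container prefix is always 8 bytes.
        byte[] persisted = codec.toPersistedFormat(prefix);
        String restored = codec.fromPersistedFormat(persisted);
        System.out.println(persisted.length);         // 8
        System.out.println(prefix.equals(restored));  // true
      }
    }
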
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java
new file mode 100644
index 0000000000..ce2c59a82c
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * String utility class for conversion between byte[] and String,
+ * using a fixed-length character encoding (e.g. ASCII).
+ * This differs from StringUtils, which uses UTF-8, a variable-length
+ * encoding.
+ * It is mainly intended for FixedLengthStringCodec, which requires a
+ * fixed-length prefix.
+ */
+public final class FixedLengthStringUtils {
+
+  private FixedLengthStringUtils() {
+  }
+
+  // An ASCII extension: https://en.wikipedia.org/wiki/ISO/IEC_8859-1
+  // Each character is encoded as a single eight-bit code value.
+  private static final String ASCII_CSN = StandardCharsets.ISO_8859_1.name();
+
+  public static String bytes2String(byte[] bytes) {
+    try {
+      return new String(bytes, 0, bytes.length, ASCII_CSN);
+    } catch (UnsupportedEncodingException e) {
+      throw new IllegalArgumentException(
+          "ISO_8859_1 decoding is not supported", e);
+    }
+  }
+
+  public static byte[] string2Bytes(String str) {
+    try {
+      return str.getBytes(ASCII_CSN);
+    } catch (UnsupportedEncodingException e) {
+      throw new IllegalArgumentException(
+          "ISO_8859_1 encoding is not supported", e);
+    }
+  }
+}
\ No newline at end of file
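
Why ISO-8859-1 rather than UTF-8: every byte value 0x00-0xFF maps to exactly
one character, so an 8-byte prefix always becomes an 8-character string and
round-trips losslessly. A short standalone demonstration (not part of this
patch):

    import java.nio.charset.StandardCharsets;

    public class FixedLengthEncodingDemo {
      public static void main(String[] args) {
        byte[] raw = {0, 0, 0, 0, 0, 0, (byte) 0x80, (byte) 0xFF};
        // ISO-8859-1: one char per byte; lossless, fixed-length round trip.
        String latin1 = new String(raw, StandardCharsets.ISO_8859_1);
        System.out.println(
            latin1.getBytes(StandardCharsets.ISO_8859_1).length);  // 8
        // UTF-8: 0x80 and 0xFF are invalid standalone bytes and decode to
        // U+FFFD, which re-encodes to 3 bytes each -- no longer 8 bytes.
        String utf8 = new String(raw, StandardCharsets.UTF_8);
        System.out.println(
            utf8.getBytes(StandardCharsets.UTF_8).length);         // 12
      }
    }
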
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java
new file mode 100644
index 0000000000..d2ad45b172
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import com.google.common.primitives.Longs;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test for class FixedLengthStringUtils.
+ */
+public class TestFixedLengthStringUtils {
+
+  @Test
+  public void testStringEncodeAndDecode() {
+    long[] testContainerIDs = {
+        0L, 1L, 2L, 12345L,
+        Long.MAX_VALUE / 2, Long.MAX_VALUE - 1, Long.MAX_VALUE
+    };
+
+    for (long containerID : testContainerIDs) {
+      String containerPrefix = FixedLengthStringUtils.bytes2String(
+          Longs.toByteArray(containerID));
+      long decodedContainerID = Longs.fromByteArray(
+          FixedLengthStringUtils.string2Bytes(containerPrefix));
+      assertEquals(containerID, decodedContainerID);
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
index 271a03c441..369da54c73 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
@@ -22,9 +22,11 @@ import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.HashMap;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition;
@@ -63,7 +65,8 @@ public final class DBDefinitionFactory {
     return getReconDBDefinition(dbName);
   }
 
-  public static DBDefinition getDefinition(Path dbPath) {
+  public static DBDefinition getDefinition(Path dbPath,
+      ConfigurationSource config) {
     Preconditions.checkNotNull(dbPath,
         "Path is required to identify the used db scheme");
     final Path fileName = dbPath.getFileName();
@@ -72,14 +75,17 @@ public final class DBDefinitionFactory {
           "Path is required to identify the used db scheme");
     }
     String dbName = fileName.toString();
-    if (dbName.endsWith("-container.db")) {
+    if (dbName.endsWith("container.db")) {
       switch (dnDBSchemaVersion) {
       case "V1":
         return new DatanodeSchemaOneDBDefinition(
-            dbPath.toAbsolutePath().toString());
+            dbPath.toAbsolutePath().toString(), config);
+      case "V3":
+        return new DatanodeSchemaThreeDBDefinition(
+            dbPath.toAbsolutePath().toString(), config);
       default:
         return new DatanodeSchemaTwoDBDefinition(
-            dbPath.toAbsolutePath().toString());
+            dbPath.toAbsolutePath().toString(), config);
       }
     }
     return getDefinition(dbName);
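
A usage sketch for the updated factory, mirroring the test change further
below (the surrounding class is illustrative):

    import java.nio.file.Paths;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.db.DBDefinition;
    import org.apache.hadoop.ozone.debug.DBDefinitionFactory;

    public class FactorySketch {
      public static void main(String[] args) {
        DBDefinitionFactory.setDnDBSchemaVersion("V3");
        DBDefinition definition = DBDefinitionFactory.getDefinition(
            Paths.get("/tmp/test-container.db"), new OzoneConfiguration());
        // Prints: DatanodeSchemaThreeDBDefinition
        System.out.println(definition.getClass().getSimpleName());
      }
    }
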
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 275908e641..94cbb855a7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -216,7 +217,7 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
     dbPath = removeTrailingSlashIfNeeded(dbPath);
     DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion);
     this.constructColumnFamilyMap(DBDefinitionFactory.
-            getDefinition(Paths.get(dbPath)));
+            getDefinition(Paths.get(dbPath), new OzoneConfiguration()));
     if (this.columnFamilyMap != null) {
       if (!this.columnFamilyMap.containsKey(tableName)) {
         System.out.print("Table with name:" + tableName + " does not exist");
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
index 4d018d1e30..6ba9faa961 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.ozone.debug;
 
 import java.nio.file.Paths;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition;
@@ -61,11 +63,18 @@ public class TestDBDefinitionFactory {
     assertTrue(definition instanceof ReconDBDefinition);
     DBDefinitionFactory.setDnDBSchemaVersion("V2");
     definition =
-        DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"));
+        DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
+            new OzoneConfiguration());
     assertTrue(definition instanceof DatanodeSchemaTwoDBDefinition);
     DBDefinitionFactory.setDnDBSchemaVersion("V1");
     definition =
-        DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"));
+        DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
+            new OzoneConfiguration());
     assertTrue(definition instanceof DatanodeSchemaOneDBDefinition);
+    DBDefinitionFactory.setDnDBSchemaVersion("V3");
+    definition =
+        DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
+            new OzoneConfiguration());
+    assertTrue(definition instanceof DatanodeSchemaThreeDBDefinition);
   }
 }
\ No newline at end of file

