You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by el...@apache.org on 2021/04/08 12:22:27 UTC
[ozone] branch master updated: HDDS-4840. Make datanode db profile configurable with existing hdds.d… (#1955)
This is an automated email from the ASF dual-hosted git repository.
elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 9c92855 HDDS-4840. Make datanode db profile configurable with existing hdds.d… (#1955)
9c92855 is described below
commit 9c92855d3297bca0146b417c05e60d4282abad58
Author: Gui Hecheng <ma...@tencent.com>
AuthorDate: Thu Apr 8 20:21:04 2021 +0800
HDDS-4840. Make datanode db profile configurable with existing hdds.d… (#1955)
---
.../container/metadata/AbstractDatanodeStore.java | 23 +++++++-----
.../container/keyvalue/TestKeyValueContainer.java | 43 +++++++++++++++++++++-
.../org/apache/hadoop/hdds/utils/db/DBProfile.java | 4 +-
3 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 12921af..e1c5768 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -33,7 +33,6 @@ import org.rocksdb.BloomFilter;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.LRUCache;
-import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
import org.rocksdb.util.SizeUnit;
@@ -47,6 +46,8 @@ import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentHashMap;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
+import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
@@ -74,7 +75,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
private final long containerID;
private final ColumnFamilyOptions cfOptions;
- private static final DBProfile DEFAULT_PROFILE = DBProfile.DISK;
+ private final DBProfile dbProfile;
private static final Map<ConfigurationSource, ColumnFamilyOptions>
OPTIONS_CACHE = new ConcurrentHashMap<>();
private final boolean openReadOnly;
@@ -89,6 +90,9 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
AbstractDatanodeDBDefinition dbDef, boolean openReadOnly)
throws IOException {
+ dbProfile = config.getEnum(HDDS_DB_PROFILE,
+ HDDS_DEFAULT_DB_PROFILE);
+
// The same config instance is used on each datanode, so we can share the
// corresponding column family options, providing a single shared cache
// for all containers on a datanode.
@@ -107,7 +111,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
public void start(ConfigurationSource config)
throws IOException {
if (this.store == null) {
- DBOptions options = DEFAULT_PROFILE.getDBOptions();
+ DBOptions options = dbProfile.getDBOptions();
options.setCreateIfMissing(true);
options.setCreateMissingColumnFamilies(true);
@@ -216,6 +220,11 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
return Collections.unmodifiableMap(OPTIONS_CACHE);
}
+ @VisibleForTesting
+ public DBProfile getDbProfile() {
+ return dbProfile;
+ }
+
private static void checkTableStatus(Table<?, ?> table, String name)
throws IOException {
String logMessage = "Unable to get a reference to %s table. Cannot " +
@@ -228,23 +237,19 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
}
}
- private static ColumnFamilyOptions buildColumnFamilyOptions(
+ private ColumnFamilyOptions buildColumnFamilyOptions(
ConfigurationSource config) {
long cacheSize = (long) config.getStorageSize(
HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE,
HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT,
StorageUnit.BYTES);
- // Enables static creation of RocksDB objects.
- RocksDB.loadLibrary();
-
BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
tableConfig.setBlockCache(new LRUCache(cacheSize * SizeUnit.MB))
.setPinL0FilterAndIndexBlocksInCache(true)
.setFilterPolicy(new BloomFilter());
- return DEFAULT_PROFILE
- .getColumnFamilyOptions()
+ return dbProfile.getColumnFamilyOptions()
.setTableFormatConfig(tableConfig);
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 8989c3e..7d9f3cd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
+import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -38,6 +39,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
@@ -67,6 +69,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static org.apache.ratis.util.Preconditions.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -437,7 +440,6 @@ public class TestKeyValueContainer {
// Create Container 1
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
- Assert.assertEquals(1, cachedOptions.size());
ColumnFamilyOptions options1 = cachedOptions.get(CONF);
Assert.assertNotNull(options1);
@@ -449,11 +451,48 @@ public class TestKeyValueContainer {
keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
- Assert.assertEquals(1, cachedOptions.size());
ColumnFamilyOptions options2 = cachedOptions.get(CONF);
Assert.assertNotNull(options2);
// Column family options object should be reused.
Assert.assertSame(options1, options2);
}
+
+ @Test
+ public void testDBProfileAffectsDBOptions() throws Exception {
+ // Create Container 1
+ keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+ DBProfile outProfile1;
+ try (ReferenceCountedDB db1 =
+ BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
+ DatanodeStore store1 = db1.getStore();
+ Assert.assertTrue(store1 instanceof AbstractDatanodeStore);
+ outProfile1 = ((AbstractDatanodeStore) store1).getDbProfile();
+ }
+
+ // Create Container 2 with different DBProfile in otherConf
+ OzoneConfiguration otherConf = new OzoneConfiguration();
+ // Use a dedicated profile for test
+ otherConf.setEnum(HDDS_DB_PROFILE, DBProfile.SSD);
+
+ keyValueContainerData = new KeyValueContainerData(2L,
+ layout,
+ (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+ datanodeId.toString());
+ keyValueContainer = new KeyValueContainer(keyValueContainerData, otherConf);
+ keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+ DBProfile outProfile2;
+ try (ReferenceCountedDB db2 =
+ BlockUtils.getDB(keyValueContainer.getContainerData(), otherConf)) {
+ DatanodeStore store2 = db2.getStore();
+ Assert.assertTrue(store2 instanceof AbstractDatanodeStore);
+ outProfile2 = ((AbstractDatanodeStore) store2).getDbProfile();
+ }
+
+ // DBOptions should be different
+ Assert.assertNotEquals(outProfile1.getDBOptions().compactionReadaheadSize(),
+ outProfile2.getDBOptions().compactionReadaheadSize());
+ }
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
index b9b7ef7..442fd25 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
@@ -43,7 +43,7 @@ public enum DBProfile {
SSD {
@Override
public String toString() {
- return "DBProfile.SSD";
+ return "SSD";
}
@Override
@@ -90,7 +90,7 @@ public enum DBProfile {
DISK {
@Override
public String toString() {
- return "DBProfile.DISK";
+ return "DISK";
}
@Override
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org