You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ca...@apache.org on 2022/11/04 06:10:16 UTC
[ozone] branch ozone-1.3 updated: HDDS-7362. Bring back the mechanism to share LRU cache between RocksDB in datanodes (#3921)
This is an automated email from the ASF dual-hosted git repository.
captainzmc pushed a commit to branch ozone-1.3
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/ozone-1.3 by this push:
new d84b58c7da HDDS-7362. Bring back the mechanism to share LRU cache between RocksDB in datanodes (#3921)
d84b58c7da is described below
commit d84b58c7daf1fa714e6792fca98730ac8eeb0d9b
Author: Duong Nguyen <du...@gmail.com>
AuthorDate: Thu Nov 3 21:01:31 2022 -0700
HDDS-7362. Bring back the mechanism to share LRU cache between RocksDB in datanodes (#3921)
---
.../common/utils/db/DatanodeDBProfile.java | 16 ++++++++--
.../container/keyvalue/TestKeyValueContainer.java | 37 ++++++++++++++++++++++
.../apache/hadoop/hdds/utils/db/RocksDatabase.java | 8 ++++-
.../apache/hadoop/hdds/utils/db/TableConfig.java | 4 ++-
.../db/managed/ManagedColumnFamilyOptions.java | 13 ++++++++
5 files changed, 74 insertions(+), 4 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
index e04c7254e8..2f4b888156 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedLRUCache;
+import java.util.concurrent.atomic.AtomicReference;
+
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT;
@@ -104,6 +106,8 @@ public abstract class DatanodeDBProfile {
* Base profile for datanode storage disks.
*/
private static final class StorageBasedProfile {
+ private final AtomicReference<ManagedColumnFamilyOptions> cfOptions =
+ new AtomicReference<>();
private final DBProfile baseProfile;
private StorageBasedProfile(DBProfile profile) {
@@ -116,9 +120,17 @@ public abstract class DatanodeDBProfile {
private ManagedColumnFamilyOptions getColumnFamilyOptions(
ConfigurationSource config) {
- ManagedColumnFamilyOptions cfOptions =
+ cfOptions.updateAndGet(op -> op != null ? op :
+ createColumnFamilyOptions(config));
+ return cfOptions.get();
+ }
+
+ private ManagedColumnFamilyOptions createColumnFamilyOptions(
+ ConfigurationSource config) {
+ ManagedColumnFamilyOptions options =
baseProfile.getColumnFamilyOptions();
- return cfOptions.closeAndSetTableFormatConfig(
+ options.setReused(true);
+ return options.closeAndSetTableFormatConfig(
getBlockBasedTableConfig(config));
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 439bf540f8..b23600ffd8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -57,6 +58,7 @@ import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyOptions;
import java.io.File;
@@ -71,6 +73,7 @@ import java.util.Map;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
@@ -530,6 +533,40 @@ public class TestKeyValueContainer {
}
}
+ @Test
+ public void testContainersShareColumnFamilyOptions() {
+ ConfigurationSource conf = new OzoneConfiguration();
+
+ // Make sure ColumnFamilyOptions are same for a particular db profile
+ for (Supplier<DatanodeDBProfile> dbProfileSupplier : new Supplier[] {
+ DatanodeDBProfile.Disk::new, DatanodeDBProfile.SSD::new }) {
+ // ColumnFamilyOptions should be same across configurations
+ ColumnFamilyOptions columnFamilyOptions1 = dbProfileSupplier.get()
+ .getColumnFamilyOptions(new OzoneConfiguration());
+ ColumnFamilyOptions columnFamilyOptions2 = dbProfileSupplier.get()
+ .getColumnFamilyOptions(new OzoneConfiguration());
+ Assert.assertEquals(columnFamilyOptions1, columnFamilyOptions2);
+
+ // ColumnFamilyOptions should be same when queried multiple times
+ // for a particular configuration
+ columnFamilyOptions1 = dbProfileSupplier.get()
+ .getColumnFamilyOptions(conf);
+ columnFamilyOptions2 = dbProfileSupplier.get()
+ .getColumnFamilyOptions(conf);
+ Assert.assertEquals(columnFamilyOptions1, columnFamilyOptions2);
+ }
+
+ // Make sure ColumnFamilyOptions are different for different db profile
+ DatanodeDBProfile diskProfile = new DatanodeDBProfile.Disk();
+ DatanodeDBProfile ssdProfile = new DatanodeDBProfile.SSD();
+ Assert.assertNotEquals(
+ diskProfile.getColumnFamilyOptions(new OzoneConfiguration()),
+ ssdProfile.getColumnFamilyOptions(new OzoneConfiguration()));
+ Assert.assertNotEquals(diskProfile.getColumnFamilyOptions(conf),
+ ssdProfile.getColumnFamilyOptions(conf));
+ }
+
+
@Test
public void testDBProfileAffectsDBOptions() throws Exception {
// Create Container 1
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 5fb7d51f6d..59952dc8ec 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.utils.db;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedFlushOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedIngestExternalFileOptions;
@@ -152,7 +153,12 @@ public final class RocksDatabase {
}
private static void close(ColumnFamilyDescriptor d) {
- runWithTryCatch(() -> closeDeeply(d.getOptions()), new Object() {
+ ManagedColumnFamilyOptions options =
+ (ManagedColumnFamilyOptions) d.getOptions();
+ if (options.isReused()) {
+ return;
+ }
+ runWithTryCatch(() -> closeDeeply(options), new Object() {
@Override
public String toString() {
return d.getClass() + ":" + bytes2String(d.getName());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
index 3a3b5a3c3a..42d494443e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
@@ -108,6 +108,8 @@ public class TableConfig implements AutoCloseable {
@Override
public void close() {
- columnFamilyOptions.close();
+ if (!columnFamilyOptions.isReused()) {
+ columnFamilyOptions.close();
+ }
}
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
index b29b7f1777..577e0364d5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
@@ -29,6 +29,12 @@ public class ManagedColumnFamilyOptions extends ColumnFamilyOptions {
super();
}
+ /**
+ * Indicates if this ColumnFamilyOptions is intentionally used across RocksDB
+ * instances.
+ */
+ private boolean reused = false;
+
public ManagedColumnFamilyOptions(ColumnFamilyOptions columnFamilyOptions) {
super(columnFamilyOptions);
}
@@ -60,6 +66,13 @@ public class ManagedColumnFamilyOptions extends ColumnFamilyOptions {
return this;
}
+ public void setReused(boolean reused) {
+ this.reused = reused;
+ }
+
+ public boolean isReused() {
+ return reused;
+ }
@Override
protected void finalize() throws Throwable {
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org