Posted to hdfs-commits@hadoop.apache.org by ms...@apache.org on 2019/10/20 06:42:49 UTC

[hadoop-ozone] branch master updated: HDDS-2283. Container creation on datanodes takes time because of RocksDB options creation. Contributed by Siddharth Wagle. (#41)

This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 18c4cab  HDDS-2283. Container creation on datanodes takes time because of RocksDB options creation. Contributed by Siddharth Wagle. (#41)
18c4cab is described below

commit 18c4cabdaa64a059dd22a4c6690ad9e933e758c1
Author: Siddharth <sw...@hortonworks.com>
AuthorDate: Sat Oct 19 23:42:44 2019 -0700

    HDDS-2283. Container creation on datanodes takes time because of RocksDB options creation. Contributed by Siddharth Wagle. (#41)
---
 .../hadoop/hdds/utils/MetadataStoreBuilder.java    | 59 +++++++++++++---------
 .../org/apache/hadoop/hdds/utils/RocksDBStore.java | 11 ++--
 .../container/keyvalue/TestKeyValueContainer.java  | 38 ++++++++++----
 3 files changed, 66 insertions(+), 42 deletions(-)

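In short: before this change, MetadataStoreBuilder#build() constructed a fresh
org.rocksdb.Options (a JNI-backed native object), plus an optional Statistics
instance, for every container DB a datanode created, which showed up as
per-container overhead. The commit caches one Options instance per
Configuration object in a ConcurrentHashMap and reuses it. A minimal,
self-contained sketch of the pattern, with hypothetical names rather than the
exact Ozone code:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    /** Hypothetical generic cache illustrating the pattern. */
    public final class SharedOptionsCache<K, V> {
      private final Map<K, V> cache = new ConcurrentHashMap<>();
      private final Function<K, V> factory;

      public SharedOptionsCache(Function<K, V> factory) {
        this.factory = factory;
      }

      public V get(K key) {
        // computeIfAbsent builds the value at most once per key,
        // even when several threads miss at the same time.
        return cache.computeIfAbsent(key, factory);
      }
    }

The commit itself uses an explicit containsKey()/get()/put() sequence rather
than computeIfAbsent(); see the note after the MetadataStoreBuilder hunk below.
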
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
index 85bb6aa..4404115 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdds.utils;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Map;
 import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -32,6 +34,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
+
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.Statistics;
@@ -45,13 +48,18 @@ import org.slf4j.LoggerFactory;
 public class MetadataStoreBuilder {
 
   @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(MetadataStoreBuilder.class);
+  static final Logger LOG = LoggerFactory.getLogger(MetadataStoreBuilder.class);
   private File dbFile;
   private long cacheSize;
   private boolean createIfMissing = true;
   private Optional<Configuration> optionalConf = Optional.empty();
   private String dbType;
+  @VisibleForTesting
+  public static final Map<Configuration, org.rocksdb.Options> CACHED_OPTS =
+      new ConcurrentHashMap<>();
+  @VisibleForTesting
+  public static final OzoneConfiguration DEFAULT_CONF =
+      new OzoneConfiguration();
 
   public static MetadataStoreBuilder newBuilder() {
     return new MetadataStoreBuilder();
@@ -87,7 +95,6 @@ public class MetadataStoreBuilder {
     return this;
   }
 
-
   public MetadataStore build() throws IOException {
     if (dbFile == null) {
       throw new IllegalArgumentException("Failed to build metadata store, "
@@ -95,10 +102,9 @@ public class MetadataStoreBuilder {
     }
 
     // Build db store based on configuration
-    final Configuration conf = optionalConf.orElseGet(
-        () -> new OzoneConfiguration());
+    final Configuration conf = optionalConf.orElse(DEFAULT_CONF);
 
-    if(dbType == null) {
+    if (dbType == null) {
      LOG.debug("dbType is null, using the default from configuration");
       dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
               OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
@@ -115,24 +121,29 @@ public class MetadataStoreBuilder {
       }
       return new LevelDBStore(dbFile, options);
     } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
-      org.rocksdb.Options opts = new org.rocksdb.Options();
-      opts.setCreateIfMissing(createIfMissing);
-
-      if (cacheSize > 0) {
-        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
-        tableConfig.setBlockCacheSize(cacheSize);
-        opts.setTableFormatConfig(tableConfig);
-      }
-
-      String rocksDbStat = conf.getTrimmed(
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
-      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-        Statistics statistics = new Statistics();
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        opts = opts.setStatistics(statistics);
-
+      org.rocksdb.Options opts;
+      // Use cached options if the config object passed down is the same
+      if (CACHED_OPTS.containsKey(conf)) {
+        opts = CACHED_OPTS.get(conf);
+      } else {
+        opts = new org.rocksdb.Options();
+        opts.setCreateIfMissing(createIfMissing);
+        if (cacheSize > 0) {
+          BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
+          tableConfig.setBlockCacheSize(cacheSize);
+          opts.setTableFormatConfig(tableConfig);
+        }
+
+        String rocksDbStat = conf.getTrimmed(
+            OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
+            OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+
+        if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+          Statistics statistics = new Statistics();
+          statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+          opts = opts.setStatistics(statistics);
+        }
+        CACHED_OPTS.put(conf, opts);
       }
       return new RocksDBStore(dbFile, opts);
     }
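
A note on the hunk above: the containsKey()/put() pair is not atomic, so two
threads that miss at the same time can each build an Options object, one of
which silently replaces the other in the map. Because every build in the
common path passes the same long-lived Configuration object, the race is
probably harmless, but computeIfAbsent() would remove it. A hedged sketch of
that variant, not part of this commit:

    org.rocksdb.Options opts = CACHED_OPTS.computeIfAbsent(conf, c -> {
      org.rocksdb.Options o = new org.rocksdb.Options();
      o.setCreateIfMissing(createIfMissing);
      // ... same block-cache and statistics wiring as in the hunk above ...
      return o;
    });
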
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java
index 7dd1bde..2b77363 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java
@@ -50,8 +50,7 @@ import java.util.Map;
  */
 public class RocksDBStore implements MetadataStore {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RocksDBStore.class);
 
   private RocksDB db = null;
   private File dbLocation;
@@ -59,24 +58,20 @@ public class RocksDBStore implements MetadataStore {
   private Options dbOptions;
   private ObjectName statMBeanName;
 
-  public RocksDBStore(File dbFile, Options options)
-      throws IOException {
+  public RocksDBStore(File dbFile, Options options) throws IOException {
     Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
     RocksDB.loadLibrary();
     dbOptions = options;
     dbLocation = dbFile;
     writeOptions = new WriteOptions();
     try {
-
       db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
       if (dbOptions.statistics() != null) {
-
         Map<String, String> jmxProperties = new HashMap<String, String>();
         jmxProperties.put("dbName", dbFile.getName());
         statMBeanName = HddsUtils.registerWithJmxProperties(
             "Ozone", "RocksDbStore", jmxProperties,
-            RocksDBStoreMBean.create(dbOptions.statistics(),
-                dbFile.getName()));
+            RocksDBStoreMBean.create(dbOptions.statistics(), dbFile.getName()));
         if (statMBeanName == null) {
           LOG.warn("jmx registration failed during RocksDB init, db path :{}",
               dbFile.getAbsolutePath());
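
For context on the statistics object registered over JMX here: the Statistics
instance attached to the Options is what backs the MBean's counters. A minimal
standalone sketch of that rocksdbjni wiring (the DB path and the ALL stats
level are illustrative choices, not taken from this commit):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.Statistics;
    import org.rocksdb.StatsLevel;
    import org.rocksdb.TickerType;

    public class StatsDemo {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        Statistics stats = new Statistics();
        stats.setStatsLevel(StatsLevel.ALL);
        Options opts = new Options()
            .setCreateIfMissing(true)
            .setStatistics(stats);
        try (RocksDB db = RocksDB.open(opts, "/tmp/stats-demo-db")) {
          db.put("key".getBytes(), "value".getBytes());
          // Ticker counters are the values the MBean ultimately exposes.
          System.out.println(stats.getTickerCount(TickerType.BYTES_WRITTEN));
        }
      }
    }

One consequence of the new caching is that the Options (and its Statistics)
are now shared across RocksDBStore instances, so a store's close() presumably
must not close them.
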
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 81d3065..ac369aa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
@@ -45,6 +46,7 @@ import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
 import org.mockito.Mockito;
+import org.rocksdb.Options;
 
 import java.io.File;
 
@@ -74,7 +76,6 @@ public class TestKeyValueContainer {
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
-
   private OzoneConfiguration conf;
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -100,9 +101,7 @@ public class TestKeyValueContainer {
         (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
         datanodeId.toString());
 
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-
+    keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
   }
 
   @Test
@@ -159,17 +158,15 @@ public class TestKeyValueContainer {
     // Create Container.
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
+    keyValueContainerData = keyValueContainer.getContainerData();
 
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
+    String containerMetaDataPath = keyValueContainerData.getMetadataPath();
     String chunksPath = keyValueContainerData.getChunksPath();
 
     // Check whether containerMetaDataPath and chunksPath exists or not.
     assertTrue(containerMetaDataPath != null);
     assertTrue(chunksPath != null);
-    //Check whether container file and container db file exists or not.
+    // Check whether container file and container db file exist or not.
     assertTrue(keyValueContainer.getContainerFile().exists(),
         "Container File does not exist");
     assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " +
@@ -313,7 +310,6 @@ public class TestKeyValueContainer {
         keyValueContainer.getContainerDBFile().exists());
   }
 
-
   @Test
   public void testCloseContainer() throws Exception {
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
@@ -390,5 +386,27 @@ public class TestKeyValueContainer {
     }
   }
 
+  @Test
+  public void testRocksDBCreateUsesCachedOptions() throws Exception {
+    // Create Container 1
+    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+    Assert.assertTrue("RocksDB options should be cached.",
+        MetadataStoreBuilder.CACHED_OPTS.containsKey(conf));
+
+    Options opts = MetadataStoreBuilder.CACHED_OPTS.get(conf);
+
+    // Create Container 2
+    keyValueContainerData = new KeyValueContainerData(2L,
+        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+        datanodeId.toString());
 
+    keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
+    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+    Assert.assertEquals(1, MetadataStoreBuilder.CACHED_OPTS.size());
+    Options cachedOpts = MetadataStoreBuilder.CACHED_OPTS.get(conf);
+    Assert.assertEquals("Cached options should not be replaced.",
+        opts, cachedOpts);
+
+  }
 }


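To run the new test locally, something like the usual surefire invocation
should work from the repository root (module path per the diff above):

    mvn -pl hadoop-hdds/container-service test -Dtest=TestKeyValueContainer
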
---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: hdfs-commits-help@hadoop.apache.org