Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2019/03/05 19:36:07 UTC

[hadoop] branch trunk updated: HDDS-1171. Add benchmark for OM and OM client in Genesis. Contributed by Lokesh Jain.

This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 7fd8901  HDDS-1171. Add benchmark for OM and OM client in Genesis. Contributed by Lokesh Jain.
7fd8901 is described below

commit 7fd890116ab941d51b28cf84b01330b6e33c9b18
Author: Anu Engineer <ae...@apache.org>
AuthorDate: Tue Mar 5 11:31:09 2019 -0800

    HDDS-1171. Add benchmark for OM and OM client in Genesis.
    Contributed by Lokesh Jain.
---
 .../hadoop/ozone/genesis/BenchMarkOMClient.java    | 153 +++++++++++++++++
 .../ozone/genesis/BenchMarkOzoneManager.java       | 183 +++++++++++++++++++++
 ...enchMarkBlockManager.java => BenchMarkSCM.java} | 108 ++++--------
 .../org/apache/hadoop/ozone/genesis/Genesis.java   |  25 +--
 .../apache/hadoop/ozone/genesis/GenesisUtil.java   |  96 +++++++++++
 5 files changed, 475 insertions(+), 90 deletions(-)
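
For reference, the new benchmarks are selected through the -benchmark option of the genesis tool (see the Genesis.java change below). Following the example syntax given in that option's description, a run limited to the classes added here might look like:

    ozone genesis -benchmark BenchMarkSCM,BenchMarkOzoneManager,BenchMarkOMClient

BenchMarkSCM and BenchMarkOzoneManager start the services in-process; BenchMarkOMClient expects a running OM to connect to (see the note after its diff).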

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java
new file mode 100644
index 0000000..cfc1e1e
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.genesis;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ratis.protocol.ClientId;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+
+/**
+ * Benchmarks OM Client.
+ */
+@State(Scope.Thread)
+public class BenchMarkOMClient {
+
+  private static String testDir;
+  private static ReentrantLock lock = new ReentrantLock();
+  private static String volumeName = UUID.randomUUID().toString();
+  private static String bucketName = UUID.randomUUID().toString();
+  private static List<String> keyNames = new ArrayList<>();
+  private static List<Long> clientIDs = new ArrayList<>();
+  private static OzoneManagerProtocolClientSideTranslatorPB ozoneManagerClient;
+  private static volatile boolean bool = false;
+
+  @Setup(Level.Trial)
+  public static void initialize() throws IOException {
+    try {
+      lock.lock();
+      if (!bool) {
+        bool = true;
+        OzoneConfiguration conf = new OzoneConfiguration();
+        conf.setBoolean(OZONE_ENABLED, true);
+        testDir = GenesisUtil.getTempPath()
+            .resolve(RandomStringUtils.randomNumeric(7)).toString();
+        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
+
+        // set the ip address and port number for the OM service
+        conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "OMADDR:PORT");
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+        InetSocketAddress omAddress = OmUtils.getOmAddressForClients(conf);
+        RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
+            ProtobufRpcEngine.class);
+        ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
+            RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress,
+                ugi, conf, NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)), ClientId.randomId().toString());
+
+        // prepare OM
+        ozoneManagerClient.createVolume(
+            new OmVolumeArgs.Builder().setVolume(volumeName)
+                .setAdminName(UserGroupInformation.getLoginUser().getUserName())
+                .setOwnerName(UserGroupInformation.getLoginUser().getUserName())
+                .build());
+        ozoneManagerClient.createBucket(
+            new OmBucketInfo.Builder().setBucketName(bucketName)
+                .setVolumeName(volumeName).build());
+        createKeys(10);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private static void createKeys(int numKeys) throws IOException {
+    for (int i = 0; i < numKeys; i++) {
+      String key = UUID.randomUUID().toString();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(key)
+          .setDataSize(0)
+          .setFactor(HddsProtos.ReplicationFactor.ONE)
+          .setType(HddsProtos.ReplicationType.RATIS)
+          .build();
+      OpenKeySession keySession = ozoneManagerClient.openKey(omKeyArgs);
+      long clientID = keySession.getId();
+      keyNames.add(key);
+      clientIDs.add(clientID);
+    }
+  }
+
+  @TearDown(Level.Trial)
+  public static void tearDown() throws IOException {
+    try {
+      lock.lock();
+      if (ozoneManagerClient != null) {
+        ozoneManagerClient.close();
+        ozoneManagerClient = null;
+        FileUtil.fullyDelete(new File(testDir));
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Threads(6)
+  @Benchmark
+  public void allocateBlockBenchMark(BenchMarkOMClient state,
+      Blackhole bh) throws IOException {
+    int index = (int) (Math.random() * keyNames.size());
+    String key = keyNames.get(index);
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(key)
+        .setDataSize(50)
+        .setFactor(HddsProtos.ReplicationFactor.ONE)
+        .setType(HddsProtos.ReplicationType.RATIS)
+        .build();
+    state.ozoneManagerClient
+        .allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList());
+  }
+}
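
Note that BenchMarkOMClient exercises the client-side translator against an externally running OM rather than starting one in-process, so the "OMADDR:PORT" placeholder in initialize() has to be replaced with the address of a live OM before the benchmark is run. A minimal sketch of that edit, with a hypothetical host and port:

    // point the benchmark at an externally running OM (hypothetical address)
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "om-host.example.com:9862");

The rest of the setup (volume, bucket and the ten pre-opened keys) is then created against that OM through the RPC proxy.
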
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java
new file mode 100644
index 0000000..12932f7
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.genesis;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+
+/**
+ * Benchmarks OzoneManager.
+ */
+@State(Scope.Thread)
+public class BenchMarkOzoneManager {
+
+  private static String testDir;
+  private static OzoneManager om;
+  private static StorageContainerManager scm;
+  private static ReentrantLock lock = new ReentrantLock();
+  private static String volumeName = UUID.randomUUID().toString();
+  private static String bucketName = UUID.randomUUID().toString();
+  private static List<String> keyNames = new ArrayList<>();
+  private static List<Long> clientIDs = new ArrayList<>();
+
+  private static int numPipelines = 1;
+  private static int numContainersPerPipeline = 3;
+
+  @Setup(Level.Trial)
+  public static void initialize()
+      throws IOException, AuthenticationException, InterruptedException {
+    try {
+      lock.lock();
+      if (scm == null) {
+        OzoneConfiguration conf = new OzoneConfiguration();
+        conf.setBoolean(OZONE_ENABLED, true);
+        testDir = GenesisUtil.getTempPath()
+            .resolve(RandomStringUtils.randomNumeric(7)).toString();
+        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
+
+        GenesisUtil.configureSCM(conf, 10);
+        GenesisUtil.configureOM(conf, 20);
+        conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
+            numContainersPerPipeline);
+        GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
+
+        scm = GenesisUtil.getScm(conf, new SCMConfigurator());
+        scm.start();
+        om = GenesisUtil.getOm(conf);
+        om.start();
+
+        // prepare SCM
+        PipelineManager pipelineManager = scm.getPipelineManager();
+        for (Pipeline pipeline : pipelineManager
+            .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
+          pipelineManager.openPipeline(pipeline.getId());
+        }
+        scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
+        Thread.sleep(1000);
+
+        // prepare OM
+        om.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName)
+            .setAdminName(UserGroupInformation.getLoginUser().getUserName())
+            .setOwnerName(UserGroupInformation.getLoginUser().getUserName())
+            .build());
+        om.createBucket(new OmBucketInfo.Builder().setBucketName(bucketName)
+            .setVolumeName(volumeName).build());
+        createKeys(100000);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private static void createKeys(int numKeys) throws IOException {
+    for (int i = 0; i < numKeys; i++) {
+      String key = UUID.randomUUID().toString();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(key)
+          .setDataSize(0)
+          .setFactor(HddsProtos.ReplicationFactor.THREE)
+          .setType(HddsProtos.ReplicationType.RATIS)
+          .build();
+      OpenKeySession keySession = om.getKeyManager().openKey(omKeyArgs);
+      long clientID = keySession.getId();
+      keyNames.add(key);
+      clientIDs.add(clientID);
+    }
+  }
+
+  @TearDown(Level.Trial)
+  public static void tearDown() {
+    try {
+      lock.lock();
+      if (scm != null) {
+        scm.stop();
+        scm.join();
+        scm = null;
+        om.stop();
+        om.join();
+        om = null;
+        FileUtil.fullyDelete(new File(testDir));
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Threads(4)
+  @Benchmark
+  public void allocateBlockBenchMark(BenchMarkOzoneManager state,
+      Blackhole bh) throws IOException {
+    int index = (int) (Math.random() * keyNames.size());
+    String key = keyNames.get(index);
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(key)
+        .setDataSize(50)
+        .setFactor(HddsProtos.ReplicationFactor.THREE)
+        .setType(HddsProtos.ReplicationType.RATIS)
+        .build();
+    state.om.allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList());
+  }
+
+  @Threads(4)
+  @Benchmark
+  public void createAndCommitKeyBenchMark(BenchMarkOzoneManager state,
+      Blackhole bh) throws IOException {
+    String key = UUID.randomUUID().toString();
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(key)
+        .setDataSize(50)
+        .setFactor(HddsProtos.ReplicationFactor.THREE)
+        .setType(HddsProtos.ReplicationType.RATIS)
+        .build();
+    OpenKeySession openKeySession = state.om.openKey(omKeyArgs);
+    state.om.allocateBlock(omKeyArgs, openKeySession.getId(),
+        new ExcludeList());
+  }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkBlockManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
similarity index 52%
rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkBlockManager.java
rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
index edfa397..7750d09 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkBlockManager.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
@@ -18,77 +18,45 @@
 
 package org.apache.hadoop.ozone.genesis;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.locks.ReentrantLock;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
 import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.infra.Blackhole;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.UUID;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.concurrent.locks.ReentrantLock;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
 
 /**
  * Benchmarks BlockManager class.
  */
 @State(Scope.Thread)
-public class BenchMarkBlockManager {
+public class BenchMarkSCM {
 
   private static String testDir;
   private static StorageContainerManager scm;
-  private static PipelineManager pipelineManager;
   private static BlockManager blockManager;
   private static ReentrantLock lock = new ReentrantLock();
 
-  @Param({"1", "10", "100", "1000", "10000", "100000"})
+  @Param({ "1", "10", "100", "1000", "10000", "100000" })
   private static int numPipelines;
-  @Param({"3", "10", "100"})
+  @Param({ "3", "10", "100" })
   private static int numContainersPerPipeline;
 
-  private static StorageContainerManager getScm(OzoneConfiguration conf,
-      SCMConfigurator configurator) throws IOException,
-      AuthenticationException {
-    conf.setBoolean(OZONE_ENABLED, true);
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if(scmStore.getState() != Storage.StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return new StorageContainerManager(conf, configurator);
-  }
-
   @Setup(Level.Trial)
   public static void initialize()
       throws IOException, AuthenticationException, InterruptedException {
@@ -96,29 +64,26 @@ public class BenchMarkBlockManager {
       lock.lock();
       if (scm == null) {
         OzoneConfiguration conf = new OzoneConfiguration();
+        conf.setBoolean(OZONE_ENABLED, true);
         testDir = GenesisUtil.getTempPath()
             .resolve(RandomStringUtils.randomNumeric(7)).toString();
         conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
+
+        GenesisUtil.configureSCM(conf, 10);
         conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
             numContainersPerPipeline);
-        final File metaDir = ServerUtils.getScmDbDir(conf);
-        final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
-        int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-            OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-        MetadataStore pipelineStore =
-            MetadataStoreBuilder.newBuilder().setCreateIfMissing(true)
-                .setConf(conf).setDbFile(pipelineDBPath)
-                .setCacheSize(cacheSize * OzoneConsts.MB).build();
-        addPipelines(ReplicationFactor.THREE,
-            pipelineStore);
-        pipelineStore.close();
-        scm = getScm(conf, new SCMConfigurator());
-        pipelineManager = scm.getPipelineManager();
+        GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
+
+        scm = GenesisUtil.getScm(conf, new SCMConfigurator());
+        scm.start();
+        blockManager = scm.getScmBlockManager();
+
+        // prepare SCM
+        PipelineManager pipelineManager = scm.getPipelineManager();
         for (Pipeline pipeline : pipelineManager
             .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
           pipelineManager.openPipeline(pipeline.getId());
         }
-        blockManager = scm.getScmBlockManager();
         scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
         Thread.sleep(1000);
       }
@@ -127,39 +92,24 @@ public class BenchMarkBlockManager {
     }
   }
 
-  @Setup(Level.Trial)
+  @TearDown(Level.Trial)
   public static void tearDown() {
-    if (scm != null) {
-      scm.stop();
-      scm.join();
-      FileUtil.fullyDelete(new File(testDir));
-    }
-  }
-
-  private static void addPipelines(ReplicationFactor factor,
-      MetadataStore pipelineStore) throws IOException {
-    List<DatanodeDetails> nodes = new ArrayList<>();
-    for (int i = 0; i < factor.getNumber(); i++) {
-      nodes
-          .add(GenesisUtil.createDatanodeDetails(UUID.randomUUID().toString()));
-    }
-    for (int i = 0; i < numPipelines; i++) {
-      Pipeline pipeline =
-          Pipeline.newBuilder()
-              .setState(Pipeline.PipelineState.OPEN)
-              .setId(PipelineID.randomId())
-              .setType(ReplicationType.RATIS)
-              .setFactor(factor)
-              .setNodes(nodes)
-              .build();
-      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
-          pipeline.getProtobufMessage().toByteArray());
+    try {
+      lock.lock();
+      if (scm != null) {
+        scm.stop();
+        scm.join();
+        scm = null;
+        FileUtil.fullyDelete(new File(testDir));
+      }
+    } finally {
+      lock.unlock();
     }
   }
 
   @Threads(4)
   @Benchmark
-  public void allocateBlockBenchMark(BenchMarkBlockManager state,
+  public void allocateBlockBenchMark(BenchMarkSCM state,
       Blackhole bh) throws IOException {
     state.blockManager
         .allocateBlock(50, ReplicationType.RATIS, ReplicationFactor.THREE,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
index 74de2d9..2de9b0f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
@@ -39,20 +39,16 @@ import picocli.CommandLine.Command;
     mixinStandardHelpOptions = true)
 public final class Genesis {
 
-  // For adding benchmark to Genesis add the benchmark name in the default value
-  // and description for this option.
-  @Option(names = "-benchmark", required = true, split = ",",
-      defaultValue = "BenchMarkContainerStateMap,BenchMarkOMKeyAllocation,"
-          + "BenchMarkBlockManager,BenchMarkMetadataStoreReads,"
-          + "BenchMarkMetadataStoreWrites,BenchMarkDatanodeDispatcher"
-          + "BenchMarkRocksDbStore",
-      description =
+  // After adding benchmark in genesis package add the benchmark name in the
+  // description for this option.
+  @Option(names = "-benchmark", split = ",", description =
       "Option used for specifying benchmarks to run.\n"
           + "Ex. ozone genesis -benchmark BenchMarkContainerStateMap,"
           + "BenchMarkOMKeyAllocation.\n"
           + "Possible benchmarks which can be used are "
           + "{BenchMarkContainerStateMap, BenchMarkOMKeyAllocation, "
-          + "BenchMarkBlockManager, BenchMarkMetadataStoreReads, "
+          + "BenchMarkOzoneManager, BenchMarkOMClient, "
+          + "BenchMarkSCM, BenchMarkMetadataStoreReads, "
           + "BenchMarkMetadataStoreWrites, BenchMarkDatanodeDispatcher, "
           + "BenchMarkRocksDbStore}")
   private static String[] benchmarks;
@@ -74,8 +70,15 @@ public final class Genesis {
     }
 
     OptionsBuilder optionsBuilder = new OptionsBuilder();
-    for (String benchmark : benchmarks) {
-      optionsBuilder.include(benchmark);
+    if (benchmarks != null) {
+      // The OptionsBuilder#include takes a regular expression as argument.
+      // Therefore it is important to keep the benchmark names unique for
+      // running a benchmark. For example if there are two benchmarks -
+      // BenchMarkOM and BenchMarkOMClient and we include BenchMarkOM then
+      // both the benchmarks will be run.
+      for (String benchmark : benchmarks) {
+        optionsBuilder.include(benchmark);
+      }
     }
     optionsBuilder.warmupIterations(2)
         .measurementIterations(20)
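
As the comment above explains, OptionsBuilder#include treats each -benchmark value as a regular expression matched against the fully qualified benchmark name, so a name that is also a prefix of another class (the BenchMarkOM vs. BenchMarkOMClient example) selects both. A small sketch of how a pattern could be anchored to a single class, assuming JMH's usual substring-regex matching:

    // matches only benchmarks declared in BenchMarkOMClient
    optionsBuilder.include("\\.BenchMarkOMClient\\.");

Keeping benchmark class names distinct, as the comment recommends, makes plain names safe to pass on the command line.
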
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
index 7936d0c..6bff82b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
@@ -20,14 +20,37 @@ package org.apache.hadoop.ozone.genesis;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
+import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.utils.MetadataStore;
 import org.apache.hadoop.utils.MetadataStoreBuilder;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
 
 /**
  * Utility class for benchmark test cases.
@@ -97,4 +120,77 @@ public final class GenesisUtil {
         .addPort(restPort);
     return builder.build();
   }
+
+  static StorageContainerManager getScm(OzoneConfiguration conf,
+      SCMConfigurator configurator) throws IOException,
+      AuthenticationException {
+    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
+    if(scmStore.getState() != Storage.StorageState.INITIALIZED) {
+      String clusterId = UUID.randomUUID().toString();
+      String scmId = UUID.randomUUID().toString();
+      scmStore.setClusterId(clusterId);
+      scmStore.setScmId(scmId);
+      // writes the version file properties
+      scmStore.initialize();
+    }
+    return new StorageContainerManager(conf, configurator);
+  }
+
+  static void configureSCM(Configuration conf, int numHandlers) {
+    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
+    conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
+    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+    conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numHandlers);
+  }
+
+  static void addPipelines(HddsProtos.ReplicationFactor factor,
+      int numPipelines, Configuration conf) throws IOException {
+    final File metaDir = ServerUtils.getScmDbDir(conf);
+    final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
+    int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
+        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
+    MetadataStore pipelineStore =
+        MetadataStoreBuilder.newBuilder().setCreateIfMissing(true)
+            .setConf(conf).setDbFile(pipelineDBPath)
+            .setCacheSize(cacheSize * OzoneConsts.MB).build();
+
+    List<DatanodeDetails> nodes = new ArrayList<>();
+    for (int i = 0; i < factor.getNumber(); i++) {
+      nodes
+          .add(GenesisUtil.createDatanodeDetails(UUID.randomUUID().toString()));
+    }
+    for (int i = 0; i < numPipelines; i++) {
+      Pipeline pipeline =
+          Pipeline.newBuilder()
+              .setState(Pipeline.PipelineState.OPEN)
+              .setId(PipelineID.randomId())
+              .setType(HddsProtos.ReplicationType.RATIS)
+              .setFactor(factor)
+              .setNodes(nodes)
+              .build();
+      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
+          pipeline.getProtobufMessage().toByteArray());
+    }
+
+    pipelineStore.close();
+  }
+
+  static OzoneManager getOm(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+    OMStorage omStorage = new OMStorage(conf);
+    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
+    if (omStorage.getState() != Storage.StorageState.INITIALIZED) {
+      omStorage.setClusterId(scmStore.getClusterID());
+      omStorage.setScmId(scmStore.getScmId());
+      omStorage.setOmId(UUID.randomUUID().toString());
+      omStorage.initialize();
+    }
+    return OzoneManager.createOm(null, conf);
+  }
+
+  static void configureOM(Configuration conf, int numHandlers) {
+    conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+    conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numHandlers);
+  }
 }

