Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2017/08/23 17:24:20 UTC

hadoop git commit: HDFS-12337. Ozone: Concurrent RocksDB open calls fail because of "No locks available". Contributed by Mukul Kumar Singh.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b1bbd9a03 -> 076bd5d64


HDFS-12337. Ozone: Concurrent RocksDB open calls fail because of "No locks available". Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/076bd5d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/076bd5d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/076bd5d6

Branch: refs/heads/HDFS-7240
Commit: 076bd5d64a811931343e7e36d809abacd6fd9d8d
Parents: b1bbd9a
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Aug 23 10:21:31 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Aug 23 10:21:31 2017 -0700

----------------------------------------------------------------------
 .../container/common/helpers/KeyUtils.java      | 15 ++----
 .../server/ratis/XceiverServerRatis.java        |  6 ++-
 .../container/common/utils/ContainerCache.java  | 48 ++++++++++----------
 .../apache/hadoop/ozone/MiniOzoneCluster.java   | 25 ++++++++--
 .../hadoop/ozone/web/client/TestKeys.java       |  8 ----
 5 files changed, 54 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
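
The root cause is a check-then-open race: KeyUtils.getDB() used to look up the cache and, on a miss, open the RocksDB instance outside the cache lock, so two concurrent callers could both miss and both try to open the same DB, and RocksDB rejects the second open with "No locks available". The commit moves the open into ContainerCache.getDB(containerName, containerDBPath), which runs under the cache's ReentrantLock, and drops the now-redundant putDB(). A minimal sketch of that get-or-create-under-lock pattern follows; the names StoreCache, Store and openStore are illustrative stand-ins, not the real ContainerCache/MetadataStore types.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public final class StoreCache {
  /** Placeholder handle type standing in for MetadataStore. */
  public interface Store { }

  private final Lock lock = new ReentrantLock();
  private final Map<String, Store> stores = new HashMap<>();

  /** Returns the cached handle for a container, opening it at most once. */
  public Store getStore(String containerName, String dbPath) throws IOException {
    lock.lock();
    try {
      Store store = stores.get(containerName);
      if (store == null) {
        // The open happens while the lock is held, so a second caller
        // blocks here instead of racing into a concurrent RocksDB open
        // that would fail with "No locks available".
        store = openStore(dbPath);
        stores.put(containerName, store);
      }
      return store;
    } finally {
      lock.unlock();
    }
  }

  // Illustrative stand-in for MetadataStoreBuilder; not the real Ozone API.
  private static Store openStore(String dbPath) throws IOException {
    return new Store() { };
  }
}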


http://git-wip-us.apache.org/repos/asf/hadoop/blob/076bd5d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
index bced2e2..d508e91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
@@ -64,18 +64,11 @@ public final class KeyUtils {
     ContainerCache cache = ContainerCache.getInstance(conf);
     Preconditions.checkNotNull(cache);
     try {
-      MetadataStore db = cache.getDB(container.getContainerName());
-      if (db == null) {
-        db = MetadataStoreBuilder.newBuilder()
-            .setDbFile(new File(container.getDBPath()))
-            .setCreateIfMissing(false)
-            .build();
-        cache.putDB(container.getContainerName(), db);
-      }
-      return db;
+      return cache.getDB(container.getContainerName(), container.getDBPath());
     } catch (IOException ex) {
-      String message = "Unable to open DB. DB Name: %s, Path: %s. ex: %s"
-          .format(container.getContainerName(), container.getDBPath(), ex);
+      String message =
+          String.format("Unable to open DB. DB Name: %s, Path: %s. ex: %s",
+          container.getContainerName(), container.getDBPath(), ex.getMessage());
       throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/076bd5d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index ba613c8..5bd1211 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -41,7 +41,9 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.ServerSocket;
+import java.net.SocketAddress;
 import java.util.Collections;
 import java.util.Objects;
 
@@ -113,8 +115,10 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     if (ozoneConf.getBoolean(OzoneConfigKeys
             .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT)) {
-      try (ServerSocket socket = new ServerSocket(0)) {
+      try (ServerSocket socket = new ServerSocket()) {
         socket.setReuseAddress(true);
+        SocketAddress address = new InetSocketAddress(0);
+        socket.bind(address);
         localPort = socket.getLocalPort();
         LOG.info("Found a free port for the server : {}", localPort);
         // If we have random local ports configured this means that it
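
The XceiverServerRatis hunk changes how the random test port is picked: new ServerSocket(0) binds in the constructor, and the JDK leaves the behavior of enabling SO_REUSEADDR after bind undefined, so the patch creates an unbound socket, sets the option, and only then binds to an ephemeral port. A small self-contained sketch of the same pattern; FreePort and pick() are illustrative names, not part of the patch.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public final class FreePort {
  /** Picks a free local port the way the patched code does. */
  public static int pick() throws IOException {
    // new ServerSocket(0) would bind in the constructor, which is too
    // late for setReuseAddress() to reliably take effect; create the
    // socket unbound, set the option, then bind to an ephemeral port.
    try (ServerSocket socket = new ServerSocket()) {
      socket.setReuseAddress(true);
      socket.bind(new InetSocketAddress(0));
      return socket.getLocalPort();
    }
  }
}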

http://git-wip-us.apache.org/repos/asf/hadoop/blob/076bd5d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index 0dc08fb..e8bee35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.ozone.container.common.utils;
 import com.google.common.base.Preconditions;
 import org.apache.commons.collections.MapIterator;
 import org.apache.commons.collections.map.LRUMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -35,7 +37,8 @@ import java.util.concurrent.locks.ReentrantLock;
  * container cache is a LRUMap that maintains the DB handles.
  */
 public final class ContainerCache extends LRUMap {
-  static final Log LOG = LogFactory.getLog(ContainerCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerCache.class);
   private final Lock lock = new ReentrantLock();
   private static ContainerCache cache;
   private static final float LOAD_FACTOR = 0.75f;
@@ -115,17 +118,31 @@ public final class ContainerCache extends LRUMap {
   }
 
   /**
-   * Returns a DB handle if available, null otherwise.
+   * Returns a DB handle if available, create the handler otherwise.
    *
    * @param containerName - Name of the container.
    * @return MetadataStore.
    */
-  public MetadataStore getDB(String containerName) {
+  public MetadataStore getDB(String containerName, String containerDBPath)
+      throws IOException {
     Preconditions.checkNotNull(containerName);
     Preconditions.checkState(!containerName.isEmpty());
     lock.lock();
     try {
-      return (MetadataStore) this.get(containerName);
+      MetadataStore db = (MetadataStore) this.get(containerName);
+
+      if (db == null) {
+        db = MetadataStoreBuilder.newBuilder()
+            .setDbFile(new File(containerDBPath))
+            .setCreateIfMissing(false)
+            .build();
+        this.put(containerName, db);
+      }
+      return db;
+    } catch (Exception e) {
+      LOG.error("Error opening DB. Container:{} ContainerPath:{}",
+          containerName, containerDBPath, e);
+      throw e;
     } finally {
       lock.unlock();
     }
@@ -141,28 +158,11 @@ public final class ContainerCache extends LRUMap {
     Preconditions.checkState(!containerName.isEmpty());
     lock.lock();
     try {
-      MetadataStore db = this.getDB(containerName);
+      MetadataStore db = (MetadataStore)this.get(containerName);
       closeDB(containerName, db);
       this.remove(containerName);
     } finally {
       lock.unlock();
     }
   }
-
-  /**
-   * Add a new DB to the cache.
-   *
-   * @param containerName - Name of the container
-   * @param db            - DB handle
-   */
-  public void putDB(String containerName, MetadataStore db) {
-    Preconditions.checkNotNull(containerName);
-    Preconditions.checkState(!containerName.isEmpty());
-    lock.lock();
-    try {
-      this.put(containerName, db);
-    } finally {
-      lock.unlock();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/076bd5d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index e91c523..e0b6ccb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
 import org.apache.hadoop.ozone.web.client.OzoneRestClient;
@@ -57,6 +58,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
     .DFS_CONTAINER_IPC_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys
     .DFS_CONTAINER_IPC_RANDOM_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_RATIS_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
+
 import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos.NodeState
     .HEALTHY;
 import static org.junit.Assert.assertFalse;
@@ -148,14 +154,25 @@ public final class MiniOzoneCluster extends MiniDFSCluster
   public boolean restartDataNode(int i, boolean keepPort) throws IOException {
     if (keepPort) {
       DataNodeProperties dnProp = dataNodes.get(i);
-      int currentPort = dnProp.getDatanode().getOzoneContainerManager()
-          .getContainerServerPort();
+      OzoneContainer container =
+          dnProp.getDatanode().getOzoneContainerManager();
       Configuration config = dnProp.getConf();
+      int currentPort = container.getContainerServerPort();
       config.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
       config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
+      int ratisPort = container.getRatisContainerServerPort();
+      config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
+      config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
+    }
+    boolean status =  super.restartDataNode(i, keepPort);
+
+    try {
+      this.waitActive();
+      this.waitForHeartbeatProcessed();
+      this.waitOzoneReady();
+    } catch (TimeoutException | InterruptedException e) {
+      Thread.interrupted();
     }
-    boolean status =  super.restartDataNode(i, true);
-    this.waitActive();
     return status;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/076bd5d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index af871e5..5e07fa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -71,11 +71,6 @@ public class TestKeys {
 
   /**
    * Create a MiniDFSCluster for testing.
-   *
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local
-   * directory to emulate Ozone backend.
-   *
    * @throws IOException
    */
   @BeforeClass
@@ -83,9 +78,6 @@ public class TestKeys {
     OzoneConfiguration conf = new OzoneConfiguration();
 
     path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-                            OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
     ozoneCluster = new MiniOzoneCluster.Builder(conf)

