Posted to common-commits@hadoop.apache.org by ww...@apache.org on 2017/09/20 02:12:05 UTC

hadoop git commit: HDFS-12329. Ozone: Ratis: Readonly calls in XceiverClientRatis should use sendReadOnly. Contributed by Mukul Kumar Singh.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 f01017b5b -> 68f7cc620


HDFS-12329. Ozone: Ratis: Readonly calls in XceiverClientRatis should use sendReadOnly. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68f7cc62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68f7cc62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68f7cc62

Branch: refs/heads/HDFS-7240
Commit: 68f7cc6206e42974e2c624ea7fc629968a3195e3
Parents: f01017b
Author: Weiwei Yang <ww...@apache.org>
Authored: Wed Sep 20 10:08:15 2017 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Wed Sep 20 10:08:15 2017 +0800

----------------------------------------------------------------------
 .../apache/hadoop/scm/XceiverClientRatis.java   | 50 +++++++++++++++++---
 .../hadoop/hdfs/server/datanode/DataNode.java   |  9 ++++
 .../apache/hadoop/ozone/MiniOzoneCluster.java   | 20 +++++++-
 3 files changed, 71 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68f7cc62/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
index d6c7937..3551ceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
@@ -20,14 +20,17 @@ package org.apache.hadoop.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
 import org.apache.ratis.RatisHelper;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -94,13 +97,48 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     return Objects.requireNonNull(client.get(), "client is null");
   }
 
+  private boolean isReadOnly(ContainerCommandRequestProto proto) {
+    switch (proto.getCmdType()) {
+    case ReadContainer:
+    case ReadChunk:
+    case ListKey:
+    case GetKey:
+    case GetSmallFile:
+    case ListContainer:
+    case ListChunk:
+      return true;
+    case CloseContainer:
+    case WriteChunk:
+    case UpdateContainer:
+    case CompactChunk:
+    case CreateContainer:
+    case DeleteChunk:
+    case DeleteContainer:
+    case DeleteKey:
+    case PutKey:
+    case PutSmallFile:
+    default:
+      return false;
+    }
+  }
+
+  private RaftClientReply sendRequest(ContainerCommandRequestProto request)
+      throws IOException {
+    boolean isReadOnlyRequest = isReadOnly(request);
+    ByteString byteString =
+        ShadedProtoUtil.asShadedByteString(request.toByteArray());
+    LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
+    final RaftClientReply reply =  isReadOnlyRequest ?
+        getClient().sendReadOnly(() -> byteString) :
+        getClient().send(() -> byteString);
+    LOG.debug("reply {} {}", isReadOnlyRequest, reply);
+    return reply;
+  }
+
   @Override
   public ContainerCommandResponseProto sendCommand(
       ContainerCommandRequestProto request) throws IOException {
-    LOG.debug("sendCommand {}", request);
-    final RaftClientReply reply = getClient().send(
-        () -> ShadedProtoUtil.asShadedByteString(request.toByteArray()));
-    LOG.debug("reply {}", reply);
+    final RaftClientReply reply = sendRequest(request);
     Preconditions.checkState(reply.isSuccess());
     return ContainerCommandResponseProto.parseFrom(
         ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
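
Note on the hunk above: the "() -> byteString" lambdas compile because, in this Ratis version, the payload parameter of send/sendReadOnly is a functional type whose single method yields the shaded ByteString, so a lambda can supply the content directly. A more explicit spelling of the same dispatch, as a sketch that mirrors the patch rather than adding any new API, and assuming Message is that functional parameter type as the lambda usage implies:

    // Same routing as sendRequest() above, spelled out: read-only commands go
    // through sendReadOnly, mutating commands go through the normal send path.
    final Message payload = () -> byteString;
    final RaftClientReply reply = isReadOnlyRequest
        ? getClient().sendReadOnly(payload)
        : getClient().send(payload);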

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68f7cc62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 9096831..6be1b9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1601,6 +1601,15 @@ public class DataNode extends ReconfigurableBase
     return this.datanodeStateMachine.getContainer();
   }
 
+  @VisibleForTesting
+  public DatanodeStateMachine.DatanodeStates getOzoneStateMachineState() {
+    if (this.datanodeStateMachine != null) {
+      return this.datanodeStateMachine.getContext().getState();
+    }
+    // if the state machine doesn't exist then DN initialization is in progress
+    return DatanodeStateMachine.DatanodeStates.INIT;
+  }
+
   /**
    * After the block pool has contacted the NN, registers that block pool
    * with the secret manager, updating it with the secrets provided by the NN.
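
Note on the accessor added above: it deliberately falls back to DatanodeStates.INIT while the DatanodeStateMachine is still null, so callers can poll it during datanode startup without null checks. A caller-side sketch, assuming dn is a DataNode obtained from a running cluster:

    // INIT is reported until the Ozone state machine has been constructed,
    // RUNNING once the datanode is ready to serve Ozone requests.
    DatanodeStateMachine.DatanodeStates state = dn.getOzoneStateMachineState();
    boolean ozoneReady = (state == DatanodeStateMachine.DatanodeStates.RUNNING);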

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68f7cc62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index e0b6ccb..3e90a34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.container.common
+    .statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
@@ -168,8 +170,7 @@ public final class MiniOzoneCluster extends MiniDFSCluster
 
     try {
       this.waitActive();
-      this.waitForHeartbeatProcessed();
-      this.waitOzoneReady();
+      waitDatanodeOzoneReady(i);
     } catch (TimeoutException | InterruptedException e) {
       Thread.interrupted();
     }
@@ -264,6 +265,21 @@ public final class MiniOzoneCluster extends MiniDFSCluster
   }
 
   /**
+   * Waits for a particular Datanode to be ready for processing ozone requests.
+   */
+  public void waitDatanodeOzoneReady(int dnIndex)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> {
+      DatanodeStateMachine.DatanodeStates state =
+          dataNodes.get(dnIndex).getDatanode().getOzoneStateMachineState();
+      final boolean rebootComplete =
+          (state == DatanodeStateMachine.DatanodeStates.RUNNING);
+      LOG.info("{} Current state:{}", rebootComplete, state);
+      return rebootComplete;
+    }, 1000, 60 * 1000); //wait for 1 min.
+  }
+
+  /**
    * Waits for SCM to be out of Chill Mode. Many tests can be run iff we are out
    * of Chill mode.
    *
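
Note on the helper added above: it polls GenericTestUtils.waitFor every 1000 ms for up to 60 seconds until the chosen datanode's Ozone state machine reports RUNNING, and the restart path in the earlier hunk now calls it in place of the cluster-wide waitForHeartbeatProcessed/waitOzoneReady pair. A direct-use sketch for a test that bounces a datanode; the cluster variable and the restartDataNode(int) call are assumptions based on the MiniDFSCluster API referenced in this patch:

    // Restart datanode 0 and block until its Ozone state machine is RUNNING again.
    cluster.restartDataNode(0);
    cluster.waitDatanodeOzoneReady(0);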

