You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by bo...@apache.org on 2018/08/02 17:17:36 UTC
[13/50] [abbrv] hadoop git commit: HDDS-302. Fix javadoc and add
implementation details in ContainerStateMachine. Contributed by Shashikant
Banerjee.
HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/952dc2fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/952dc2fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/952dc2fd
Branch: refs/heads/YARN-7402
Commit: 952dc2fd557f9aaf0f144ee32d0b7731a84bad73
Parents: 3108d27
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon Jul 30 18:45:58 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon Jul 30 18:45:58 2018 +0530
----------------------------------------------------------------------
.../hadoop/hdds/scm/XceiverClientRatis.java | 30 ++----------------
.../java/org/apache/hadoop/hdds/HddsUtils.java | 33 ++++++++++++++++++++
.../server/ratis/ContainerStateMachine.java | 14 ++++++++-
3 files changed, 49 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0effa8f..2541415 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.HddsUtils;
import org.apache.ratis.shaded.com.google.protobuf
.InvalidProtocolBufferException;
import org.apache.hadoop.conf.Configuration;
@@ -183,34 +184,9 @@ public final class XceiverClientRatis extends XceiverClientSpi {
return Objects.requireNonNull(client.get(), "client is null");
}
- private boolean isReadOnly(ContainerCommandRequestProto proto) {
- switch (proto.getCmdType()) {
- case ReadContainer:
- case ReadChunk:
- case ListKey:
- case GetKey:
- case GetSmallFile:
- case ListContainer:
- case ListChunk:
- return true;
- case CloseContainer:
- case WriteChunk:
- case UpdateContainer:
- case CompactChunk:
- case CreateContainer:
- case DeleteChunk:
- case DeleteContainer:
- case DeleteKey:
- case PutKey:
- case PutSmallFile:
- default:
- return false;
- }
- }
-
private RaftClientReply sendRequest(ContainerCommandRequestProto request)
throws IOException {
- boolean isReadOnlyRequest = isReadOnly(request);
+ boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
ByteString byteString = request.toByteString();
LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
final RaftClientReply reply = isReadOnlyRequest ?
@@ -222,7 +198,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
private CompletableFuture<RaftClientReply> sendRequestAsync(
ContainerCommandRequestProto request) throws IOException {
- boolean isReadOnlyRequest = isReadOnly(request);
+ boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
ByteString byteString = request.toByteString();
LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..33bf90c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -24,6 +24,7 @@ import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
@@ -315,4 +316,36 @@ public final class HddsUtils {
return name;
}
+ /**
+ * Checks if the container command is read only or not.
+ * @param proto ContainerCommand Request proto
+ * @return True if it is read-only, false otherwise.
+ */
+ public static boolean isReadOnly(
+ ContainerProtos.ContainerCommandRequestProto proto) {
+ switch (proto.getCmdType()) {
+ case ReadContainer:
+ case ReadChunk:
+ case ListKey:
+ case GetKey:
+ case GetSmallFile:
+ case ListContainer:
+ case ListChunk:
+ case GetCommittedBlockLength:
+ return true;
+ case CloseContainer:
+ case WriteChunk:
+ case UpdateContainer:
+ case CompactChunk:
+ case CreateContainer:
+ case DeleteChunk:
+ case DeleteContainer:
+ case DeleteKey:
+ case PutKey:
+ case PutSmallFile:
+ default:
+ return false;
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index ac7aa57..c0dd0ba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -57,7 +57,7 @@ import java.util.concurrent.ThreadPoolExecutor;
* requests.
*
* Read only requests are classified in
- * {@link org.apache.hadoop.hdds.scm.XceiverClientRatis#isReadOnly}
+ * {@link org.apache.hadoop.hdds.HddsUtils#isReadOnly}
* and these readonly requests are replied from the {@link #query(Message)}.
*
* The write requests can be divided into requests with user data
@@ -84,6 +84,11 @@ import java.util.concurrent.ThreadPoolExecutor;
* 2) Write chunk commit operation is executed after write chunk state machine
* operation. This will ensure that commit operation is sync'd with the state
* machine operation.
+ *
+ * Synchronization between {@link #writeStateMachineData} and
+ * {@link #applyTransaction} need to be enforced in the StateMachine
+ * implementation. For example, synchronization between writeChunk and
+ * createContainer in {@link ContainerStateMachine}.
* */
public class ContainerStateMachine extends BaseStateMachine {
static final Logger LOG = LoggerFactory.getLogger(
@@ -213,6 +218,10 @@ public class ContainerStateMachine extends BaseStateMachine {
return CompletableFuture.completedFuture(() -> ByteString.EMPTY);
}
+ /*
+ * writeStateMachineData calls are not synchronized with each other
+ * and also with applyTransaction.
+ */
@Override
public CompletableFuture<Message> writeStateMachineData(LogEntryProto entry) {
try {
@@ -244,6 +253,9 @@ public class ContainerStateMachine extends BaseStateMachine {
}
}
+ /*
+ * ApplyTransaction calls in Ratis are sequential.
+ */
@Override
public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
try {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org