Posted to commits@ozone.apache.org by ms...@apache.org on 2020/02/23 17:05:01 UTC

[hadoop-ozone] branch HDDS-2886 created (now 2b8e449)

This is an automated email from the ASF dual-hosted git repository.

msingh pushed a change to branch HDDS-2886
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


      at 2b8e449  HDDS-2886. Parse and dump datanode segment file to printable text

This branch includes the following new commits:

     new 2b8e449  HDDS-2886. Parse and dump datanode segment file to printable text

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.





[hadoop-ozone] 01/01: HDDS-2886. Parse and dump datanode segment file to printable text

Posted by ms...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch HDDS-2886
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 2b8e449678a5574311c2c5a802403c706e27e65e
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Sun Feb 23 22:31:14 2020 +0530

    HDDS-2886. Parse and dump datanode segment file to printable text
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     | 29 ----------
 .../server/ratis/ContainerStateMachine.java        | 40 +++++++++-----
 hadoop-hdds/tools/pom.xml                          |  9 +++
 .../hdds/datanode/cli/ParseDnRatisLogSegment.java  | 64 ++++++++++++++++++++++
 hadoop-ozone/dist/src/shell/ozone/ozone            |  5 ++
 5 files changed, 103 insertions(+), 44 deletions(-)
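
In short, this change removes the WriteChunk-specific
HddsUtils.writeChunkToString helper, turns
ContainerStateMachine.smProtoToString into a reusable static method
that renders any container command via protobuf TextFormat, and adds a
new picocli tool, ParseDnRatisLogSegment, exposed below as the
"ozone dnratislogparser" shell subcommand, to dump a datanode Ratis
segment file to readable text.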

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index dcf0be3..d7bfe57 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -491,35 +491,6 @@ public final class HddsUtils {
         "Path should be a descendant of %s", ancestor);
   }
 
-  public static String writeChunkToString(WriteChunkRequestProto wc,
-                                          long contId, String location) {
-    Preconditions.checkNotNull(wc);
-    StringBuilder builder = new StringBuilder();
-
-    builder.append("cmd=");
-    builder.append(ContainerProtos.Type.WriteChunk.toString());
-
-    builder.append(", container id=");
-    builder.append(contId);
-
-    builder.append(", blockid=");
-    builder.append(wc.getBlockID().getContainerID());
-    builder.append(":localid=");
-    builder.append(wc.getBlockID().getLocalID());
-
-    builder.append(", chunk=");
-    builder.append(wc.getChunkData().getChunkName());
-    builder.append(":offset=");
-    builder.append(wc.getChunkData().getOffset());
-    builder.append(":length=");
-    builder.append(wc.getChunkData().getLen());
-
-    builder.append(", container path=");
-    builder.append(location);
-
-    return builder.toString();
-  }
-
   /**
    * Leverages the Configuration.getPassword method to attempt to get
    * passwords from the CredentialProvider API before falling back to
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 3bf5550..446eba2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -73,6 +73,7 @@ import org.apache.ratis.statemachine.StateMachineStorage;
 import org.apache.ratis.statemachine.TransactionContext;
 import org.apache.ratis.statemachine.impl.BaseStateMachine;
 import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
+import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -370,14 +371,15 @@ public class ContainerStateMachine extends BaseStateMachine {
     return entryProto.getStateMachineEntry().getStateMachineData();
   }
 
-  private ContainerCommandRequestProto getContainerCommandRequestProto(
-      ByteString request) throws InvalidProtocolBufferException {
+  private static ContainerCommandRequestProto getContainerCommandRequestProto(
+      RaftGroupId id, ByteString request)
+      throws InvalidProtocolBufferException {
     // TODO: We can avoid creating new builder and set pipeline Id if
     // the client is already sending the pipeline id, then we just have to
     // validate the pipeline Id.
     return ContainerCommandRequestProto.newBuilder(
         ContainerCommandRequestProto.parseFrom(request))
-        .setPipelineID(gid.getUuid().toString()).build();
+        .setPipelineID(id.getUuid().toString()).build();
   }
 
   private ContainerCommandRequestProto message2ContainerCommandRequestProto(
@@ -506,7 +508,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       metrics.incNumWriteStateMachineOps();
       long writeStateMachineStartTime = Time.monotonicNowNanos();
       ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
+          getContainerCommandRequestProto(gid,
               entry.getStateMachineLogEntry().getLogData());
       WriteChunkRequestProto writeChunk =
           WriteChunkRequestProto.newBuilder(requestProto.getWriteChunk())
@@ -638,7 +640,7 @@ public class ContainerStateMachine extends BaseStateMachine {
     }
     try {
       final ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
+          getContainerCommandRequestProto(gid,
               entry.getStateMachineLogEntry().getLogData());
       // readStateMachineData should only be called for "write" to Ratis.
       Preconditions.checkArgument(!HddsUtils.isReadOnly(requestProto));
@@ -721,7 +723,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       applyTransactionSemaphore.acquire();
       metrics.incNumApplyTransactionsOps();
       ContainerCommandRequestProto requestProto =
-          getContainerCommandRequestProto(
+          getContainerCommandRequestProto(gid,
               trx.getStateMachineLogEntry().getLogData());
       Type cmdType = requestProto.getCmdType();
       // Make sure that in write chunk, the user data is not set
@@ -887,22 +889,30 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   @Override
   public String toStateMachineLogEntryString(StateMachineLogEntryProto proto) {
+    return smProtoToString(gid, containerController, proto);
+  }
+
+  public static String smProtoToString(RaftGroupId gid,
+                                   ContainerController containerController,
+                                   StateMachineLogEntryProto proto) {
+    StringBuilder builder = new StringBuilder();
     try {
       ContainerCommandRequestProto requestProto =
-              getContainerCommandRequestProto(proto.getLogData());
+          getContainerCommandRequestProto(gid, proto.getLogData());
       long contId = requestProto.getContainerID();
 
-      switch (requestProto.getCmdType()) {
-      case WriteChunk:
+      builder.append(TextFormat.shortDebugString(requestProto));
+
+      if (containerController != null) {
         String location = containerController.getContainerLocation(contId);
-        return HddsUtils.writeChunkToString(requestProto.getWriteChunk(),
-                contId, location);
-      default:
-        return "Cmd Type:" + requestProto.getCmdType()
-          + " should not have state machine data";
+        builder.append(", container path=");
+        builder.append(location);
       }
     } catch (Throwable t) {
-      return "";
+      LOG.info("smProtoToString failed", t);
+      builder.append("smProtoToString failed with");
+      builder.append(t.getMessage());
     }
+    return builder.toString();
   }
 }
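
Because smProtoToString is now a static method that tolerates a null
ContainerController, a raw log entry can be rendered outside a running
datanode. A minimal sketch, assuming the Ozone and Ratis artifacts
above are on the classpath (LogEntryPrinter is a hypothetical wrapper,
not part of this change):

    import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
    import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
    import org.apache.ratis.protocol.RaftGroupId;

    /** Hypothetical helper: render one Ratis log entry as text. */
    final class LogEntryPrinter {
      static String toText(StateMachineLogEntryProto entry) {
        // Any group id works here: smProtoToString only uses its UUID to
        // stamp a pipeline id into the parsed request. A null
        // ContainerController skips the ", container path=" suffix.
        return ContainerStateMachine.smProtoToString(
            RaftGroupId.randomId(), null, entry);
      }
    }

This is exactly how the new ParseDnRatisLogSegment tool below plugs the
state machine's formatter into the generic Ratis log parser.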
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 8900eca..815e0e9 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -47,6 +47,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-common</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-container-service</artifactId>
+    </dependency>
+    <dependency>
+      <artifactId>ratis-tools</artifactId>
+      <groupId>org.apache.ratis</groupId>
+      <version>${ratis.version}</version>
+    </dependency>
+    <dependency>
       <groupId>commons-cli</groupId>
       <artifactId>commons-cli</artifactId>
     </dependency>
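
The new ratis-tools dependency brings in
org.apache.ratis.tools.ParseRatisLog, the generic segment-file reader
the CLI below builds on, while hadoop-hdds-container-service provides
ContainerStateMachine and its smProtoToString formatter.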
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/datanode/cli/ParseDnRatisLogSegment.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/datanode/cli/ParseDnRatisLogSegment.java
new file mode 100644
index 0000000..2df39f2
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/datanode/cli/ParseDnRatisLogSegment.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.datanode.cli;
+
+import org.apache.hadoop.ozone.container.common.transport.server
+    .ratis.ContainerStateMachine;
+import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.tools.ParseRatisLog;
+import picocli.CommandLine;
+
+import java.io.File;
+
+/**
+ * Command line utility to parse and dump a datanode ratis segment file.
+ */
+@CommandLine.Command(
+    description = "Utility to parse and dump datanode segment file",
+    name = "dnRatisparser", mixinStandardHelpOptions = true)
+public class ParseDnRatisLogSegment implements Runnable {
+  @CommandLine.Option(names = {"-s", "--segmentPath"}, required = true,
+      description = "Path of the segment file")
+  private static File segmentFile;
+
+  private static String smToContainerLogString(
+      StateMachineLogEntryProto logEntryProto) {
+    return ContainerStateMachine.
+        smProtoToString(RaftGroupId.randomId(), null, logEntryProto);
+  }
+
+  public void run() {
+    try {
+      ParseRatisLog.Builder builder = new ParseRatisLog.Builder();
+      builder.setSegmentFile(segmentFile);
+      builder.setSMLogToString(ParseDnRatisLogSegment::smToContainerLogString);
+
+      ParseRatisLog prl = builder.build();
+      prl.dumpSegmentFile();
+    } catch (Exception e) {
+      System.out.println(ParseDnRatisLogSegment.class.getSimpleName()
+          + " failed with exception " + e.toString());
+    }
+  }
+
+
+  public static void main(String... args) {
+    CommandLine.run(new ParseDnRatisLogSegment(), System.err, args);
+  }
+}
diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone
index 987e118..dc87e6f 100755
--- a/hadoop-ozone/dist/src/shell/ozone/ozone
+++ b/hadoop-ozone/dist/src/shell/ozone/ozone
@@ -56,6 +56,7 @@ function hadoop_usage
   hadoop_add_subcommand "dtutil" client "operations related to delegation tokens"
   hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool"
   hadoop_add_subcommand "admin" client "Ozone admin tool"
+  hadoop_add_subcommand "dnratislogparser" client "Ozone debug tool to convert datanode Ratis log files into text"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -212,6 +213,10 @@ function ozonecmd_case
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.admin.OzoneAdmin
       OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
     ;;
+    dnratislogparser)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdds.datanode.cli.ParseDnRatisLogSegment
+      OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-tools"
+    ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
       if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
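
With the shell wiring above, the tool can be invoked as
"ozone dnratislogparser -s <segment file>" (or --segmentPath), where
<segment file> points at a datanode Ratis log segment; picocli's
mixinStandardHelpOptions also supplies -h/--help and -V/--version.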


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org