Posted to commits@ozone.apache.org by av...@apache.org on 2020/09/02 16:54:49 UTC

[hadoop-ozone] 30/33: HDDS-3867. Extend the chunkinfo tool to display information from all nodes in the pipeline. (#1154)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 37f411ba8c8c1f3c1baa9e4910dab5f0141447be
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Mon Aug 31 14:06:06 2020 +0530

    HDDS-3867. Extend the chunkinfo tool to display information from all nodes in the pipeline. (#1154)
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  30 +++++
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   7 +
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |  11 ++
 .../hdds/scm/storage/ContainerProtocolCalls.java   |  34 +++++
 .../src/main/smoketest/debug/ozone-debug.robot     |   4 +-
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 149 ++++++++++++---------
 .../hadoop/ozone/debug/ContainerChunkInfo.java     |  21 +--
 7 files changed, 175 insertions(+), 81 deletions(-)
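
The gist of the change: "ozone debug chunkinfo" used to issue getBlock against
a single datanode; it now queries every replica in the pipeline and reports
chunk file locations per datanode. The enabling step, visible in the
ChunkKeyHandler hunk below, is rewriting the key's pipeline to STANDALONE so
that a gRPC client (which supports per-node fan-out) is acquired. A minimal
sketch of that pattern, using only names that appear in this diff:

    // Force the pipeline to STANDALONE before acquiring a client, so that
    // reads can be sent to each replica individually.
    Pipeline pipeline = keyLocation.getPipeline();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      pipeline = Pipeline.newBuilder(pipeline)
          .setType(HddsProtos.ReplicationType.STAND_ALONE)
          .build();
    }
    XceiverClientSpi xceiverClient =
        xceiverClientManager.acquireClientForReadData(pipeline);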

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 4adfa85..c2743c4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -248,6 +248,36 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   }
 
   @Override
+  public Map<DatanodeDetails, ContainerCommandResponseProto>
+      sendCommandOnAllNodes(
+      ContainerCommandRequestProto request) throws IOException {
+    HashMap<DatanodeDetails, ContainerCommandResponseProto>
+            responseProtoHashMap = new HashMap<>();
+    List<DatanodeDetails> datanodeList = pipeline.getNodes();
+    HashMap<DatanodeDetails, CompletableFuture<ContainerCommandResponseProto>>
+            futureHashMap = new HashMap<>();
+    for (DatanodeDetails dn : datanodeList) {
+      try {
+        futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse());
+      } catch (InterruptedException e) {
+        LOG.error("Command execution was interrupted.");
+      }
+    }
+    try{
+      for (Map.Entry<DatanodeDetails,
+              CompletableFuture<ContainerCommandResponseProto> >
+              entry : futureHashMap.entrySet()){
+        responseProtoHashMap.put(entry.getKey(), entry.getValue().get());
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Command execution was interrupted.");
+    } catch (ExecutionException e) {
+      LOG.error("Failed to execute command " + request, e);
+    }
+    return responseProtoHashMap;
+  }
+
+  @Override
   public ContainerCommandResponseProto sendCommand(
       ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
       throws IOException {
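
A caveat about the hunk above: both catch blocks log the InterruptedException
but otherwise swallow it, which leaves the thread's interrupt status cleared.
The conventional Java idiom (a suggested refinement, not what this commit
does) is to re-assert the flag before continuing:

    } catch (InterruptedException e) {
      LOG.error("Command execution was interrupted.", e);
      // Restore the interrupt flag so callers further up the stack can
      // still observe the interruption.
      Thread.currentThread().interrupt();
    }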
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 1c7779b..23fca73 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.security.cert.X509Certificate;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.OptionalLong;
 import java.util.UUID;
@@ -352,4 +353,10 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     return asyncReply;
   }
 
+  @Override
+  public Map<DatanodeDetails, ContainerCommandResponseProto>
+      sendCommandOnAllNodes(ContainerCommandRequestProto request) {
+    throw new UnsupportedOperationException(
+            "Operation Not supported for ratis client");
+  }
 }
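
Because the Ratis client simply throws, any caller that may hold either client
type has to guard this call. This patch avoids the problem by converting the
pipeline to STANDALONE up front; a hypothetical defensive fallback (not part
of this commit) could look like:

    Map<DatanodeDetails, ContainerCommandResponseProto> responses;
    try {
      responses = client.sendCommandOnAllNodes(request);
    } catch (UnsupportedOperationException e) {
      // Ratis pipelines cannot fan out per node; fall back to a single
      // command and attribute the response to the first node.
      responses = java.util.Collections.singletonMap(
          client.getPipeline().getFirstNode(), client.sendCommand(request));
    }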
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
index 3287777..1c7d1f6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -21,10 +21,12 @@ package org.apache.hadoop.hdds.scm;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -186,4 +188,13 @@ public abstract class XceiverClientSpi implements Closeable {
    * @return min commit index replicated to all servers.
    */
   public abstract long getReplicatedMinCommitIndex();
+
+  /**
+   * Sends command to all nodes in the pipeline.
+   * @return a map containing datanode as the key and
+   * the command response from that datanode
+   */
+  public abstract Map<DatanodeDetails, ContainerCommandResponseProto>
+      sendCommandOnAllNodes(ContainerCommandRequestProto request)
+      throws IOException, InterruptedException;
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 6b0d8f8..11acf82 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -73,7 +74,9 @@ import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
 /**
@@ -578,4 +581,35 @@ public final class ContainerProtocolCalls  {
     validators.add(validator);
     return validators;
   }
+
+  public static HashMap<DatanodeDetails, GetBlockResponseProto>
+      getBlockFromAllNodes(
+      XceiverClientSpi xceiverClient,
+      DatanodeBlockID datanodeBlockID) throws IOException,
+          InterruptedException {
+    GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
+            .newBuilder()
+            .setBlockID(datanodeBlockID);
+    HashMap<DatanodeDetails, GetBlockResponseProto> datanodeToResponseMap
+            = new HashMap<>();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
+    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto
+            .newBuilder()
+            .setCmdType(Type.GetBlock)
+            .setContainerID(datanodeBlockID.getContainerID())
+            .setDatanodeUuid(id)
+            .setGetBlock(readBlockRequest);
+    String encodedToken = getEncodedBlockToken(getService(datanodeBlockID));
+    if (encodedToken != null) {
+      builder.setEncodedToken(encodedToken);
+    }
+    ContainerCommandRequestProto request = builder.build();
+    Map<DatanodeDetails, ContainerCommandResponseProto> responses =
+            xceiverClient.sendCommandOnAllNodes(request);
+    for(Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry:
+           responses.entrySet()){
+      datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock());
+    }
+    return datanodeToResponseMap;
+  }
 }
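
Callers of getBlockFromAllNodes have to deal with the checked
InterruptedException and with nodes for which no response came back, which is
exactly how the ChunkKeyHandler hunk further down consumes it. A condensed
sketch (IOException is left to propagate to the caller):

    try {
      Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses =
          ContainerProtocolCalls.getBlockFromAllNodes(
              xceiverClient, datanodeBlockID);
      for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
          entry : responses.entrySet()) {
        if (entry.getValue() == null) {
          continue;  // getBlock failed on this datanode; skip it
        }
        // One chunk list per replica of the block.
        List<ContainerProtos.ChunkInfo> chunks =
            entry.getValue().getBlockData().getChunksList();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }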
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
index 39e561a..f7e3274 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
@@ -29,8 +29,8 @@ Write key
 
 *** Test Cases ***
 Test ozone debug
-    ${result} =     Execute             ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[]'
+    ${result} =     Execute             ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations'
                     Should contain      ${result}       files
-    ${result} =     Execute             ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[].files[0]'
+    ${result} =     Execute             ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations.files[0]'
                     File Should Exist   ${result}
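
The updated jq paths match the new output layout produced by ChunkKeyHandler:
a top-level KeyLocations array containing one inner array per block, with one
entry per datanode. A sketch of the shape (every value below is made up for
illustration):

    {
      "KeyLocations": [
        [
          {
            "Datanode-HostName": "datanode-0.example.com",
            "Datanode-IP": "10.0.0.11",
            "Container-ID": 1,
            "Block-ID": 104591670918048001,
            "Locations": {
              "files": [
                "/data/hdds/chunks/104591670918048001.block"
              ],
              "pipelineID": "6d4a5d78-4b8c-4a9e-9d55-2e8f5b1f0c3a"
            }
          }
        ]
      ]
    }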
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
index c245490..4f69da7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
@@ -22,13 +22,22 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
+import java.util.Map;
+import java.util.HashMap;
+import java.util.HashSet;
+import com.google.gson.GsonBuilder;
+import com.google.gson.Gson;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
 import org.apache.hadoop.hdds.cli.SubcommandWithParent;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -44,12 +53,6 @@ import org.apache.hadoop.ozone.shell.OzoneAddress;
 import org.apache.hadoop.ozone.shell.keys.KeyHandler;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import org.apache.ratis.protocol.ClientId;
 import org.kohsuke.MetaInfServices;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Parameters;
@@ -70,7 +73,6 @@ public class ChunkKeyHandler extends KeyHandler implements
   private ContainerOperationClient containerOperationClient;
   private  XceiverClientManager xceiverClientManager;
   private XceiverClientSpi xceiverClient;
-  private final ClientId clientId = ClientId.randomId();
   private OzoneManagerProtocol ozoneManagerClient;
 
   private String getChunkLocationPath(String containerLocation) {
@@ -79,22 +81,22 @@ public class ChunkKeyHandler extends KeyHandler implements
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
-          throws IOException, OzoneClientException {
+          throws IOException, OzoneClientException{
     containerOperationClient = new
-        ContainerOperationClient(createOzoneConfiguration());
+            ContainerOperationClient(createOzoneConfiguration());
     xceiverClientManager = containerOperationClient
-        .getXceiverClientManager();
+            .getXceiverClientManager();
     ozoneManagerClient = client.getObjectStore().getClientProxy()
             .getOzoneManagerClient();
     address.ensureKeyAddress();
-    JsonObject jsonObj = new JsonObject();
     JsonElement element;
+    JsonObject result = new JsonObject();
     String volumeName = address.getVolumeName();
     String bucketName = address.getBucketName();
     String keyName = address.getKeyName();
     List<ContainerProtos.ChunkInfo> tempchunks = null;
     List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
-    List<String> chunkPaths = new ArrayList<String>();
+    HashSet<String> chunkPaths = new HashSet<>();
     OmKeyArgs keyArgs = new OmKeyArgs.Builder()
             .setVolumeName(volumeName)
             .setBucketName(bucketName)
@@ -102,19 +104,31 @@ public class ChunkKeyHandler extends KeyHandler implements
             .setRefreshPipeline(true)
             .build();
     OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
-    List<OmKeyLocationInfo> locationInfos = keyInfo
-            .getLatestVersionLocations().getBlocksLatestVersionOnly();
     // querying  the keyLocations.The OM is queried to get containerID and
     // localID pertaining to a given key
+    List<OmKeyLocationInfo> locationInfos = keyInfo
+            .getLatestVersionLocations().getBlocksLatestVersionOnly();
+    // for zero-sized key
+    if(locationInfos.isEmpty()){
+      System.out.println("No Key Locations Found");
+      return;
+    }
     ChunkLayOutVersion chunkLayOutVersion = ChunkLayOutVersion
             .getConfiguredVersion(getConf());
+    JsonArray responseArrayList = new JsonArray();
     for (OmKeyLocationInfo keyLocation:locationInfos) {
       ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
       ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
       long containerId = keyLocation.getContainerID();
+      chunkPaths.clear();
       Token<OzoneBlockTokenIdentifier> token = keyLocation.getToken();
+      Pipeline pipeline = keyLocation.getPipeline();
+      if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
+        pipeline = Pipeline.newBuilder(pipeline)
+                .setType(HddsProtos.ReplicationType.STAND_ALONE).build();
+      }
       xceiverClient = xceiverClientManager
-              .acquireClient(keyLocation.getPipeline());
+              .acquireClientForReadData(pipeline);
       // Datanode is queried to get chunk information.Thus querying the
       // OM,SCM and datanode helps us get chunk location information
       if (token != null) {
@@ -122,55 +136,65 @@ public class ChunkKeyHandler extends KeyHandler implements
       }
       ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID()
               .getDatanodeBlockIDProtobuf();
-      ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
-                 .getBlock(xceiverClient, datanodeBlockID);
-      tempchunks = response.getBlockData().getChunksList();
-      ContainerProtos.ContainerDataProto containerData =
-              containerOperationClient.readContainer(
-                      keyLocation.getContainerID(),
-                      keyLocation.getPipeline());
-      for (ContainerProtos.ChunkInfo chunkInfo:tempchunks) {
-        ChunkDetails chunkDetails = new ChunkDetails();
-        chunkDetails.setChunkName(chunkInfo.getChunkName());
-        chunkDetails.setChunkOffset(chunkInfo.getOffset());
-        chunkDetailsList.add(chunkDetails);
-        chunkPaths.add(chunkLayOutVersion.getChunkFile(new File(
-                getChunkLocationPath(containerData.getContainerPath())),
-                keyLocation.getBlockID(),
-                ChunkInfo.getFromProtoBuf(chunkInfo)).toString());
+      // doing a getBlock on all nodes
+      HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
+              responses = null;
+      try {
+        responses = ContainerProtocolCalls
+                .getBlockFromAllNodes(xceiverClient, datanodeBlockID);
+      } catch (InterruptedException e) {
+        LOG.error("Execution interrupted due to " + e);
       }
-      containerChunkInfoVerbose
-              .setContainerPath(containerData.getContainerPath());
-      containerChunkInfoVerbose
-              .setDataNodeList(keyLocation.getPipeline().getNodes());
-      containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
-      containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
-      containerChunkInfo.setFiles(chunkPaths);
-      List<ChunkDataNodeDetails> chunkDataNodeDetails = new
-              ArrayList<ChunkDataNodeDetails>();
-      for (DatanodeDetails datanodeDetails:keyLocation
-              .getPipeline().getNodes()) {
-        chunkDataNodeDetails.add(
-                new ChunkDataNodeDetails(datanodeDetails.getIpAddress(),
-                datanodeDetails.getHostName()));
-      }
-      containerChunkInfo.setChunkDataNodeDetails(chunkDataNodeDetails);
-      containerChunkInfo.setPipelineID(
-              keyLocation.getPipeline().getId().getId());
-      Gson gson = new GsonBuilder().create();
-      if (isVerbose()) {
-        element = gson.toJsonTree(containerChunkInfoVerbose);
-        jsonObj.add("container Id :" + containerId + " "
-                + "blockId :" + keyLocation.getLocalID() + "", element);
-      } else {
-        element = gson.toJsonTree(containerChunkInfo);
-        jsonObj.add("container Id :" + containerId + " "
-                + "blockId :" + keyLocation.getLocalID() + "", element);
+      JsonArray responseFromAllNodes = new JsonArray();
+      for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
+              entry: responses.entrySet()) {
+        JsonObject jsonObj = new JsonObject();
+        if(entry.getValue() == null){
+          LOG.error("Cant execute getBlock on this node");
+          continue;
+        }
+        tempchunks = entry.getValue().getBlockData().getChunksList();
+        ContainerProtos.ContainerDataProto containerData =
+                containerOperationClient.readContainer(
+                        keyLocation.getContainerID(),
+                        keyLocation.getPipeline());
+        for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
+          String fileName = chunkLayOutVersion.getChunkFile(new File(
+              getChunkLocationPath(containerData.getContainerPath())),
+                  keyLocation.getBlockID(),
+                  ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
+          chunkPaths.add(fileName);
+          ChunkDetails chunkDetails = new ChunkDetails();
+          chunkDetails.setChunkName(fileName);
+          chunkDetails.setChunkOffset(chunkInfo.getOffset());
+          chunkDetailsList.add(chunkDetails);
+        }
+        containerChunkInfoVerbose
+                .setContainerPath(containerData.getContainerPath());
+        containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
+        containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
+        containerChunkInfo.setFiles(chunkPaths);
+        containerChunkInfo.setPipelineID(
+                keyLocation.getPipeline().getId().getId());
+        Gson gson = new GsonBuilder().create();
+        if (isVerbose()) {
+          element = gson.toJsonTree(containerChunkInfoVerbose);
+        } else {
+          element = gson.toJsonTree(containerChunkInfo);
+        }
+        jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
+        jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
+        jsonObj.addProperty("Container-ID", containerId);
+        jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
+        jsonObj.add("Locations", element);
+        responseFromAllNodes.add(jsonObj);
+        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
       }
+      responseArrayList.add(responseFromAllNodes);
     }
-    xceiverClientManager.releaseClient(xceiverClient, false);
-    Gson gson = new GsonBuilder().setPrettyPrinting().create();
-    String prettyJson = gson.toJson(jsonObj);
+    result.add("KeyLocations", responseArrayList);
+    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
+    String prettyJson = gson2.toJson(result);
     System.out.println(prettyJson);
   }
 
@@ -178,4 +202,5 @@ public class ChunkKeyHandler extends KeyHandler implements
   public Class<?> getParentType() {
     return OzoneDebug.class;
   }
+
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java
index 0e969c7..cf57d95 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java
@@ -19,9 +19,10 @@
 package org.apache.hadoop.ozone.debug;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
+
+import java.util.HashSet;
 import java.util.List;
 import java.util.UUID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 
 /**
@@ -30,19 +31,12 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 @JsonInclude(JsonInclude.Include.NON_NULL)
 public class ContainerChunkInfo {
   private String containerPath;
-  private List<DatanodeDetails> dataNodeList;
   private List<ChunkDetails> chunkInfos;
-  private List<String> files;
-  private List<ChunkDataNodeDetails> chunkDataNodeDetails;
+  private HashSet<String> files;
   private UUID pipelineID;
   private Pipeline pipeline;
 
-  public void setChunkDataNodeDetails(List<ChunkDataNodeDetails>
-                                              chunkDataNodeDetails) {
-    this.chunkDataNodeDetails = chunkDataNodeDetails;
-  }
-
-  public void setFiles(List<String> files) {
+  public void setFiles(HashSet<String> files) {
     this.files = files;
   }
 
@@ -66,9 +60,6 @@ public class ContainerChunkInfo {
     this.chunkInfos = chunkInfos;
   }
 
-  public void setDataNodeList(List<DatanodeDetails> dataNodeList) {
-    this.dataNodeList = dataNodeList;
-  }
 
   @Override
   public String toString() {
@@ -76,8 +67,6 @@ public class ContainerChunkInfo {
             + "containerPath='"
             + containerPath
             + '\''
-            + ", dataNodeList="
-            + dataNodeList
             + ", chunkInfos="
             + chunkInfos
             + ", pipeline="
@@ -85,8 +74,6 @@ public class ContainerChunkInfo {
             + '}'
             + "files="
             + files
-            + "chunkdatanodeDetails="
-            + chunkDataNodeDetails
             + "PipelineID="
             + pipelineID;
   }

