Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/31 15:50:20 UTC

[26/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
deleted file mode 100644
index 50fd18f..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.primitives.Longs;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-
-/**
- * This class  manages the state of datanode
- * in conjunction with the node pool and node managers.
- */
-public class ReplicationDatanodeStateManager {
-  private final NodeManager nodeManager;
-  private final NodePoolManager poolManager;
-  private final Random r;
-
-  /**
-   * The datanode state Manager.
-   *
-   * @param nodeManager
-   * @param poolManager
-   */
-  public ReplicationDatanodeStateManager(NodeManager nodeManager,
-      NodePoolManager poolManager) {
-    this.nodeManager = nodeManager;
-    this.poolManager = poolManager;
-    r = new Random();
-  }
-
-  /**
-   * Get Container Report as if it is from a datanode in the cluster.
-   * @param containerID - Container ID.
-   * @param poolName - Pool Name.
-   * @param dataNodeCount - Datanode Count.
-   * @return List of Container Reports.
-   */
-  public List<ContainerReportsRequestProto> getContainerReport(
-      long containerID, String poolName, int dataNodeCount) {
-    List<ContainerReportsRequestProto> containerList = new LinkedList<>();
-    List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);
-
-    if (nodesInPool == null) {
-      return containerList;
-    }
-
-    if (nodesInPool.size() < dataNodeCount) {
-      throw new IllegalStateException("Not enough datanodes to create " +
-          "required container reports");
-    }
-
-    while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
-      DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
-      nodesInPool.remove(id);
-      containerID++;
-      // We return container reports only for nodes that are healthy.
-      if (nodeManager.getNodeState(id) == HEALTHY) {
-        ContainerInfo info = ContainerInfo.newBuilder()
-            .setContainerID(containerID)
-            .setFinalhash(DigestUtils.sha256Hex(
-                Longs.toByteArray(containerID)))
-            .setContainerID(containerID)
-            .build();
-        ContainerReportsRequestProto containerReport =
-            ContainerReportsRequestProto.newBuilder().addReports(info)
-            .setDatanodeDetails(id.getProtoBufMessage())
-            .setType(ContainerReportsRequestProto.reportType.fullReport)
-            .build();
-        containerList.add(containerReport);
-      }
-    }
-    return containerList;
-  }
-}
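The helper deleted above built standalone ContainerReportsRequestProto messages, each tagged with the reporting datanode and a reportType. With HDDS-81 the container report rides inside the datanode heartbeat, so that wrapper type is replaced by ContainerReportsProto and the sender is passed alongside the report instead of inside it; the helper is dropped rather than ported. A rough before/after sketch of the message construction, collated from this hunk and the TestSCMMetrics hunk below (the per-container entry `info` stands for whatever the builder's addReports accepts, e.g. ContainerReport#getProtoBufMessage() in the updated tests):

    // Before: the report message itself named the datanode and a report type.
    ContainerReportsRequestProto oldReport =
        ContainerReportsRequestProto.newBuilder()
            .addReports(info)
            .setDatanodeDetails(id.getProtoBufMessage())
            .setType(ContainerReportsRequestProto.reportType.fullReport)
            .build();

    // After: the report is just the list of container entries; the sending
    // datanode is supplied separately when the report is processed
    // (see processContainerReports in TestSCMMetrics below).
    ContainerReportsProto newReport =
        ContainerReportsProto.newBuilder()
            .addReports(info)
            .build();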

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 3f814d0..072d821 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -24,13 +24,13 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.mockito.Mockito;
 
@@ -277,12 +277,12 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * Register the node if the node finds that it is not registered with any SCM.
    *
    * @param dd DatanodeDetailsProto
-   * @param nodeReport SCMNodeReport
+   * @param nodeReport NodeReportProto
    * @return SCMHeartbeatResponseProto
    */
   @Override
-  public SCMCommand register(HddsProtos.DatanodeDetailsProto dd,
-                             SCMNodeReport nodeReport) {
+  public RegisteredCommand register(DatanodeDetails dd,
+                                    NodeReportProto nodeReport) {
     return null;
   }
 
@@ -294,8 +294,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * @return SCMheartbeat response list
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(HddsProtos.DatanodeDetailsProto dd,
-      SCMNodeReport nodeReport) {
+  public List<SCMCommand> sendHeartbeat(DatanodeDetails dd,
+      NodeReportProto nodeReport) {
     return null;
   }
 

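For reference, the two NodeManager callbacks this hunk rewires, collected into a bare sketch (the interface name HeartbeatEndpoint is invented here purely for illustration; in the patch these are methods on NodeManager and its mocks):

    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
    import org.apache.hadoop.ozone.protocol.commands.SCMCommand;

    interface HeartbeatEndpoint {
      // Registration now takes the typed DatanodeDetails wrapper instead of
      // HddsProtos.DatanodeDetailsProto, and answers with a RegisteredCommand.
      RegisteredCommand register(DatanodeDetails dd, NodeReportProto nodeReport);

      // The heartbeat carries NodeReportProto rather than the old SCMNodeReport.
      List<SCMCommand> sendHeartbeat(DatanodeDetails dd, NodeReportProto nodeReport);
    }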
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index a0d41a8..0c1d8f2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
@@ -302,12 +304,11 @@ public class TestStorageContainerManager {
       NodeManager nodeManager = cluster.getStorageContainerManager()
           .getScmNodeManager();
       List<SCMCommand> commands = nodeManager.sendHeartbeat(
-          nodeManager.getNodes(NodeState.HEALTHY).get(0).getProtoBufMessage(),
-          null);
+          nodeManager.getNodes(NodeState.HEALTHY).get(0), null);
 
       if (commands != null) {
         for (SCMCommand cmd : commands) {
-          if (cmd.getType() == SCMCmdType.deleteBlocksCommand) {
+          if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
             List<DeletedBlocksTransaction> deletedTXs =
                 ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
             return deletedTXs != null && deletedTXs.size() == limitSize;
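The heartbeat reply is still a list of SCMCommand objects; what changes is the type enum, from the removed SCMCmdType to SCMCommandProto.Type. A minimal dispatch sketch, assuming commands came back from sendHeartbeat as above:

    // Sketch: DeleteBlocksCommand and blocksTobeDeleted() are used exactly as
    // in the test; only the enum on the left of the comparison is new.
    for (SCMCommand cmd : commands) {
      if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
        List<DeletedBlocksTransaction> deletedTXs =
            ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
        // ... inspect or assert on deletedTXs ...
      }
    }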

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
index 1d19bb3..1dbe760 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -75,11 +77,11 @@ public class TestSCMMetrics {
       ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
           writeBytes, readCount, writeCount);
       StorageContainerManager scmManager = cluster.getStorageContainerManager();
-
-      ContainerReportsRequestProto request = createContainerReport(numReport,
-          stat, null);
-      String fstDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails fstDatanodeDetails = TestUtils.getDatanodeDetails();
+      ContainerReportsProto request = createContainerReport(numReport, stat);
+      String fstDatanodeUuid = fstDatanodeDetails.getUuidString();
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          fstDatanodeDetails, request);
 
       // verify container stat metrics
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -100,9 +102,11 @@ public class TestSCMMetrics {
           getLongGauge("LastContainerReportWriteCount", scmMetrics));
 
       // add one new report
-      request = createContainerReport(1, stat, null);
-      String sndDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails sndDatanodeDetails = TestUtils.getDatanodeDetails();
+      request = createContainerReport(1, stat);
+      String sndDatanodeUuid = sndDatanodeDetails.getUuidString();
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          sndDatanodeDetails, request);
 
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * (numReport + 1),
@@ -124,12 +128,12 @@ public class TestSCMMetrics {
       // Re-send reports but with different value for validating
       // the aggregation.
       stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(
-          createContainerReport(1, stat, fstDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          fstDatanodeDetails, createContainerReport(1, stat));
 
       stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(
-          createContainerReport(1, stat, sndDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          sndDatanodeDetails, createContainerReport(1, stat));
 
       // the global container metrics value should be updated
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -170,11 +174,11 @@ public class TestSCMMetrics {
           writeBytes, readCount, writeCount);
       StorageContainerManager scmManager = cluster.getStorageContainerManager();
 
-      String datanodeUuid = cluster.getHddsDatanodes().get(0)
-          .getDatanodeDetails().getUuidString();
-      ContainerReportsRequestProto request = createContainerReport(numReport,
-          stat, datanodeUuid);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+          .getDatanodeDetails();
+      ContainerReportsProto request = createContainerReport(numReport, stat);
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          datanodeDetails, request);
 
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * numReport,
@@ -216,11 +220,11 @@ public class TestSCMMetrics {
     }
   }
 
-  private ContainerReportsRequestProto createContainerReport(int numReport,
-      ContainerStat stat, String datanodeUuid) {
-    StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto.Builder
+  private ContainerReportsProto createContainerReport(int numReport,
+      ContainerStat stat) {
+    StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
         reportsBuilder = StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.newBuilder();
+        .ContainerReportsProto.newBuilder();
 
     for (int i = 0; i < numReport; i++) {
       ContainerReport report = new ContainerReport(
@@ -234,24 +238,6 @@ public class TestSCMMetrics {
       report.setWriteBytes(stat.getWriteBytes().get());
       reportsBuilder.addReports(report.getProtoBufMessage());
     }
-
-    DatanodeDetails datanodeDetails;
-    if (datanodeUuid == null) {
-      datanodeDetails = TestUtils.getDatanodeDetails();
-    } else {
-      datanodeDetails = DatanodeDetails.newBuilder()
-          .setUuid(datanodeUuid)
-          .setIpAddress("127.0.0.1")
-          .setHostName("localhost")
-          .setContainerPort(0)
-          .setRatisPort(0)
-          .setOzoneRestPort(0)
-          .build();
-    }
-
-    reportsBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage());
-    reportsBuilder.setType(StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.reportType.fullReport);
     return reportsBuilder.build();
   }
 }
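Taken together, handing container reports to SCM is now a two-argument call, sender plus report, instead of a single self-describing request. A minimal sketch, assuming the same scmManager, numReport and stat as in the test above and a report built by the new createContainerReport:

    // Sketch only: the sending datanode travels beside the report, not inside it.
    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
    ContainerReportsProto report = createContainerReport(numReport, stat);
    scmManager.getDatanodeProtocolServer()
        .processContainerReports(datanodeDetails, report);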

