You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2018/07/09 18:26:02 UTC

[10/50] [abbrv] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove Pipeline object from it. Contributed by Ajay Kumar.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index d6f5d32..a9781b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -79,14 +79,16 @@ public class TestXceiverClientMetrics {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    ContainerInfo container = storageContainerLocationClient
+    ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client = clientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = clientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
 
     ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(container.getContainerID(),
+        .getCreateContainerRequest(
+            container.getContainerInfo().getContainerID(),
             container.getPipeline());
     client.sendCommand(request);
 
@@ -112,7 +114,7 @@ public class TestXceiverClientMetrics {
           // use async interface for testing pending metrics
           for (int i = 0; i < numRequest; i++) {
             BlockID blockID = ContainerTestHelper.
-                getTestBlockID(container.getContainerID());
+                getTestBlockID(container.getContainerInfo().getContainerID());
             ContainerProtos.ContainerCommandRequestProto smallFileRequest;
 
             smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 375450c..c344bbe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -60,7 +60,9 @@ public class BenchMarkContainerStateMap {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setState(CLOSED)
-            .setPipeline(pipeline)
+            .setPipelineName(pipeline.getPipelineName())
+            .setReplicationType(pipeline.getType())
+            .setReplicationFactor(pipeline.getFactor())
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -81,7 +83,9 @@ public class BenchMarkContainerStateMap {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setState(OPEN)
-            .setPipeline(pipeline)
+            .setPipelineName(pipeline.getPipelineName())
+            .setReplicationType(pipeline.getType())
+            .setReplicationFactor(pipeline.getFactor())
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -101,7 +105,9 @@ public class BenchMarkContainerStateMap {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -166,7 +172,9 @@ public class BenchMarkContainerStateMap {
     int cid = state.containerID.incrementAndGet();
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(CLOSED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor())
         // This is bytes allocated for blocks inside container, not the
         // container size
         .setAllocatedBytes(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index edc0d7b..26776c5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm.cli;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyI
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -86,12 +86,12 @@ public class SQLCLI  extends Configured implements Tool {
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
           "containerID LONG PRIMARY KEY NOT NULL, " +
-          "leaderUUID TEXT NOT NULL)";
-  private static final String CREATE_CONTAINER_MEMBERS =
-      "CREATE TABLE containerMembers (" +
-          "containerName TEXT NOT NULL, " +
-          "datanodeUUID TEXT NOT NULL," +
-          "PRIMARY KEY(containerName, datanodeUUID));";
+          "replicationType TEXT NOT NULL," +
+          "replicationFactor TEXT NOT NULL," +
+          "usedBytes LONG NOT NULL," +
+          "allocatedBytes LONG NOT NULL," +
+          "owner TEXT," +
+          "numberOfKeys LONG)";
   private static final String CREATE_DATANODE_INFO =
       "CREATE TABLE datanodeInfo (" +
           "hostName TEXT NOT NULL, " +
@@ -99,8 +99,10 @@ public class SQLCLI  extends Configured implements Tool {
           "ipAddress TEXT, " +
           "containerPort INTEGER NOT NULL);";
   private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerID, leaderUUID) " +
-          "VALUES (\"%d\", \"%s\")";
+      "INSERT INTO containerInfo (containerID, replicationType, "
+          + "replicationFactor, usedBytes, allocatedBytes, owner, "
+          + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", \"%d\", "
+          + "\"%s\", \"%d\")";
   private static final String INSERT_DATANODE_INFO =
       "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
           "containerPort) " +
@@ -469,10 +471,7 @@ public class SQLCLI  extends Configured implements Tool {
         .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
-      executeSQL(conn, CREATE_CONTAINER_MEMBERS);
-      executeSQL(conn, CREATE_DATANODE_INFO);
 
-      HashSet<String> uuidChecked = new HashSet<>();
       dbStore.iterate(null, (key, value) -> {
         long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
@@ -481,8 +480,7 @@ public class SQLCLI  extends Configured implements Tool {
         Preconditions.checkNotNull(containerInfo);
         try {
          //TODO: include container state to sqlite schema
-          insertContainerDB(conn, containerID,
-              containerInfo.getPipeline().getProtobufMessage(), uuidChecked);
+          insertContainerDB(conn, containerInfo, containerID);
           return true;
         } catch (SQLException e) {
           throw new IOException(e);
@@ -494,38 +492,23 @@ public class SQLCLI  extends Configured implements Tool {
   /**
    * Insert into the sqlite DB of container.db.
    * @param conn the connection to the sqlite DB.
-   * @param containerID the id of the container.
-   * @param pipeline the actual container pipeline object.
-   * @param uuidChecked the uuid that has been already inserted.
+   * @param containerInfo the container info object whose fields are persisted.
+   * @param containerID the id of the container.
    * @throws SQLException throws exception.
    */
-  private void insertContainerDB(Connection conn, long containerID,
-      Pipeline pipeline, Set<String> uuidChecked) throws SQLException {
+  private void insertContainerDB(Connection conn, ContainerInfo containerInfo,
+      long containerID) throws SQLException {
     LOG.info("Insert to sql container db, for container {}", containerID);
     String insertContainerInfo = String.format(
         INSERT_CONTAINER_INFO, containerID,
-        pipeline.getLeaderID());
-    executeSQL(conn, insertContainerInfo);
+        containerInfo.getReplicationType(),
+        containerInfo.getReplicationFactor(),
+        containerInfo.getUsedBytes(),
+        containerInfo.getAllocatedBytes(),
+        containerInfo.getOwner(),
+        containerInfo.getNumberOfKeys());
 
-    for (HddsProtos.DatanodeDetailsProto dd :
-        pipeline.getMembersList()) {
-      String uuid = dd.getUuid();
-      if (!uuidChecked.contains(uuid)) {
-        // we may also not use this checked set, but catch exception instead
-        // but this seems a bit cleaner.
-        String ipAddr = dd.getIpAddress();
-        String hostName = dd.getHostName();
-        int containerPort = DatanodeDetails.getFromProtoBuf(dd)
-            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-        String insertMachineInfo = String.format(
-            INSERT_DATANODE_INFO, hostName, uuid, ipAddr, containerPort);
-        executeSQL(conn, insertMachineInfo);
-        uuidChecked.add(uuid);
-      }
-      String insertContainerMembers = String.format(
-          INSERT_CONTAINER_MEMBERS, containerID, uuid);
-      executeSQL(conn, insertContainerMembers);
-    }
+    executeSQL(conn, insertContainerInfo);
     LOG.info("Insertion completed.");
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org