Posted to common-commits@hadoop.apache.org by cl...@apache.org on 2017/05/12 20:14:24 UTC

hadoop git commit: HDFS-11802. Ozone : add DEBUG CLI support for open container db file. Contributed by Chen Liang

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2796b3435 -> 7bf301e20


HDFS-11802. Ozone : add DEBUG CLI support for open container db file. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bf301e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bf301e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bf301e2

Branch: refs/heads/HDFS-7240
Commit: 7bf301e20ef4e6f741248b5bc56e4feaaef261b3
Parents: 2796b34
Author: Chen Liang <cl...@apache.org>
Authored: Fri May 12 13:13:55 2017 -0700
Committer: Chen Liang <cl...@apache.org>
Committed: Fri May 12 13:13:55 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 60 ++++++++++++++++++--
 .../hadoop/ozone/scm/TestContainerSQLCli.java   | 43 ++++++++++++++
 2 files changed, 97 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bf301e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index d18aca8..f558882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -53,6 +53,7 @@ import java.util.Set;
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
 * This is the CLI that can be used to convert a LevelDB into a SQLite DB file.
@@ -116,6 +117,14 @@ public class SQLCLI  extends Configured implements Tool {
       "INSERT INTO nodePool (datanodeUUID, poolName) " +
           "VALUES (\"%s\", \"%s\")";
   // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
+  // for openContainer.db
+  private static final String CREATE_OPEN_CONTAINER =
+      "CREATE TABLE openContainer (" +
+          "containerName TEXT PRIMARY KEY NOT NULL, " +
+          "containerUsed INTEGER NOT NULL)";
+  private static final String INSERT_OPEN_CONTAINER =
+      "INSERT INTO openContainer (containerName, containerUsed) " +
+          "VALUES (\"%s\", \"%s\")";
 
 
   private static final Logger LOG =
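
As an aside on the INSERT template above: both columns are filled via %s, so the
used-byte count is written as a quoted string; SQLite's type affinity coerces it
back into the INTEGER column. A minimal sketch of the expansion, with made-up
illustration values:

    // Hypothetical values, purely for illustration.
    String containerName = "container-0001";
    long containerUsed = 4096L;
    // Expands to:
    //   INSERT INTO openContainer (containerName, containerUsed)
    //       VALUES ("container-0001", "4096")
    String insertOpenContainer = String.format(
        INSERT_OPEN_CONTAINER, containerName, containerUsed);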
@@ -191,6 +200,9 @@ public class SQLCLI  extends Configured implements Tool {
     } else if (dbName.toString().equals(NODEPOOL_DB)) {
       LOG.info("Converting node pool DB");
       convertNodePoolDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
+      LOG.info("Converting open container DB");
+      convertOpenContainerDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
@@ -244,12 +256,12 @@ public class SQLCLI  extends Configured implements Tool {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-         Connection conn = connectDB(outPath.toString())) {
+         Connection conn = connectDB(outPath.toString());
+         DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
       executeSQL(conn, CREATE_CONTAINER_MEMBERS);
       executeSQL(conn, CREATE_DATANODE_INFO);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       HashSet<String> uuidChecked = new HashSet<>();
       while (iter.hasNext()) {
@@ -320,10 +332,10 @@ public class SQLCLI  extends Configured implements Tool {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-         Connection conn = connectDB(outPath.toString())) {
+         Connection conn = connectDB(outPath.toString());
+         DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_BLOCK_CONTAINER);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
@@ -364,11 +376,11 @@ public class SQLCLI  extends Configured implements Tool {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-         Connection conn = connectDB(outPath.toString())) {
+         Connection conn = connectDB(outPath.toString());
+         DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_NODE_POOL);
       executeSQL(conn, CREATE_DATANODE_INFO);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
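
All three existing convert methods get the same fix here: the DBIterator moves
out of the try body and into the try-with-resources header. org.iq80.leveldb's
DBIterator is Closeable, so this guarantees the iterator is released even if an
exception is thrown mid-scan; previously it was never closed at all. The shared
pattern, condensed:

    try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
         Connection conn = connectDB(outPath.toString());
         DBIterator iter = dbStore.getIterator()) {
      // All three resources are closed in reverse declaration order when
      // the block exits, normally or exceptionally.
      iter.seekToFirst();
      while (iter.hasNext()) {
        Map.Entry<byte[], byte[]> entry = iter.next();
        // ... translate the entry into an INSERT against the sqlite db ...
      }
    }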
@@ -394,6 +406,42 @@ public class SQLCLI  extends Configured implements Tool {
     executeSQL(conn, insertDatanodeID);
   }
 
+  /**
+   * Convert openContainer.db to a SQLite db file. This is a rather simple
+   * db; the schema has only one table:
+   *
+   * openContainer
+   * -------------------------------
+   * containerName* | containerUsed
+   * -------------------------------
+   *
+   * @param dbPath path to the openContainer db.
+   * @param outPath path to the output SQLite file.
+   * @throws Exception if the conversion fails.
+   */
+  private void convertOpenContainerDB(Path dbPath, Path outPath)
+      throws Exception {
+    LOG.info("Create table for open container db.");
+    File dbFile = dbPath.toFile();
+    org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
+    try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
+      executeSQL(conn, CREATE_OPEN_CONTAINER);
+
+      iter.seekToFirst();
+      while (iter.hasNext()) {
+        Map.Entry<byte[], byte[]> entry = iter.next();
+        String containerName = DFSUtil.bytes2String(entry.getKey());
+        Long containerUsed = Long.parseLong(
+            DFSUtil.bytes2String(entry.getValue()));
+        String insertOpenContainer = String.format(
+            INSERT_OPEN_CONTAINER, containerName, containerUsed);
+        executeSQL(conn, insertOpenContainer);
+      }
+    }
+  }
+
   private CommandLine parseArgs(String[] argv)
       throws ParseException {
     return parser.parse(options, argv);
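
SQLCLI extends Configured and implements Tool, so the new converter is reached
through the same -p/-o arguments as the existing ones. A minimal sketch of the
invocation, mirroring the test below (cli is an already-constructed SQLCLI
instance; the paths are placeholders):

    // -p: path to the input LevelDB, -o: path for the output sqlite file.
    String[] args = {"-p", "/path/to/openContainer.db", "-o", "/tmp/out_sql.db"};
    cli.run(args);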

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bf301e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 9e9e082..1e6f5f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -46,11 +46,13 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -103,6 +105,15 @@ public class TestContainerSQLCli {
     // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE which we set to 2.
     // so the first allocateBlock() will create two containers. A random one
     // is assigned for the block.
+
+    // loop until both datanodes are up, trying for up to about 4 seconds.
+    for (int c = 0; c < 40; c++) {
+      if (nodeManager.getAllNodes().size() == 2) {
+        break;
+      }
+      Thread.sleep(100);
+    }
+    assertEquals(2, nodeManager.getAllNodes().size());
     AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
     pipeline1 = ab1.getPipeline();
     blockContainerMap.put(ab1.getKey(), pipeline1.getContainerName());
@@ -185,6 +196,38 @@ public class TestContainerSQLCli {
   }
 
   @Test
+  public void testConvertOpenContainerDB() throws Exception {
+    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + OPEN_CONTAINERS_DB;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM openContainer";
+    ResultSet rs = executeQuery(conn, sql);
+    HashSet<String> expectedContainer = new HashSet<>();
+    expectedContainer.add(pipeline1.getContainerName());
+    expectedContainer.add(pipeline2.getContainerName());
+    // the number of allocated blocks can vary, and each block can land in
+    // either of the two containers. We only check that the total space used
+    // equals block size * number of blocks.
+    long totalUsed = 0;
+    while (rs.next()) {
+      String containerName = rs.getString("containerName");
+      long containerUsed = rs.getLong("containerUsed");
+      totalUsed += containerUsed;
+      assertTrue(expectedContainer.remove(containerName));
+    }
+    assertEquals(0, expectedContainer.size());
+    assertEquals(blockContainerMap.keySet().size() * DEFAULT_BLOCK_SIZE,
+        totalUsed);
+
+    Files.delete(Paths.get(dbOutPath));
+  }
+
+  @Test
   public void testConvertContainerDB() throws Exception {
     String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
     // TODO : the following will fail due to empty Datanode list, need to fix.
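
Outside the test, the generated file is an ordinary SQLite database, so it can
be inspected with plain JDBC, assuming the sqlite-jdbc driver is on the
classpath (the output path is a placeholder):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:/tmp/out_sql.db");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT * FROM openContainer")) {
      while (rs.next()) {
        System.out.println(rs.getString("containerName")
            + " -> " + rs.getLong("containerUsed"));
      }
    }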

