Posted to commits@ozone.apache.org by av...@apache.org on 2020/09/02 16:54:34 UTC

[hadoop-ozone] 15/33: HDDS-3972. Add option to limit the number of items displayed through the ldb tool. (#1206)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit c51e4b3c54e3b6a228752ab5ab673c306afc579f
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Aug 27 14:03:40 2020 +0530

    HDDS-3972. Add option to limit the number of items displayed through the ldb tool. (#1206)
---
 .../org/apache/hadoop/ozone/om/TestOmLDBCli.java   | 120 +++++++++++
 .../org/apache/hadoop/ozone/om/TestOmSQLCli.java   | 235 ---------------------
 .../org/apache/hadoop/ozone/debug/DBScanner.java   |  62 ++++--
 .../org/apache/hadoop/ozone/debug/RDBParser.java   |   4 +
 4 files changed, 172 insertions(+), 249 deletions(-)
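
In short, the ldb debug tool gains a --length/-l option (default 100) that caps
how many table entries a scan prints, and the SQLite-based TestOmSQLCli test is
replaced by TestOmLDBCli, which exercises the new limit. A hypothetical
command-line invocation (only --length/-l is introduced by this commit; the
"ozone debug ldb ... scan --column_family" names are assumed from the
surrounding tool rather than shown in this diff):

    # Print at most 10 entries from the keyTable of an om.db.
    ozone debug ldb --db=/path/to/om.db scan --column_family=keyTable --length=10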

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
new file mode 100644
index 0000000..450eebb
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.debug.DBScanner;
+import org.apache.hadoop.ozone.debug.RDBParser;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.Assert;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.util.List;
+import java.util.ArrayList;
+
+
+/**
+ * This class tests the Debug LDB CLI that reads from an om.db file.
+ */
+public class TestOmLDBCli {
+  private OzoneConfiguration conf;
+
+  private RDBParser rdbParser;
+  private DBScanner dbScanner;
+  private DBStore dbStore = null;
+  private static List<String> keyNames;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  @Before
+  public void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    rdbParser = new RDBParser();
+    dbScanner = new DBScanner();
+    keyNames = new ArrayList<>();
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    if (dbStore != null) {
+      dbStore.close();
+    }
+  }
+
+  @Test
+  public void testOMDB() throws Exception {
+    File newFolder = folder.newFolder();
+    if (!newFolder.exists()) {
+      Assert.assertTrue(newFolder.mkdirs());
+    }
+    // Dummy om.db with only keyTable
+    dbStore = DBStoreBuilder.newBuilder(conf)
+      .setName("om.db")
+      .setPath(newFolder.toPath())
+      .addTable("keyTable")
+      .build();
+    // insert 5 keys
+    for (int i = 0; i < 5; i++) {
+      OmKeyInfo value = TestOMRequestUtils.createOmKeyInfo("sampleVol",
+          "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE,
+          HddsProtos.ReplicationFactor.ONE);
+      String key = "key" + i;
+      Table<byte[], byte[]> keyTable = dbStore.getTable("keyTable");
+      keyTable.put(key.getBytes(), value.getProtobuf().toByteArray());
+    }
+    rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath());
+    dbScanner.setParent(rdbParser);
+    Assert.assertEquals(5, getKeyNames(dbScanner).size());
+    Assert.assertTrue(getKeyNames(dbScanner).contains("key1"));
+    Assert.assertTrue(getKeyNames(dbScanner).contains("key5"));
+    Assert.assertFalse(getKeyNames(dbScanner).contains("key6"));
+    DBScanner.setLimit(1);
+    Assert.assertEquals(1, getKeyNames(dbScanner).size());
+    DBScanner.setLimit(-1);
+    try {
+      getKeyNames(dbScanner);
+      Assert.fail("IllegalArgumentException is expected");
+    } catch (IllegalArgumentException e) {
+      //ignore
+    }
+  }
+
+  private static List<String> getKeyNames(DBScanner dbScanner)
+            throws Exception {
+    keyNames.clear();
+    dbScanner.setTableName("keyTable");
+    dbScanner.call();
+    Assert.assertFalse(dbScanner.getScannedObjects().isEmpty());
+    for (Object o : dbScanner.getScannedObjects()){
+      OmKeyInfo keyInfo = (OmKeyInfo)o;
+      keyNames.add(keyInfo.getKeyName());
+    }
+    return keyNames;
+  }
+}
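
A subtlety the new test works around: "limit" is a static field that the
scanner decrements as it prints (see the limit-- in the DBScanner diff below),
so its value carries over between scans in the same JVM. Callers reusing a
scanner should reset it before each scan; a minimal sketch (100 simply
restores the default):

    // Restore the limit before rescanning; a leftover count from a
    // previous scan would otherwise cap this scan as well.
    DBScanner.setLimit(100);
    dbScanner.call();
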
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
deleted file mode 100644
index b1ce4ba..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * This class tests the CLI that transforms om.db into SQLite DB files.
- */
-public class TestOmSQLCli {
-
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-  private MiniOzoneCluster cluster = null;
-
-  private OzoneConfiguration conf;
-  private SQLCLI cli;
-
-  private String userName = "userTest";
-  private String adminName = "adminTest";
-  private String volumeName0 = "volumeTest0";
-  private String volumeName1 = "volumeTest1";
-  private String bucketName0 = "bucketTest0";
-  private String bucketName1 = "bucketTest1";
-  private String bucketName2 = "bucketTest2";
-  private String keyName0 = "key0";
-  private String keyName1 = "key1";
-  private String keyName2 = "key2";
-  private String keyName3 = "key3";
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    OzoneBucket bucket0 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName0);
-    OzoneBucket bucket1 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName1, bucketName1);
-    OzoneBucket bucket2 =
-        TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName2);
-
-    TestDataUtil.createKey(bucket0, keyName0, "");
-    TestDataUtil.createKey(bucket1, keyName1, "");
-    TestDataUtil.createKey(bucket2, keyName2, "");
-    TestDataUtil.createKey(bucket2, keyName3, "");
-
-    cluster.getOzoneManager().stop();
-    cluster.getStorageContainerManager().stop();
-    cli = new SQLCLI(conf);
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // After HDDS-357, we have to fix SQLCli.
-  // TODO: fix SQLCli
-  @Ignore
-  @Test
-  public void testOmDB() throws Exception {
-    String dbOutPath =  GenericTestUtils.getTempPath(
-        UUID.randomUUID() + "/out_sql.db");
-
-    String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
-    String dbPath = dbRootPath + "/" + OM_DB_NAME;
-    String[] args = {"-p", dbPath, "-o", dbOutPath};
-
-    cli.run(args);
-
-    Connection conn = connectDB(dbOutPath);
-    String sql = "SELECT * FROM volumeList";
-    ResultSet rs = executeQuery(conn, sql);
-    List<String> expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String userNameRs = rs.getString("userName");
-      String volumeNameRs = rs.getString("volumeName");
-      assertEquals(userName,  userNameRs.substring(1));
-      assertTrue(expectedValues.remove(volumeNameRs));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM volumeInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM aclInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      String type = rs.getString("type");
-      String uName = rs.getString("userName");
-      String rights = rs.getString("rights");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertEquals("USER", type);
-      assertEquals(userName, uName);
-      assertEquals("READ_WRITE", rights);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM bucketInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, String> expectedMap = new HashMap<>();
-    expectedMap.put(bucketName0, volumeName0);
-    expectedMap.put(bucketName2, volumeName0);
-    expectedMap.put(bucketName1, volumeName1);
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      boolean versionEnabled = rs.getBoolean("versionEnabled");
-      String storegeType = rs.getString("storageType");
-      assertEquals(volumeName, expectedMap.remove(bucketName));
-      assertFalse(versionEnabled);
-      assertEquals("DISK", storegeType);
-    }
-    assertEquals(0, expectedMap.size());
-
-    sql = "SELECT * FROM keyInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
-    // no data written, data size will be 0
-    expectedMap2.put(keyName0,
-        Arrays.asList(volumeName0, bucketName0, "0"));
-    expectedMap2.put(keyName1,
-        Arrays.asList(volumeName1, bucketName1, "0"));
-    expectedMap2.put(keyName2,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    expectedMap2.put(keyName3,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      String keyName = rs.getString("keyName");
-      int dataSize = rs.getInt("dataSize");
-      List<String> vals = expectedMap2.remove(keyName);
-      assertNotNull(vals);
-      assertEquals(vals.get(0), volumeName);
-      assertEquals(vals.get(1), bucketName);
-      assertEquals(vals.get(2), Integer.toString(dataSize));
-    }
-    assertEquals(0, expectedMap2.size());
-
-    conn.close();
-    Files.delete(Paths.get(dbOutPath));
-  }
-
-  private ResultSet executeQuery(Connection conn, String sql)
-      throws SQLException {
-    Statement stmt = conn.createStatement();
-    return stmt.executeQuery(sql);
-  }
-
-  private Connection connectDB(String dbPath) throws Exception {
-    Class.forName("org.sqlite.JDBC");
-    String connectPath =
-        String.format("jdbc:sqlite:%s", dbPath);
-    return DriverManager.getConnection(connectPath);
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 8eea23f..b1139df 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -61,25 +61,24 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
       description = "List Key -> Value instead of just Value.",
       defaultValue = "false",
       showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
-  private boolean withKey;
+  private static boolean withKey;
+
+  @CommandLine.Option(names = {"--length", "-l"},
+          description = "Maximum number of items to list")
+  private static int limit = 100;
 
   @CommandLine.ParentCommand
   private RDBParser parent;
 
   private HashMap<String, DBColumnFamilyDefinition> columnFamilyMap;
 
-  private static void displayTable(RocksDB rocksDB,
-        DBColumnFamilyDefinition dbColumnFamilyDefinition,
-        List<ColumnFamilyHandle> list, boolean withKey) throws IOException {
-    ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle(
-            dbColumnFamilyDefinition.getTableName()
-                    .getBytes(StandardCharsets.UTF_8), list);
-    if (columnFamilyHandle == null) {
-      throw new IllegalArgumentException("columnFamilyHandle is null");
-    }
-    RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle);
+  private List<Object> scannedObjects;
+
+  private static List<Object> displayTable(RocksIterator iterator,
+      DBColumnFamilyDefinition dbColumnFamilyDefinition) throws IOException {
+    List<Object> outputs = new ArrayList<>();
     iterator.seekToFirst();
-    while (iterator.isValid()){
+    while (iterator.isValid() && limit > 0) {
       StringBuilder result = new StringBuilder();
       if (withKey) {
         Object key = dbColumnFamilyDefinition.getKeyCodec()
@@ -90,11 +89,34 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
       }
       Object o = dbColumnFamilyDefinition.getValueCodec()
               .fromPersistedFormat(iterator.value());
+      outputs.add(o);
       Gson gson = new GsonBuilder().setPrettyPrinting().create();
       result.append(gson.toJson(o));
       System.out.println(result.toString());
+      limit--;
       iterator.next();
     }
+    return outputs;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public RDBParser getParent() {
+    return parent;
+  }
+
+  public void setParent(RDBParser parent) {
+    this.parent = parent;
+  }
+
+  public static void setLimit(int limit) {
+    DBScanner.limit = limit;
+  }
+
+  public List<Object> getScannedObjects() {
+    return scannedObjects;
   }
 
   private static ColumnFamilyHandle getColumnFamilyHandle(
@@ -150,6 +172,10 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
   private void printAppropriateTable(
           List<ColumnFamilyHandle> columnFamilyHandleList,
           RocksDB rocksDB, String dbPath) throws IOException {
+    if (limit < 1) {
+      throw new IllegalArgumentException(
+              "List length should be a positive number");
+    }
     dbPath = removeTrailingSlashIfNeeded(dbPath);
     this.constructColumnFamilyMap(DBDefinitionFactory.
             getDefinition(new File(dbPath).getName()));
@@ -159,8 +185,15 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
       } else {
         DBColumnFamilyDefinition columnFamilyDefinition =
                 this.columnFamilyMap.get(tableName);
-        displayTable(rocksDB, columnFamilyDefinition, columnFamilyHandleList,
-            withKey);
+        ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle(
+                columnFamilyDefinition.getTableName()
+                        .getBytes(StandardCharsets.UTF_8),
+                columnFamilyHandleList);
+        if (columnFamilyHandle == null) {
+          throw new IllegalArgumentException("columnFamilyHandle is null");
+        }
+        RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle);
+        scannedObjects = displayTable(iterator, columnFamilyDefinition);
       }
     } else {
       System.out.println("Incorrect db Path");
@@ -179,3 +212,4 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
     return RDBParser.class;
   }
 }
+
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java
index e18baaa..f133386 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java
@@ -49,6 +49,10 @@ public class RDBParser implements Callable<Void>, SubcommandWithParent {
     return dbPath;
   }
 
+  public void setDbPath(String dbPath) {
+    this.dbPath = dbPath;
+  }
+
   @Override
   public Class<?> getParentType() {
     return OzoneDebug.class;
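
Taken together, the new accessors (setDbPath on RDBParser; setParent,
setTableName, setLimit, and getScannedObjects on DBScanner) make the scanner
scriptable from tests and other tools. A minimal sketch mirroring
TestOmLDBCli, with an illustrative path and table name:

    RDBParser rdbParser = new RDBParser();
    rdbParser.setDbPath("/path/to/om.db");   // RocksDB directory to scan

    DBScanner dbScanner = new DBScanner();
    dbScanner.setParent(rdbParser);          // scanner reads the db path from its parent
    dbScanner.setTableName("keyTable");      // column family to dump
    DBScanner.setLimit(10);                  // print at most 10 entries

    dbScanner.call();                        // pretty-prints each value as JSON
    List<Object> values = dbScanner.getScannedObjects();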

