Posted to commits@ozone.apache.org by ra...@apache.org on 2021/04/07 03:25:52 UTC

[ozone] 25/29: HDDS-4790. Add a tool to parse entries in the prefix format (#1891)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 188861feb0e4e7c76b30195594c09ec0b4eb7679
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Mon Mar 15 16:23:51 2021 +0530

    HDDS-4790. Add a tool to parse entries in the prefix format (#1891)
---
 .../hdds/scm/pipeline/SCMPipelineManager.java      |   6 +-
 .../hadoop/ozone/om/helpers/OmDirectoryInfo.java   |   4 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  25 +--
 .../ozone/om/helpers/WithParentObjectId.java       |  55 +++++
 .../fs/ozone/TestOzoneFileSystemPrefixParser.java  | 180 ++++++++++++++++
 .../apache/hadoop/ozone/debug/PrefixParser.java    | 233 +++++++++++++++++++++
 6 files changed, 474 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index b22feab..3487b12 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -710,8 +710,10 @@ public class SCMPipelineManager implements
     pipelineFactory.shutdown();
     lock.writeLock().lock();
     try {
-      pipelineStore.close();
-      pipelineStore = null;
+      if (pipelineStore != null) {
+        pipelineStore.close();
+        pipelineStore = null;
+      }
     } catch (Exception ex) {
       LOG.error("Pipeline  store close failed", ex);
     } finally {
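
The SCMPipelineManager hunk above makes close() tolerant of being called more than once: the store reference is nulled out under the write lock, so a later invocation (for example, shutdown racing with an explicit close) skips the close instead of hitting a NullPointerException. A minimal, self-contained sketch of the same guarded-close pattern (the class and field names here are illustrative, not Ozone code):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class GuardedCloseSketch {
      private AutoCloseable store;          // stands in for pipelineStore
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      public GuardedCloseSketch(AutoCloseable store) {
        this.store = store;
      }

      public void close() {
        lock.writeLock().lock();
        try {
          if (store != null) {              // guard: later calls are no-ops
            store.close();
            store = null;
          }
        } catch (Exception ex) {
          System.err.println("store close failed: " + ex);  // log and continue
        } finally {
          lock.writeLock().unlock();
        }
      }
    }
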
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
index 4c82047..3d5d6a5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -28,9 +28,7 @@ import java.util.*;
  * in the user-given path and a pointer to its parent directory element in the
  * path. Also, it stores directory node related metadata details.
  */
-public class OmDirectoryInfo extends WithObjectID {
-  private long parentObjectID; // pointer to parent directory
-
+public class OmDirectoryInfo extends WithParentObjectId {
   private String name; // directory name
 
   private long creationTime;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index be68d9b..dd67cc1 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -42,7 +42,7 @@ import com.google.common.base.Preconditions;
  * This is returned from OM to client, and the client uses this class to talk to
  * datanode. Also, this is the metadata written to om.db on server side.
  */
-public final class OmKeyInfo extends WithObjectID {
+public final class OmKeyInfo extends WithParentObjectId {
   private final String volumeName;
   private final String bucketName;
   // name of key client specified
@@ -56,29 +56,6 @@ public final class OmKeyInfo extends WithObjectID {
   private FileEncryptionInfo encInfo;
 
   /**
-   * A pointer to parent directory used for path traversal. ParentID will be
-   * used only when the key is created into a FileSystemOptimized(FSO) bucket.
-   * <p>
-   * For example, if a key "a/b/key1" created into a FSOBucket then each
-   * path component will be assigned an ObjectId and linked to its parent path
-   * component using parent's objectID.
-   * <p>
-   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
-   * element.
-   * <p>
-   * ------------------------------------------|
-   * PathComponent |   ObjectID   |   ParentID |
-   * ------------------------------------------|
-   *      a        |     1024     |     512    |
-   * ------------------------------------------|
-   *      b        |     1025     |     1024   |
-   * ------------------------------------------|
-   *     key1      |     1026     |     1025   |
-   * ------------------------------------------|
-   */
-  private long parentObjectID;
-
-  /**
    * Represents leaf node name. This also will be used when the keyName is
    * created on a FileSystemOptimized(FSO) bucket. For example, the user given
    * keyName is "a/b/key1" then the fileName stores "key1".
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
new file mode 100644
index 0000000..79a135a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * Object ID with additional parent ID field.
+ */
+public class WithParentObjectId extends WithObjectID {
+  /**
+   * A pointer to the parent directory, used for path traversal. The parent
+   * ID is set only when the key is created in a FileSystemOptimized (FSO)
+   * bucket.
+   * <p>
+   * For example, if a key "a/b/key1" is created in an FSO bucket, then each
+   * path component is assigned an ObjectID and linked to its parent path
+   * component using the parent's ObjectID.
+   * <p>
+   * Say the bucket's ObjectID is 512; it is the parent of its immediate
+   * child element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *     key1      |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  @SuppressWarnings("visibilitymodifier")
+  protected long parentObjectID;
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+}
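
To make the ObjectID/ParentID table above concrete: each path component is stored under a key derived from its parent's ObjectID, so resolving "a/b/key1" is a chain of single-component lookups. A self-contained sketch using the hypothetical ids from the Javadoc table (the real lookups go through the OM directory table, as the PrefixParser below shows):

    import java.util.HashMap;
    import java.util.Map;

    public class FsoLookupSketch {
      public static void main(String[] args) {
        // Prefix-format keys: "<parentObjectId>/<name>" -> child ObjectID.
        Map<String, Long> dirTable = new HashMap<>();
        dirTable.put("512/a", 1024L);      // bucket (512) -> a
        dirTable.put("1024/b", 1025L);     // a -> b
        dirTable.put("1025/key1", 1026L);  // b -> key1

        long parentId = 512L;              // start at the bucket's ObjectID
        for (String name : new String[] {"a", "b", "key1"}) {
          // The child's ObjectID becomes the parent id for the next hop.
          parentId = dirTable.get(parentId + "/" + name);
        }
        System.out.println(parentId);      // 1026, the ObjectID of "key1"
      }
    }
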
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
new file mode 100644
index 0000000..3f18fae
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.debug.PrefixParser;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Test Ozone Prefix Parser.
+ */
+public class TestOzoneFileSystemPrefixParser {
+
+  private MiniOzoneCluster cluster = null;
+
+  private FileSystem fs;
+
+  private String volumeName;
+
+  private String bucketName;
+
+  private OzoneConfiguration configuration;
+
+  @Before
+  public void init() throws Exception {
+    volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+
+    configuration = new OzoneConfiguration();
+
+    TestOMRequestUtils.configureFSOptimizedPaths(configuration,
+        true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+
+    cluster = MiniOzoneCluster.newBuilder(configuration)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
+
+    String rootPath = String
+        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
+            volumeName);
+    fs = FileSystem.get(new URI(rootPath + "/test.txt"), configuration);
+  }
+
+  @After
+  public void teardown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @Test
+  public void testPrefixParseDir() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/c/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+    PrefixParser parser = new PrefixParser();
+
+    parser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        dir.getParent().getParent().toString());
+
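+    // Expected stats: 1 volume, 1 bucket, 3 intermediate dirs (a, b, c),
+    // no missing component, plus 1 file (file1) and 1 dir (d) under /a/b/c.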
+    assertPrefixStats(parser, 1, 1, 3, 0, 1, 1);
+  }
+
+  @Test
+  public void testPrefixParseFile() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+    PrefixParser parser = new PrefixParser();
+
+    parser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+
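+    // Expected stats: a and b resolve (2 intermediate dirs), file1 is not in
+    // the directory table (1 non-existent dir), and the dump at /a/b still
+    // finds directory c and file file1.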
+    assertPrefixStats(parser, 1, 1, 2, 1, 1, 1);
+  }
+
+  private void assertPrefixStats(PrefixParser parser, int volumeCount,
+      int bucketCount, int intermediateDirCount, int nonExistentDirCount,
+      int fileCount, int dirCount) {
+    Assert.assertEquals(volumeCount,
+        parser.getParserStats(PrefixParser.Types.VOLUME));
+    Assert.assertEquals(bucketCount,
+        parser.getParserStats(PrefixParser.Types.BUCKET));
+    Assert.assertEquals(intermediateDirCount,
+        parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY));
+    Assert.assertEquals(nonExistentDirCount,
+        parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY));
+    Assert.assertEquals(fileCount,
+        parser.getParserStats(PrefixParser.Types.FILE));
+    Assert.assertEquals(dirCount,
+        parser.getParserStats(PrefixParser.Types.DIRECTORY));
+  }
+
+  @Test
+  public void testPrefixParseWithInvalidPaths() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+
+    PrefixParser invalidVolumeParser = new PrefixParser();
+    String invalidVolumeName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidVolumeParser.parse(invalidVolumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidVolumeParser, 0, 0, 0, 0, 0, 0);
+
+    PrefixParser invalidBucketParser = new PrefixParser();
+    String invalidBucketName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidBucketParser.parse(volumeName, invalidBucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidBucketParser, 1, 0, 0, 0, 0, 0);
+
+    Path invalidIntermediateDir = new Path(file.getParent(), "xyz");
+    PrefixParser invalidIntermediateDirParser = new PrefixParser();
+    invalidIntermediateDirParser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        invalidIntermediateDir.toString());
+
+    assertPrefixStats(invalidIntermediateDirParser, 1, 1, 2, 1, 1, 1);
+  }
+
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
new file mode 100644
index 0000000..4c257dc
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import java.nio.file.Path;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+import picocli.CommandLine.Model.CommandSpec;
+import picocli.CommandLine.Spec;
+
+/**
+ * Tool that parses the OM db file for entries in the prefix table.
+ */
+@CommandLine.Command(
+    name = "prefix",
+    description = "Parse prefix contents")
+@MetaInfServices(SubcommandWithParent.class)
+public class PrefixParser implements Callable<Void>, SubcommandWithParent {
+
+  public enum Types {
+    VOLUME,
+    BUCKET,
+    FILE,
+    DIRECTORY,
+    INTERMEDIATE_DIRECTORY,
+    NON_EXISTENT_DIRECTORY,
+  }
+
+  private final int[] parserStats = new int[Types.values().length];
+
+  @Spec
+  private CommandSpec spec;
+
+  @CommandLine.Option(names = {"--db"},
+      required = true,
+      description = "Database File Path")
+  private String dbPath;
+
+  @CommandLine.Option(names = {"--path"},
+      required = true,
+      description = "prefixFile Path")
+  private String filePath;
+
+  @CommandLine.Option(names = {"--bucket"},
+      required = true,
+      description = "bucket name")
+  private String bucket;
+
+  @CommandLine.Option(names = {"--volume"},
+      required = true,
+      description = "volume name")
+  private String volume;
+
+  public String getDbPath() {
+    return dbPath;
+  }
+
+  public void setDbPath(String dbPath) {
+    this.dbPath = dbPath;
+  }
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    parse(volume, bucket, dbPath, filePath);
+    return null;
+  }
+
+  public static void main(String[] args) throws Exception {
+    new PrefixParser().call();
+  }
+
+  public void parse(String vol, String buck, String db,
+                    String file) throws Exception {
+    if (!Files.exists(Paths.get(db))) {
+      System.out.println("DB path does not exist: " + db);
+      return;
+    }
+
+    System.out.println("FilePath is:" + file);
+    System.out.println("Db Path is:" + db);
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, db);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+
+    OmMetadataManagerImpl metadataManager =
+        new OmMetadataManagerImpl(conf);
+    metadataManager.start(conf);
+
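+    // org.apache.hadoop.fs.Path is spelled out to avoid clashing with the
+    // java.nio.file.Path used for component iteration below.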
+    org.apache.hadoop.fs.Path effectivePath =
+        new org.apache.hadoop.fs.Path("/");
+
+    Path p = Paths.get(file);
+
+    String volumeKey = metadataManager.getVolumeKey(vol);
+    if (!metadataManager.getVolumeTable().isExist(volumeKey)) {
+      System.out.println("Invalid Volume:" + vol);
+      metadataManager.stop();
+      return;
+    }
+
+    parserStats[Types.VOLUME.ordinal()]++;
+    // First get the info about the bucket
+    String bucketKey = metadataManager.getBucketKey(vol, buck);
+    OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
+    if (info == null) {
+      System.out.println("Invalid Bucket:" + buck);
+      metadataManager.stop();
+      return;
+    }
+
+    long lastObjectId = info.getObjectID();
+    WithParentObjectId objectBucketId = new WithParentObjectId();
+    objectBucketId.setObjectID(lastObjectId);
+    dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
+
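+    // Walk the user-supplied path one component at a time. Each hop looks the
+    // component up in the directory table under the prefix-format key
+    // "<parentObjectId>/<name>" built by getOzonePathKey().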
+    Iterator<Path> pathIterator = p.iterator();
+    while (pathIterator.hasNext()) {
+      Path elem = pathIterator.next();
+      String path =
+          metadataManager.getOzonePathKey(lastObjectId, elem.toString());
+      OmDirectoryInfo directoryInfo =
+          metadataManager.getDirectoryTable().get(path);
+
+      org.apache.hadoop.fs.Path tmpPath =
+          getEffectivePath(effectivePath, elem.toString());
+      if (directoryInfo == null) {
+        System.out.println("Given path contains a non-existent directory at:" +
+            tmpPath);
+        System.out.println("Dumping files and dirs at level:" +
+            tmpPath.getParent());
+        System.out.println();
+        parserStats[Types.NON_EXISTENT_DIRECTORY.ordinal()]++;
+        break;
+      }
+
+      effectivePath = tmpPath;
+
+      dumpInfo(Types.INTERMEDIATE_DIRECTORY, effectivePath,
+          directoryInfo, path);
+      lastObjectId = directoryInfo.getObjectID();
+    }
+
+    // at the last level, now parse both file and dir table
+    dumpTableInfo(Types.DIRECTORY, effectivePath,
+        metadataManager.getDirectoryTable(), lastObjectId);
+
+    dumpTableInfo(Types.FILE, effectivePath,
+        metadataManager.getKeyTable(), lastObjectId);
+    metadataManager.stop();
+  }
+
+  private void dumpTableInfo(Types type,
+      org.apache.hadoop.fs.Path effectivePath,
+      Table<String, ? extends WithParentObjectId> table, long lastObjectId)
+      throws IOException {
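+    // Children of a directory share its ObjectID as their key prefix, so a
+    // prefix filter on lastObjectId selects exactly the immediate children.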
+    MetadataKeyFilters.KeyPrefixFilter filter = getPrefixFilter(lastObjectId);
+
+    List<? extends KeyValue
+        <String, ? extends WithParentObjectId>> infoList =
+        table.getRangeKVs(null, 1000, filter);
+
+    for (KeyValue<String, ? extends WithParentObjectId> info : infoList) {
+      Path key = Paths.get(info.getKey());
+      dumpInfo(type, getEffectivePath(effectivePath,
+          key.getName(1).toString()), info.getValue(), info.getKey());
+    }
+  }
+
+  private org.apache.hadoop.fs.Path getEffectivePath(
+      org.apache.hadoop.fs.Path currentPath, String name) {
+    return new org.apache.hadoop.fs.Path(currentPath, name);
+  }
+
+  private void dumpInfo(Types level, org.apache.hadoop.fs.Path effectivePath,
+                        WithParentObjectId id, String key) {
+    parserStats[level.ordinal()]++;
+    System.out.println("Type: " + level);
+    System.out.println("Path: " + effectivePath);
+    System.out.println("DB Path: " + key);
+    System.out.println("Object Id: " + id.getObjectID());
+    System.out.println("Parent object Id: " + id.getParentObjectID());
+    System.out.println();
+  }
+
+  private static MetadataKeyFilters.KeyPrefixFilter getPrefixFilter(long id) {
+    return (new MetadataKeyFilters.KeyPrefixFilter())
+        .addFilter(Long.toString(id));
+  }
+
+  public int getParserStats(Types type) {
+    return parserStats[type.ordinal()];
+  }
+}
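
Picocli wires PrefixParser in as a subcommand of OzoneDebug, so on a cluster build it should be reachable as something like `ozone debug prefix --db=<om-db-dir> --volume=<vol> --bucket=<bucket> --path=<path>` (the option names come from the annotations above; the exact `ozone debug` entry point is an assumption here). It can also be driven programmatically, as the integration test does; a minimal sketch with placeholder paths:

    import org.apache.hadoop.ozone.debug.PrefixParser;

    public class PrefixParserExample {
      public static void main(String[] args) throws Exception {
        PrefixParser parser = new PrefixParser();
        // Placeholders: the OM db directory and the path to inspect.
        parser.parse("vol1", "bucket1", "/var/lib/ozone/om/db", "/a/b/c");
        System.out.println("files: "
            + parser.getParserStats(PrefixParser.Types.FILE));
      }
    }
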

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org