Posted to commits@ozone.apache.org by na...@apache.org on 2022/11/17 09:27:47 UTC

[ozone] branch HDDS-6517-Snapshot updated: HDDS-7410. Ozone snapshot diff skeleton code. (#3885)

This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch HDDS-6517-Snapshot
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-6517-Snapshot by this push:
     new 83072e0f8d HDDS-7410. Ozone snapshot diff skeleton code. (#3885)
83072e0f8d is described below

commit 83072e0f8df6ee45b1999b028888d3edfe1c9f0d
Author: Nandakumar <na...@apache.org>
AuthorDate: Thu Nov 17 14:57:40 2022 +0530

    HDDS-7410. Ozone snapshot diff skeleton code. (#3885)
---
 hadoop-hdds/rocksdb-checkpoint-differ/pom.xml      |   2 +
 .../ozone/rocksdb/util/ManagedSstFileReader.java   | 128 +++++++++++
 .../org/apache/ozone/rocksdb/util/RdbUtil.java     |  62 ++++++
 .../apache/ozone/rocksdb/util/package-info.java    |  22 ++
 hadoop-ozone/ozone-manager/pom.xml                 |   4 +
 .../org/apache/hadoop/ozone/om/OmSnapshot.java     |   4 +
 .../apache/hadoop/ozone/om/OmSnapshotManager.java  |  43 ++++
 .../ozone/om/snapshot/SnapshotDiffManager.java     | 235 +++++++++++++++++++++
 .../ozone/om/snapshot/SnapshotDiffReport.java      | 168 +++++++++++++++
 hadoop-ozone/pom.xml                               |   5 +
 10 files changed, 673 insertions(+)

diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index 568a6cc6f8..0c03876df0 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -165,6 +165,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
                     <allowedImport>org.rocksdb.RocksDBException</allowedImport>
                     <allowedImport>org.rocksdb.SstFileReader</allowedImport>
                     <allowedImport>org.rocksdb.TableProperties</allowedImport>
+                    <allowedImport>org.rocksdb.ReadOptions</allowedImport>
+                    <allowedImport>org.rocksdb.SstFileReaderIterator</allowedImport>
                   </allowedImports>
                   <exclusion>org.apache.hadoop.hdds.utils.db.managed.*</exclusion>
                 </RestrictImports>
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java
new file mode 100644
index 0000000000..cf8e59331d
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.rocksdb.util;
+
+import org.rocksdb.Options;
+import org.rocksdb.ReadOptions;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.SstFileReader;
+import org.rocksdb.SstFileReaderIterator;
+
+import java.io.Closeable;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * ManagedSstFileReader provides an abstraction layer for transparently
+ * iterating over the keys of multiple underlying SST files.
+ */
+public class ManagedSstFileReader {
+
+  private final Collection<String> sstFiles;
+
+  public ManagedSstFileReader(final Collection<String> sstFiles) {
+    this.sstFiles = sstFiles;
+  }
+  public Stream<String> getKeyStream() throws RocksDBException {
+    final ManagedSstFileIterator itr = new ManagedSstFileIterator(sstFiles);
+    final Spliterator<String> spliterator = Spliterators
+        .spliteratorUnknownSize(itr, 0);
+    return StreamSupport.stream(spliterator, false).onClose(itr::close);
+  }
+
+  private static final class ManagedSstFileIterator implements
+      Iterator<String>, Closeable {
+
+    private final Iterator<String> fileNameIterator;
+    private final Options options;
+    private final ReadOptions readOptions;
+    private String currentFile;
+    private SstFileReader currentFileReader;
+    private SstFileReaderIterator currentFileIterator;
+
+    private ManagedSstFileIterator(Collection<String> files)
+        throws RocksDBException {
+      // TODO: Check if default Options and ReadOptions is enough.
+      this.options = new Options();
+      this.readOptions = new ReadOptions();
+      this.fileNameIterator = files.iterator();
+      moveToNextFile();
+    }
+
+    @Override
+    public boolean hasNext() {
+      try {
+        do {
+          if (currentFileIterator != null && currentFileIterator.isValid()) {
+            return true;
+          }
+        } while (moveToNextFile());
+      } catch (RocksDBException e) {
+        // TODO: This exception has to be handled by the caller.
+        //  We have to do better exception handling.
+        throw new RuntimeException(e);
+      }
+      return false;
+    }
+
+    @Override
+    public String next() {
+      if (hasNext()) {
+        final String value = new String(currentFileIterator.key(), UTF_8);
+        currentFileIterator.next();
+        return value;
+      }
+      throw new NoSuchElementException("No more keys");
+    }
+
+    @Override
+    public void close() {
+      closeCurrentFile();
+    }
+
+    private boolean moveToNextFile() throws RocksDBException {
+      if (fileNameIterator.hasNext()) {
+        closeCurrentFile();
+        currentFile = fileNameIterator.next();
+        currentFileReader = new SstFileReader(options);
+        currentFileReader.open(currentFile);
+        currentFileIterator = currentFileReader.newIterator(readOptions);
+        currentFileIterator.seekToFirst();
+        return true;
+      }
+      return false;
+    }
+
+    private void closeCurrentFile() {
+      if (currentFile != null) {
+        currentFileIterator.close();
+        currentFileReader.close();
+        currentFile = null;
+      }
+    }
+  }
+
+}
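
For context, a minimal usage sketch of the reader added above (illustrative only, not part of this patch; the SST file paths are hypothetical). The returned stream registers an onClose hook that releases the underlying SstFileReader, so callers should close it, e.g. with try-with-resources:

    import org.apache.ozone.rocksdb.util.ManagedSstFileReader;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Stream;

    public final class SstKeyDumpExample {
      public static void main(String[] args) throws RocksDBException {
        // Make sure the RocksDB JNI library is loaded before use.
        RocksDB.loadLibrary();

        // Hypothetical SST file paths; in the snapshot diff flow these come
        // from the snapshots' key tables.
        List<String> sstFiles = Arrays.asList(
            "/tmp/om.db/000012.sst",
            "/tmp/om.db/000015.sst");

        // Closing the stream also closes the current SstFileReader.
        try (Stream<String> keys =
                 new ManagedSstFileReader(sstFiles).getKeyStream()) {
          keys.forEach(System.out::println);
        }
      }
    }
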
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java
new file mode 100644
index 0000000000..15757df7b5
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.rocksdb.util;
+
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.DBOptions;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+
+import java.io.File;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Temporary class to test snapshot diff functionality.
+ * This should be removed later.
+ */
+public final class RdbUtil {
+
+  private RdbUtil() { }
+
+  public static Set<String> getKeyTableSSTFiles(final String dbLocation)
+      throws RocksDBException {
+    final List<ColumnFamilyHandle> columnFamilyHandles  = new ArrayList<>();
+    final List<ColumnFamilyDescriptor> cfd = new ArrayList<>();
+    cfd.add(
+        new ColumnFamilyDescriptor(
+            "keyTable".getBytes(StandardCharsets.UTF_8)));
+    cfd.add(
+        new ColumnFamilyDescriptor(
+            "default".getBytes(StandardCharsets.UTF_8)));
+    try (DBOptions options = new DBOptions();
+         RocksDB rocksDB = RocksDB.openReadOnly(options, dbLocation,
+             cfd, columnFamilyHandles)) {
+      return rocksDB.getLiveFilesMetaData().stream().map(lfm ->
+              new File(lfm.path(), lfm.fileName()).getPath())
+          .collect(Collectors.toCollection(HashSet::new));
+    }
+  }
+
+}
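
Note that the helper above returns every live SST file of the DB, not only the files of the keyTable column family. If per-column-family filtering becomes necessary, one possible variant (a sketch only; it relies on LiveFileMetaData.columnFamilyName() from RocksJava and assumes a non-default column family name) could look like this:

    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.DBOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    public final class CfSstFileUtil {
      private CfSstFileUtil() { }

      /** Returns the live SST files that belong to the given column family. */
      public static Set<String> getSstFilesForColumnFamily(
          final String dbLocation, final String columnFamily)
          throws RocksDBException {
        final List<ColumnFamilyHandle> handles = new ArrayList<>();
        final List<ColumnFamilyDescriptor> cfd = new ArrayList<>();
        cfd.add(new ColumnFamilyDescriptor(
            columnFamily.getBytes(StandardCharsets.UTF_8)));
        cfd.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
        try (DBOptions options = new DBOptions();
             RocksDB rocksDB = RocksDB.openReadOnly(options, dbLocation,
                 cfd, handles)) {
          // Keep only the live files whose column family matches.
          return rocksDB.getLiveFilesMetaData().stream()
              .filter(lfm -> columnFamily.equals(
                  new String(lfm.columnFamilyName(), StandardCharsets.UTF_8)))
              .map(lfm -> new File(lfm.path(), lfm.fileName()).getPath())
              .collect(Collectors.toCollection(HashSet::new));
        }
      }
    }
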
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/package-info.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/package-info.java
new file mode 100644
index 0000000000..9291ecda6b
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains utility classes related to rocksdb.
+ */
+package org.apache.ozone.rocksdb.util;
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 6d72aa6a7e..8c274912eb 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -82,6 +82,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-hadoop-dependency-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>rocksdb-checkpoint-differ</artifactId>
+    </dependency>
 
     <dependency>
       <groupId>org.bouncycastle</groupId>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java
index fa708e6a09..4c0d14f41a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java
@@ -230,6 +230,10 @@ public class OmSnapshot implements IOmMetadataReader, Closeable {
       .build();
   }
 
+  public String getName() {
+    return snapshotName;
+  }
+
   @Override
   public void close() throws IOException {
     omMetadataManager.getStore().close();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 3e0057c981..9691fb366d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -27,11 +27,15 @@ import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus;
 
 import java.io.IOException;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport;
 import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.rocksdb.RocksDBException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,6 +53,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_
  */
 public final class OmSnapshotManager {
   private final OzoneManager ozoneManager;
+  private final SnapshotDiffManager snapshotDiffManager;
   private final LoadingCache<String, OmSnapshot> snapshotCache;
 
   private static final Logger LOG =
@@ -57,6 +62,8 @@ public final class OmSnapshotManager {
   OmSnapshotManager(OzoneManager ozoneManager) {
     this.ozoneManager = ozoneManager;
 
+    this.snapshotDiffManager = new SnapshotDiffManager();
+
     // size of lru cache
     int cacheSize = ozoneManager.getConfiguration().getInt(
         OzoneConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE,
@@ -217,4 +224,40 @@ public final class OmSnapshotManager {
     return (keyParts.length > 1) &&
         (keyParts[0].compareTo(OM_SNAPSHOT_INDICATOR) == 0);
   }
+
+  public SnapshotDiffReport getSnapshotDiffReport(final String volume,
+                                                  final String bucket,
+                                                  final String fromSnapshot,
+                                                  final String toSnapshot)
+      throws IOException {
+    // Validate fromSnapshot and toSnapshot
+    final SnapshotInfo fsInfo = getSnapshotInfo(volume, bucket, fromSnapshot);
+    final SnapshotInfo tsInfo = getSnapshotInfo(volume, bucket, toSnapshot);
+    verifySnapshotInfoForSnapDiff(fsInfo, tsInfo);
+
+    final String fsKey = SnapshotInfo.getTableKey(volume, bucket, fromSnapshot);
+    final String tsKey = SnapshotInfo.getTableKey(volume, bucket, toSnapshot);
+    try {
+      final OmSnapshot fs = snapshotCache.get(fsKey);
+      final OmSnapshot ts = snapshotCache.get(tsKey);
+      return snapshotDiffManager.getSnapshotDiffReport(volume, bucket, fs, ts);
+    } catch (ExecutionException | RocksDBException e) {
+      throw new IOException(e.getCause());
+    }
+  }
+
+  private void verifySnapshotInfoForSnapDiff(final SnapshotInfo fromSnapshot,
+                                             final SnapshotInfo toSnapshot)
+      throws IOException {
+    if ((fromSnapshot.getSnapshotStatus() != SnapshotStatus.SNAPSHOT_ACTIVE) ||
+        (toSnapshot.getSnapshotStatus() != SnapshotStatus.SNAPSHOT_ACTIVE)) {
+      // TODO: throw custom snapshot exception.
+      throw new IOException("Cannot generate snapshot diff for non-active " +
+          "snapshots.");
+    }
+    if (fromSnapshot.getCreationTime() > toSnapshot.getCreationTime()) {
+      throw new IOException("fromSnapshot:" + fromSnapshot.getName() +
+          " should be older than to toSnapshot:" + toSnapshot.getName());
+    }
+  }
 }
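
An illustrative, hypothetical caller of the new snapshot diff API (the manager instance comes from a running OzoneManager; volume, bucket and snapshot names are placeholders):

    import org.apache.hadoop.ozone.om.OmSnapshotManager;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffReportEntry;

    import java.io.IOException;

    final class SnapshotDiffCallerSketch {
      private SnapshotDiffCallerSketch() { }

      static void printDiff(OmSnapshotManager snapshotManager)
          throws IOException {
        // Placeholder names; fromSnapshot must be older than toSnapshot and
        // both must be in SNAPSHOT_ACTIVE state, or an IOException is thrown.
        SnapshotDiffReport report = snapshotManager.getSnapshotDiffReport(
            "vol1", "bucket1", "snap-old", "snap-new");
        for (DiffReportEntry entry : report.getDiffList()) {
          // Each entry renders as "<label>\t<path>", plus " -> <target>"
          // for renames.
          System.out.println(entry);
        }
      }
    }
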
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
new file mode 100644
index 0000000000..b0edc18745
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.snapshot;
+
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffType;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffReportEntry;
+
+import org.apache.ozone.rocksdb.util.ManagedSstFileReader;
+import org.apache.ozone.rocksdb.util.RdbUtil;
+import org.rocksdb.RocksDBException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Stream;
+
+/**
+ * Class to generate snapshot diff.
+ */
+public class SnapshotDiffManager {
+
+  public SnapshotDiffReport getSnapshotDiffReport(final String volume,
+                                                  final String bucket,
+                                                  final OmSnapshot fromSnapshot,
+                                                  final OmSnapshot toSnapshot)
+      throws IOException, RocksDBException {
+
+    // TODO: Once RocksDBCheckpointDiffer exposes method to get list
+    //  of delta SST files, plug it in here.
+
+    Set<String> fromSnapshotFiles = RdbUtil.getKeyTableSSTFiles(fromSnapshot
+        .getMetadataManager().getStore().getDbLocation().getPath());
+    Set<String> toSnapshotFiles = RdbUtil.getKeyTableSSTFiles(toSnapshot
+        .getMetadataManager().getStore().getDbLocation().getPath());
+
+    final Set<String> deltaFiles = new HashSet<>();
+    deltaFiles.addAll(fromSnapshotFiles);
+    deltaFiles.addAll(toSnapshotFiles);
+
+    // TODO: Filter out the files.
+
+    final Stream<String> keysToCheck = new ManagedSstFileReader(deltaFiles)
+        .getKeyStream();
+
+    final BucketLayout bucketLayout = getBucketLayout(volume, bucket,
+        fromSnapshot.getMetadataManager());
+
+    final Table<String, OmKeyInfo> fsKeyTable = fromSnapshot
+        .getMetadataManager().getKeyTable(bucketLayout);
+    final Table<String, OmKeyInfo> tsKeyTable = toSnapshot
+        .getMetadataManager().getKeyTable(bucketLayout);
+
+    /*
+     * The reason for having ObjectID to KeyName mapping instead of OmKeyInfo
+     * is to reduce the memory footprint.
+     */
+    final Map<Long, String> oldObjIdToKeyMap = new HashMap<>();
+    // Long --> const. length
+    // String --> var. length "/dir1/dir2/dir3/dir4/dir5/key1"
+    final Map<Long, String> newObjIdToKeyMap = new HashMap<>();
+
+    final Set<Long> objectIDsToCheck = new HashSet<>();
+
+    keysToCheck.forEach(key -> {
+      try {
+        final OmKeyInfo oldKey = fsKeyTable.get(key);
+        final OmKeyInfo newKey = tsKeyTable.get(key);
+        if (areKeysEqual(oldKey, newKey)) {
+          // We don't have to do anything.
+          return;
+        }
+        if (oldKey != null) {
+          final long oldObjId = oldKey.getObjectID();
+          oldObjIdToKeyMap.put(oldObjId, oldKey.getKeyName());
+          objectIDsToCheck.add(oldObjId);
+        }
+        if (newKey != null) {
+          final long newObjId = newKey.getObjectID();
+          newObjIdToKeyMap.put(newObjId, newKey.getKeyName());
+          objectIDsToCheck.add(newObjId);
+        }
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    });
+    keysToCheck.close();
+
+    return new SnapshotDiffReport(volume, bucket, fromSnapshot.getName(),
+        toSnapshot.getName(), generateDiffReport(objectIDsToCheck,
+        oldObjIdToKeyMap, newObjIdToKeyMap));
+  }
+
+  private List<DiffReportEntry> generateDiffReport(
+      final Set<Long> objectIDsToCheck,
+      final Map<Long, String> oldObjIdToKeyMap,
+      final Map<Long, String> newObjIdToKeyMap) {
+
+    final List<DiffReportEntry> deleteDiffs = new ArrayList<>();
+    final List<DiffReportEntry> renameDiffs = new ArrayList<>();
+    final List<DiffReportEntry> createDiffs = new ArrayList<>();
+    final List<DiffReportEntry> modifyDiffs = new ArrayList<>();
+
+
+    for (Long id : objectIDsToCheck) {
+      /*
+       * This key can be
+       * -> Created after the old snapshot was taken, which means it will be
+       *    missing in oldKeyTable and present in newKeyTable.
+       * -> Deleted after the old snapshot was taken, which means it will be
+       *    present in oldKeyTable and missing in newKeyTable.
+       * -> Modified after the old snapshot was taken, which means it will be
+       *    present in oldKeyTable and present in newKeyTable with same
+       *    Object ID but with different metadata.
+       * -> Renamed after the old snapshot was taken, which means it will be
+       *    present in oldKeyTable and present in newKeyTable but with different
+       *    name and same Object ID.
+       */
+
+      final String oldKeyName = oldObjIdToKeyMap.get(id);
+      final String newKeyName = newObjIdToKeyMap.get(id);
+
+      if (oldKeyName == null && newKeyName == null) {
+        // This cannot happen.
+        continue;
+      }
+
+      // Key Created.
+      if (oldKeyName == null) {
+        createDiffs.add(DiffReportEntry.of(DiffType.CREATE, newKeyName));
+        continue;
+      }
+
+      // Key Deleted.
+      if (newKeyName == null) {
+        deleteDiffs.add(DiffReportEntry.of(DiffType.DELETE, oldKeyName));
+        continue;
+      }
+
+      // Key modified.
+      if (oldKeyName.equals(newKeyName)) {
+        modifyDiffs.add(DiffReportEntry.of(DiffType.MODIFY, newKeyName));
+        continue;
+      }
+
+      // Key Renamed.
+      renameDiffs.add(DiffReportEntry.of(DiffType.RENAME,
+          oldKeyName, newKeyName));
+    }
+    /*
+     * The order in which snap-diff should be applied
+     *
+     *     1. Delete diffs
+     *     2. Rename diffs
+     *     3. Create diffs
+     *     4. Modified diffs
+     *
+     * Consider the following scenario
+     *
+     *    1. File "A" is created.
+     *    2. File "B" is created.
+     *    3. File "C" is created.
+     *    Snapshot "1" is taken.
+     *
+     * Case 1:
+     *   1. File "A" is deleted.
+     *   2. File "B" is renamed to "A".
+     *   Snapshot "2" is taken.
+     *
+     *   Snapshot diff should be applied in the following order:
+     *    1. Delete "A"
+     *    2. Rename "B" to "A"
+     *
+     *
+     * Case 2:
+     *    1. File "B" is renamed to "C".
+     *    2. File "B" is created.
+     *    Snapshot "2" is taken.
+     *
+     *   Snapshot diff should be applied in the following order:
+     *    1. Rename "B" to "C"
+     *    2. Create "B"
+     *
+     */
+
+    final List<DiffReportEntry> snapshotDiffs = new ArrayList<>();
+    snapshotDiffs.addAll(deleteDiffs);
+    snapshotDiffs.addAll(renameDiffs);
+    snapshotDiffs.addAll(createDiffs);
+    snapshotDiffs.addAll(modifyDiffs);
+    return snapshotDiffs;
+  }
+
+  private BucketLayout getBucketLayout(final String volume,
+                                       final String bucket,
+                                       final OMMetadataManager mManager)
+      throws IOException {
+    final String bucketTableKey = mManager.getBucketKey(volume, bucket);
+    return mManager.getBucketTable().get(bucketTableKey).getBucketLayout();
+  }
+
+  private boolean areKeysEqual(OmKeyInfo oldKey, OmKeyInfo newKey) {
+    if (oldKey == null && newKey == null) {
+      return true;
+    }
+    if (oldKey != null) {
+      return oldKey.equals(newKey);
+    }
+    return false;
+  }
+}
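
To make the object ID based classification and report ordering concrete, a small self-contained demo of Case 1 above (object IDs and key names are invented; the real code derives them from the snapshots' key tables):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public final class DiffClassificationDemo {
      public static void main(String[] args) {
        // Invented object IDs: before the diff, "A" has id 1 and "B" has id 2.
        // Afterwards "A" was deleted and "B" was renamed to "A" (Case 1 above).
        Map<Long, String> oldObjIdToKey = new HashMap<>();
        oldObjIdToKey.put(1L, "A");
        oldObjIdToKey.put(2L, "B");
        Map<Long, String> newObjIdToKey = new HashMap<>();
        newObjIdToKey.put(2L, "A");

        Set<Long> idsToCheck = new LinkedHashSet<>(oldObjIdToKey.keySet());
        idsToCheck.addAll(newObjIdToKey.keySet());

        List<String> deletes = new ArrayList<>();
        List<String> renames = new ArrayList<>();
        List<String> creates = new ArrayList<>();
        List<String> modifies = new ArrayList<>();

        for (Long id : idsToCheck) {
          String oldName = oldObjIdToKey.get(id);
          String newName = newObjIdToKey.get(id);
          if (oldName == null) {
            creates.add("+ " + newName);                  // created
          } else if (newName == null) {
            deletes.add("- " + oldName);                  // deleted
          } else if (oldName.equals(newName)) {
            modifies.add("M " + newName);                 // same name, same ID
          } else {
            renames.add("R " + oldName + " -> " + newName);
          }
        }

        // Report order: deletes, renames, creates, modifications.
        List<String> ordered = new ArrayList<>(deletes);
        ordered.addAll(renames);
        ordered.addAll(creates);
        ordered.addAll(modifies);
        ordered.forEach(System.out::println);             // "- A", then "R B -> A"
      }
    }
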
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffReport.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffReport.java
new file mode 100644
index 0000000000..107512c509
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffReport.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.snapshot;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Snapshot diff report.
+ */
+public class SnapshotDiffReport {
+
+  private static final String LINE_SEPARATOR = System.getProperty(
+      "line.separator", "\n");
+
+  /**
+   * Types of the difference, which include CREATE, MODIFY, DELETE, and RENAME.
+   * Each type has a label for representation:
+   * +  CREATE
+   * M  MODIFY
+   * -  DELETE
+   * R  RENAME
+   */
+  public enum DiffType {
+    CREATE("+"),
+    MODIFY("M"),
+    DELETE("-"),
+    RENAME("R");
+
+    private final String label;
+
+    DiffType(String label) {
+      this.label = label;
+    }
+
+    public String getLabel() {
+      return label;
+    }
+  }
+
+  /**
+   * Snapshot diff report entry.
+   */
+  public static final class DiffReportEntry {
+
+    /**
+     * The type of diff.
+     */
+    private final DiffType type;
+
+    /**
+     * Source File/Object path.
+     */
+    private final String sourcePath;
+
+    /**
+     * Destination File/Object path, if this is a rename operation.
+     */
+    private final String targetPath;
+
+    private DiffReportEntry(final DiffType type, final String sourcePath,
+                            final String targetPath) {
+      this.type = type;
+      this.sourcePath = sourcePath;
+      this.targetPath = targetPath;
+    }
+
+    public static DiffReportEntry of(final DiffType type,
+                                     final String sourcePath) {
+      return of(type, sourcePath, null);
+    }
+
+    public static DiffReportEntry of(final DiffType type,
+                                     final String sourcePath,
+                                     final String targetPath) {
+      return new DiffReportEntry(type, sourcePath, targetPath);
+
+    }
+
+    @Override
+    public String toString() {
+      String str = type.getLabel() + "\t" + sourcePath;
+      if (type == DiffType.RENAME) {
+        str += " -> " + targetPath;
+      }
+      return str;
+    }
+
+    public DiffType getType() {
+      return type;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      if (this == other) {
+        return true;
+      }
+      if (other instanceof DiffReportEntry) {
+        DiffReportEntry entry = (DiffReportEntry) other;
+        return type.equals(entry.getType())
+            && sourcePath.equals(entry.sourcePath)
+            && java.util.Objects.equals(targetPath, entry.targetPath);
+      }
+      return false;
+    }
+
+    @Override
+    public int hashCode() {
+      return toString().hashCode();
+    }
+
+  }
+
+
+  /**
+   * Volume name to which the snapshot bucket belongs.
+   */
+  private final String volumeName;
+
+  /**
+   * Bucket name to which the snapshot belongs.
+   */
+  private final String bucketName;
+  /**
+   * start point of the diff.
+   */
+  private final String fromSnapshot;
+
+  /**
+   * end point of the diff.
+   */
+  private final String toSnapshot;
+
+  /**
+   * List of diff report entries.
+   */
+  private final List<DiffReportEntry> diffList;
+
+  public SnapshotDiffReport(final String volumeName, final String bucketName,
+                            final String fromSnapshot, final String toSnapshot,
+                            List<DiffReportEntry> entryList) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.fromSnapshot = fromSnapshot;
+    this.toSnapshot = toSnapshot;
+    this.diffList = entryList != null ? entryList : Collections.emptyList();
+  }
+
+  public List<DiffReportEntry> getDiffList() {
+    return diffList;
+  }
+}
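
A small, hypothetical example of building a report through the DiffReportEntry factory methods (paths and snapshot names are placeholders):

    import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffReportEntry;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffType;

    import java.util.Arrays;
    import java.util.List;

    public final class SnapshotDiffReportDemo {
      public static void main(String[] args) {
        // Entries are listed in the order the diff should be applied:
        // deletes, renames, creates, then modifications.
        List<DiffReportEntry> entries = Arrays.asList(
            DiffReportEntry.of(DiffType.DELETE, "/dir1/keyA"),
            DiffReportEntry.of(DiffType.RENAME, "/dir1/keyB", "/dir1/keyA"),
            DiffReportEntry.of(DiffType.CREATE, "/dir1/keyB"),
            DiffReportEntry.of(DiffType.MODIFY, "/dir1/keyC"));

        SnapshotDiffReport report = new SnapshotDiffReport(
            "vol1", "bucket1", "snap-old", "snap-new", entries);

        // Prints "-<TAB>/dir1/keyA", "R<TAB>/dir1/keyB -> /dir1/keyA", etc.
        report.getDiffList().forEach(System.out::println);
      }
    }
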
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index c33b804ae0..3811718f89 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -158,6 +158,11 @@
         <artifactId>hdds-hadoop-dependency-server</artifactId>
         <version>${hdds.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.ozone</groupId>
+        <artifactId>rocksdb-checkpoint-differ</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.apache.ozone</groupId>
         <artifactId>hdds-hadoop-dependency-test</artifactId>

