You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by si...@apache.org on 2023/04/28 18:09:54 UTC
[ozone] branch master updated: HDDS-8322. [Snapshot] Created various config for Snapdiff and moved few configs from OzoneConfigKeys to OMConfigKeys (#4511)
This is an automated email from the ASF dual-hosted git repository.
siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 3858cd102b HDDS-8322. [Snapshot] Created various config for Snapdiff and moved few configs from OzoneConfigKeys to OMConfigKeys (#4511)
3858cd102b is described below
commit 3858cd102bbc3c328c24261d929c6549f5358a40
Author: Hemant Kumar <he...@gmail.com>
AuthorDate: Fri Apr 28 11:09:47 2023 -0700
HDDS-8322. [Snapshot] Created various config for Snapdiff and moved few configs from OzoneConfigKeys to OMConfigKeys (#4511)
---
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 14 ----
.../common/src/main/resources/ozone-default.xml | 87 +++++++++++++++++++---
.../hadoop/hdds/utils/db/DBStoreBuilder.java | 22 +-----
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 17 ++---
hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 13 +++-
.../ozone/rocksdiff/RocksDBCheckpointDiffer.java | 30 +++++---
.../rocksdiff/TestRocksDBCheckpointDiffer.java | 37 ++++++---
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 48 ++++++++++++
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 4 +-
.../org/apache/hadoop/ozone/om/TestOmSnapshot.java | 7 +-
.../apache/hadoop/ozone/om/OmSnapshotManager.java | 47 +++++++-----
.../om/service/SnapshotDiffCleanupService.java | 28 ++++---
.../ozone/om/snapshot/SnapshotDiffManager.java | 47 +++++++-----
.../hadoop/ozone/om/TestOmSnapshotManager.java | 3 +-
.../om/service/TestSnapshotDiffCleanupService.java | 38 +++++++---
15 files changed, 302 insertions(+), 140 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 74a829f041..1fd797ef3d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -605,11 +605,6 @@ public final class OzoneConfigKeys {
public static final String FS_TRASH_CLASSNAME_DEFAULT =
"org.apache.hadoop.ozone.om.TrashPolicyOzone";
-
- public static final String OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE =
- "ozone.om.snapshot.cache.max.size";
- public static final int OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT = 10;
-
public static final String
OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED =
"ozone.om.snapshot.compaction.dag.max.time.allowed";
@@ -626,11 +621,6 @@ public final class OzoneConfigKeys {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT =
TimeUnit.HOURS.toMillis(1);
- public static final String OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF =
- "ozone.om.snapshot.force.full.diff";
-
- public static final boolean OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT = false;
-
public static final String OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT =
"ozone.om.delta.update.data.size.max.limit";
public static final String
@@ -642,10 +632,6 @@ public final class OzoneConfigKeys {
public static final String OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION =
"ozone.scm.close.container.wait.duration";
- public static final String OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE =
- "ozone.om.snapdiff.max.page.size";
- public static final int OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE_DEFAULT = 1000;
-
/**
* There is no need to instantiate this class.
*/
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 906ac12126..97c06a900c 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -3682,13 +3682,88 @@
</description>
</property>
+ <property>
+ <name>ozone.om.snapshot.cache.max.size</name>
+ <value>10</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Maximum number of entries allowed in the snapshot cache.
+ </description>
+ </property>
+
<property>
<name>ozone.om.snapshot.force.full.diff</name>
<value>false</value>
<tag>OZONE, OM</tag>
<description>
- If true, snapshot diff will always perform full diff (can be slow)
- without using the optimised DAG based pruning approach
+ Flag to always perform full snapshot diff (can be slow) without using the optimised compaction DAG.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.max.page.size</name>
+ <value>1000</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Maximum number of entries to be returned in a single page of snap diff report.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.thread.pool.size</name>
+ <value>10</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Maximum number of concurrent snapshot diff jobs allowed.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.job.default.wait.time</name>
+ <value>1m</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Default wait time returned to the client before it retries a snapshot diff request.
+ Uses millisecond by default when no time unit is specified.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.max.jobs.purge.per.task</name>
+ <value>100</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Maximum number of snapshot diff jobs to be purged per snapshot diff cleanup run.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.job.report.persistent.time</name>
+ <value>7d</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Maximum time a successful snapshot diff job and its report will be persisted.
+ Uses millisecond by default when no time unit is specified.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.cleanup.service.run.internal</name>
+ <value>60m</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Interval at which snapshot diff clean up service will run.
+ Uses millisecond by default when no time unit is specified.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.om.snapshot.diff.cleanup.service.timeout</name>
+ <value>5m</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ Timeout for snapshot diff clean up service.
+ Uses millisecond by default when no time unit is specified.
</description>
</property>
@@ -3709,12 +3784,4 @@
Buffer size for SST Dumptool Pipe which would be used for computing snapdiff when native library is enabled.
</description>
</property>
- <property>
- <name>ozone.om.snapdiff.max.page.size</name>
- <value>1000</value>
- <tag>OZONE, OM</tag>
- <description>
- Maximum number of entries that a single snapDiff RPC would return.
- </description>
- </property>
</configuration>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 5427f20936..c7a436230c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -32,7 +32,6 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
-import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -47,10 +46,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKS
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
import org.apache.hadoop.hdds.conf.StorageUnit;
@@ -105,8 +100,6 @@ public final class DBStoreBuilder {
private int maxFSSnapshots = 0;
private final DBProfile defaultCfProfile;
private boolean enableCompactionLog;
- private long maxTimeAllowedForSnapshotInDag;
- private long pruneCompactionDagDaemonRunInterval;
private boolean createCheckpointDirs = true;
// this is to track the total size of dbUpdates data since sequence
// number in request to avoid increase in heap memory.
@@ -159,16 +152,6 @@ public final class DBStoreBuilder {
HDDS_DEFAULT_DB_PROFILE);
LOG.debug("Default DB profile:{}", defaultCfProfile);
- maxTimeAllowedForSnapshotInDag = configuration.getTimeDuration(
- OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED,
- OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT,
- TimeUnit.MILLISECONDS);
-
- pruneCompactionDagDaemonRunInterval = configuration.getTimeDuration(
- OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL,
- OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
- TimeUnit.MILLISECONDS);
-
this.maxDbUpdatesSizeThreshold = (long) configuration.getStorageSize(
OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT,
OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT, StorageUnit.BYTES);
@@ -228,9 +211,8 @@ public final class DBStoreBuilder {
return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
registry, openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
- enableCompactionLog, maxTimeAllowedForSnapshotInDag,
- pruneCompactionDagDaemonRunInterval, maxDbUpdatesSizeThreshold,
- createCheckpointDirs);
+ enableCompactionLog, maxDbUpdatesSizeThreshold, createCheckpointDirs,
+ configuration);
} finally {
tableConfigs.forEach(TableConfig::close);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index a06264744e..7b0d210856 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -30,8 +30,7 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.RocksDBStoreMetrics;
import org.apache.hadoop.hdds.utils.db.cache.TableCache;
@@ -82,12 +81,12 @@ public class RDBStore implements DBStore {
@VisibleForTesting
public RDBStore(File dbFile, ManagedDBOptions options,
- Set<TableConfig> families, long maxDbUpdatesSizeThreshold)
+ Set<TableConfig> families,
+ long maxDbUpdatesSizeThreshold)
throws IOException {
this(dbFile, options, new ManagedWriteOptions(), families,
new CodecRegistry(), false, 1000, null, false,
- TimeUnit.DAYS.toMillis(1), TimeUnit.HOURS.toMillis(1),
- maxDbUpdatesSizeThreshold, true);
+ maxDbUpdatesSizeThreshold, true, null);
}
@SuppressWarnings("parameternumber")
@@ -95,10 +94,9 @@ public class RDBStore implements DBStore {
ManagedWriteOptions writeOptions, Set<TableConfig> families,
CodecRegistry registry, boolean readOnly, int maxFSSnapshots,
String dbJmxBeanNameName, boolean enableCompactionLog,
- long maxTimeAllowedForSnapshotInDag,
- long compactionDagDaemonInterval,
long maxDbUpdatesSizeThreshold,
- boolean createCheckpointDirs)
+ boolean createCheckpointDirs,
+ ConfigurationSource configuration)
throws IOException {
Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
@@ -115,8 +113,7 @@ public class RDBStore implements DBStore {
rocksDBCheckpointDiffer = RocksDBCheckpointDifferHolder.getInstance(
dbLocation.getParent() + OM_KEY_PREFIX + OM_SNAPSHOT_DIFF_DIR,
DB_COMPACTION_SST_BACKUP_DIR, DB_COMPACTION_LOG_DIR,
- dbLocation.toString(),
- maxTimeAllowedForSnapshotInDag, compactionDagDaemonInterval);
+ dbLocation.toString(), configuration);
rocksDBCheckpointDiffer.setRocksDBForCompactionTracking(dbOptions);
} else {
rocksDBCheckpointDiffer = null;
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index 3c3764d386..c6314d8ce6 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -73,10 +73,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.apache.ozone</groupId>
- <artifactId>hdds-rocks-native</artifactId>
- </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-rocks-native</artifactId>
+ </dependency>
</dependencies>
<build>
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 0d76d87ee8..2bea23e9a7 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -29,6 +29,7 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
@@ -69,6 +70,10 @@ import java.util.stream.Stream;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
// TODO
// 8. Handle bootstrapping rocksDB for a new OM follower node
@@ -180,18 +185,14 @@ public class RocksDBCheckpointDiffer implements AutoCloseable {
* @param sstBackupDirName Name of the SST backup dir under metadata dir.
* @param compactionLogDirName Name of the compaction log dir.
* @param activeDBLocationName Active RocksDB directory's location.
- * @param maxTimeAllowedForSnapshotInDagInMs Time after which snapshot will be
- * pruned from the DAG by daemon.
- * @param pruneCompactionDagDaemonRunIntervalInMs Internal at which DAG
- * pruning daemon will run.
+ * @param configuration ConfigurationSource.
*/
@VisibleForTesting
RocksDBCheckpointDiffer(String metadataDirName,
String sstBackupDirName,
String compactionLogDirName,
String activeDBLocationName,
- long maxTimeAllowedForSnapshotInDagInMs,
- long pruneCompactionDagDaemonRunIntervalInMs) {
+ ConfigurationSource configuration) {
Preconditions.checkNotNull(metadataDirName);
Preconditions.checkNotNull(sstBackupDirName);
Preconditions.checkNotNull(compactionLogDirName);
@@ -205,7 +206,16 @@ public class RocksDBCheckpointDiffer implements AutoCloseable {
// Active DB location is used in getSSTFileSummary
this.activeDBLocationStr = activeDBLocationName + "/";
- this.maxAllowedTimeInDag = maxTimeAllowedForSnapshotInDagInMs;
+ this.maxAllowedTimeInDag = configuration.getTimeDuration(
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED,
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT,
+ TimeUnit.MILLISECONDS);
+
+ long pruneCompactionDagDaemonRunIntervalInMs =
+ configuration.getTimeDuration(
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL,
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
+ TimeUnit.MILLISECONDS);
if (pruneCompactionDagDaemonRunIntervalInMs > 0) {
this.executor = Executors.newSingleThreadScheduledExecutor();
@@ -1488,16 +1498,14 @@ public class RocksDBCheckpointDiffer implements AutoCloseable {
String sstBackupDirName,
String compactionLogDirName,
String activeDBLocationName,
- long maxTimeAllowedForSnapshotInDagInMs,
- long pruneCompactionDagDaemonRunIntervalInMs
+ ConfigurationSource configuration
) {
return INSTANCE_MAP.computeIfAbsent(metadataDirName, (key) ->
new RocksDBCheckpointDiffer(metadataDirName,
sstBackupDirName,
compactionLogDirName,
activeDBLocationName,
- maxTimeAllowedForSnapshotInDagInMs,
- pruneCompactionDagDaemonRunIntervalInMs));
+ configuration));
}
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index c1ff195c77..2bad808481 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -42,6 +42,7 @@ import java.util.stream.Stream;
import com.google.common.graph.MutableGraph;
import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.NodeComparator;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
@@ -51,6 +52,7 @@ import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.Mockito;
import org.rocksdb.Checkpoint;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
@@ -66,6 +68,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_READ_ALL_DB_KEYS;
@@ -100,6 +106,7 @@ public class TestRocksDBCheckpointDiffer {
private File metadataDirDir;
private File compactionLogDir;
private File sstBackUpDir;
+ private ConfigurationSource config;
@BeforeEach
public void init() {
@@ -119,6 +126,18 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDir = new File(metadataDirName, sstBackUpDirName);
createDir(sstBackUpDir, metadataDirName + "/" + sstBackUpDirName);
+
+ config = Mockito.mock(ConfigurationSource.class);
+
+ Mockito.when(config.getTimeDuration(
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED,
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT,
+ TimeUnit.MILLISECONDS)).thenReturn(MINUTES.toMillis(10));
+
+ Mockito.when(config.getTimeDuration(
+ OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL,
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
+ TimeUnit.MILLISECONDS)).thenReturn(0L);
}
private void createDir(File file, String filePath) {
@@ -246,8 +265,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- 0L,
- 0L);
+ config);
boolean exceptionThrown = false;
long createdTime = System.currentTimeMillis();
@@ -320,8 +338,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- TimeUnit.DAYS.toMillis(1),
- MINUTES.toMillis(5));
+ config);
RocksDB rocksDB =
createRocksDBInstanceAndWriteKeys(activeDbDirName, differ);
@@ -821,8 +838,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- 0L,
- 0L);
+ config);
Set<String> actualFileNodesRemoved =
differ.pruneBackwardDag(originalDag, levelToBeRemoved);
Assertions.assertEquals(expectedDag, originalDag);
@@ -884,8 +900,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- 0L,
- 0L);
+ config);
Set<String> actualFileNodesRemoved =
differ.pruneForwardDag(originalDag, levelToBeRemoved);
Assertions.assertEquals(expectedDag, originalDag);
@@ -1066,8 +1081,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- MINUTES.toMillis(10),
- 0L);
+ config);
differ.loadAllCompactionLogs();
@@ -1162,8 +1176,7 @@ public class TestRocksDBCheckpointDiffer {
sstBackUpDirName,
compactionLogDirName,
activeDbDirName,
- MINUTES.toMillis(10),
- 0L);
+ config);
differ.loadAllCompactionLogs();
differ.pruneSstFiles();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index caf932cedd..2adf5ec20e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -458,6 +458,54 @@ public final class OMConfigKeys {
= "ozone.om.enable.ofs.shared.tmp.dir";
public static final boolean OZONE_OM_ENABLE_OFS_SHARED_TMP_DIR_DEFAULT
= false;
+
+ public static final String OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE =
+ "ozone.om.snapshot.cache.max.size";
+ public static final int OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT = 10;
+
+ public static final String OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF =
+ "ozone.om.snapshot.force.full.diff";
+
+ public static final boolean OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT = false;
+
public static final String OZONE_OM_SNAPSHOT_DIFF_DB_DIR
= "ozone.om.snapshot.diff.db.dir";
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE
+ = "ozone.om.snapshot.diff.max.page.size";
+ public static final int OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT
+ = 1000;
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE
+ = "ozone.om.snapshot.diff.thread.pool.size";
+ public static final int OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT
+ = 10;
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME
+ = "ozone.om.snapshot.diff.job.default.wait.time";
+ public static final long OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT
+ = TimeUnit.MINUTES.toMillis(1);
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK
+ = "ozone.om.snapshot.diff.max.jobs.purge.per.task";
+ public static final int OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK_DEFAULT
+ = 100;
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME
+ = "ozone.om.snapshot.diff.job.report.persistent.time";
+ public static final long
+ OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME_DEFAULT
+ = TimeUnit.DAYS.toMillis(7);
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL
+ = "ozone.om.snapshot.diff.cleanup.service.run.internal";
+ public static final long
+ OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT
+ = TimeUnit.HOURS.toMillis(1);
+
+ public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT
+ = "ozone.om.snapshot.diff.cleanup.service.timeout";
+ public static final long
+ OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT_DEFAULT
+ = TimeUnit.MINUTES.toMillis(5);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 32f308790e..59972e85c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -117,10 +117,10 @@ import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_OFS_SHARED_TMP_DIR;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED;
@@ -279,7 +279,7 @@ public class TestRootedOzoneFileSystem {
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
- conf.setInt(OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE, 4);
+ conf.setInt(OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE, 4);
// fs.ofs.impl would be loaded from META-INF, no need to manually set it
fs = FileSystem.get(conf);
trash = new Trash(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index c70fa0562f..22925653e6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -161,9 +160,13 @@ public class TestOmSnapshot {
enabledFileSystemPaths);
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
bucketLayout.name());
- conf.setBoolean(OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
+ conf.setBoolean(OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
forceFullSnapshotDiff);
conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST);
+ conf.setTimeDuration(
+ OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME,
+ TimeUnit.SECONDS.toMillis(1),
+ TimeUnit.MILLISECONDS);
cluster = MiniOzoneCluster.newOMHABuilder(conf)
.setClusterId(clusterId)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index a97ed425b5..f75623a8ad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -27,7 +27,6 @@ import com.google.common.cache.RemovalListener;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
-import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
@@ -35,6 +34,7 @@ import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.StringUtils;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.om.codec.OmDBDiffReportEntryCodec;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -77,13 +76,19 @@ import javax.annotation.Nonnull;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
import static org.apache.commons.lang3.StringUtils.isBlank;
import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.DEFAULT_COLUMN_FAMILY_NAME;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DB_DIR;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle;
@@ -102,7 +107,7 @@ public final class OmSnapshotManager implements AutoCloseable {
private final OzoneManager ozoneManager;
private final SnapshotDiffManager snapshotDiffManager;
private final LoadingCache<String, OmSnapshot> snapshotCache;
- private ManagedRocksDB snapshotDiffDb;
+ private final ManagedRocksDB snapshotDiffDb;
public static final String DELIMITER = "-";
@@ -147,16 +152,21 @@ public final class OmSnapshotManager implements AutoCloseable {
private final ManagedDBOptions options;
private final List<ColumnFamilyDescriptor> columnFamilyDescriptors;
private final List<ColumnFamilyHandle> columnFamilyHandles;
- private final CodecRegistry codecRegistry;
private final SnapshotDiffCleanupService snapshotDiffCleanupService;
+ private final int maxPageSize;
+
public OmSnapshotManager(OzoneManager ozoneManager) {
this.options = new ManagedDBOptions();
this.options.setCreateIfMissing(true);
this.columnFamilyOptions = new ManagedColumnFamilyOptions();
this.columnFamilyDescriptors = new ArrayList<>();
this.columnFamilyHandles = new ArrayList<>();
- this.codecRegistry = createCodecRegistryForSnapDiff();
+ CodecRegistry codecRegistry = createCodecRegistryForSnapDiff();
+ this.maxPageSize = ozoneManager.getConfiguration().getInt(
+ OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE,
+ OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT
+ );
ColumnFamilyHandle snapDiffJobCf;
ColumnFamilyHandle snapDiffReportCf;
@@ -196,8 +206,8 @@ public final class OmSnapshotManager implements AutoCloseable {
// size of lru cache
int cacheSize = ozoneManager.getConfiguration().getInt(
- OzoneConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE,
- OzoneConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT);
+ OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE,
+ OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT);
CacheLoader<String, OmSnapshot> loader = createCacheLoader();
@@ -224,13 +234,19 @@ public final class OmSnapshotManager implements AutoCloseable {
ozoneManager, snapshotCache, snapDiffJobCf, snapDiffReportCf,
columnFamilyOptions, codecRegistry);
- // TODO: [SNAPSHOT] Move this to config.
- long runInterval = Duration.ofMinutes(15).toMillis();
- long timeout = Duration.ofSeconds(3).toMillis();
+ long diffCleanupServiceInterval = ozoneManager.getConfiguration()
+ .getTimeDuration(OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL,
+ OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT,
+ TimeUnit.MILLISECONDS);
+
+ long diffCleanupServiceTimeout = ozoneManager.getConfiguration()
+ .getTimeDuration(OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT,
+ OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
this.snapshotDiffCleanupService = new SnapshotDiffCleanupService(
- runInterval,
- timeout,
+ diffCleanupServiceInterval,
+ diffCleanupServiceTimeout,
ozoneManager,
snapshotDiffDb,
snapDiffJobCf,
@@ -548,9 +564,6 @@ public final class OmSnapshotManager implements AutoCloseable {
verifySnapshotInfoForSnapDiff(fsInfo, tsInfo);
int index = getIndexFromToken(token);
- int maxPageSize = ozoneManager.getConfiguration()
- .getInt(OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE,
- OZONE_OM_SNAPDIFF_MAX_PAGE_SIZE_DEFAULT);
if (pageSize <= 0 || pageSize > maxPageSize) {
pageSize = maxPageSize;
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
index f9ab7a7257..dd8a3ad4ef 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
@@ -19,11 +19,6 @@
package org.apache.hadoop.ozone.om.service;
import com.google.common.annotations.VisibleForTesting;
-import java.io.IOException;
-import java.time.Duration;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.BackgroundTask;
import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
@@ -38,6 +33,15 @@ import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffJob;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK_DEFAULT;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.FAILED;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.REJECTED;
@@ -62,17 +66,15 @@ public class SnapshotDiffCleanupService extends BackgroundService {
private final ColumnFamilyHandle snapDiffReportCfh;
private final CodecRegistry codecRegistry;
- // TODO: [SNAPSHOT] Move this to config.
/**
* Maximum numbers of snapDiff jobs to be purged per clean-up task run.
*/
- private final long maxJobToPurgePerTask = 1000L;
+ private final long maxJobToPurgePerTask;
- // TODO: [SNAPSHOT] Move this to config.
/**
* Maximum time a snapDiff job and corresponding report will be persisted.
*/
- private final long maxAllowedTime = Duration.ofDays(7).toMillis();
+ private final long maxAllowedTime;
@SuppressWarnings("parameternumber")
public SnapshotDiffCleanupService(long interval,
@@ -96,6 +98,14 @@ public class SnapshotDiffCleanupService extends BackgroundService {
this.snapDiffPurgedJobCfh = snapDiffPurgedJobCfh;
this.snapDiffReportCfh = snapDiffReportCfh;
this.codecRegistry = codecRegistry;
+ this.maxJobToPurgePerTask = ozoneManager.getConfiguration().getLong(
+ OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK,
+ OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK_DEFAULT
+ );
+ this.maxAllowedTime = ozoneManager.getConfiguration().getTimeDuration(
+ OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME,
+ OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME_DEFAULT,
+ TimeUnit.MILLISECONDS);
}
@VisibleForTesting
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 633d52ea8f..ebd23dff9a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -27,7 +27,6 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
-import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -59,7 +58,6 @@ import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OFSPath;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -91,6 +89,12 @@ import org.slf4j.LoggerFactory;
import java.util.concurrent.SynchronousQueue;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -124,11 +128,8 @@ public class SnapshotDiffManager implements AutoCloseable {
private final LoadingCache<String, OmSnapshot> snapshotCache;
private final CodecRegistry codecRegistry;
private final ManagedColumnFamilyOptions familyOptions;
-
// TODO: [SNAPSHOT] Use different wait time based of job status.
- private static final Duration DEFAULT_WAIT_TIME = Duration.ofSeconds(1);
- // TODO: [SNAPSHOT] Move this to config file.
- private static final int DEFAULT_THREAD_POOL_SIZE = 10;
+ private final long defaultWaitTime;
/**
* Global table to keep the diff report. Each key is prefixed by the jobID
@@ -173,6 +174,16 @@ public class SnapshotDiffManager implements AutoCloseable {
this.snapshotCache = snapshotCache;
this.familyOptions = familyOptions;
this.codecRegistry = codecRegistry;
+ this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration(
+ OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME,
+ OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT,
+ TimeUnit.MILLISECONDS
+ );
+
+ int threadPoolSize = ozoneManager.getConfiguration().getInt(
+ OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE,
+ OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT
+ );
this.snapDiffJobTable = new RocksDbPersistentMap<>(db,
snapDiffJobCfh,
@@ -186,11 +197,11 @@ public class SnapshotDiffManager implements AutoCloseable {
byte[].class,
byte[].class);
- this.executorService = new ThreadPoolExecutor(DEFAULT_THREAD_POOL_SIZE,
- DEFAULT_THREAD_POOL_SIZE,
+ this.executorService = new ThreadPoolExecutor(threadPoolSize,
+ threadPoolSize,
0,
TimeUnit.SECONDS,
- new ArrayBlockingQueue<>(DEFAULT_THREAD_POOL_SIZE)
+ new ArrayBlockingQueue<>(threadPoolSize)
);
Path path = Paths.get(differ.getMetadataDir(), "snapDiff");
@@ -365,12 +376,12 @@ public class SnapshotDiffManager implements AutoCloseable {
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(), volume, bucket,
fromSnapshot.getName(), toSnapshot.getName(), new ArrayList<>(),
- null), IN_PROGRESS, DEFAULT_WAIT_TIME.toMillis());
+ null), IN_PROGRESS, defaultWaitTime);
case FAILED:
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(), volume, bucket,
fromSnapshot.getName(), toSnapshot.getName(), new ArrayList<>(),
- null), FAILED, DEFAULT_WAIT_TIME.toMillis());
+ null), FAILED, defaultWaitTime);
case DONE:
SnapshotDiffReportOzone report =
createPageResponse(snapDiffJob.getJobId(), volume, bucket,
@@ -450,7 +461,7 @@ public class SnapshotDiffManager implements AutoCloseable {
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(),
volume, bucket, fromSnapshot.getName(), toSnapshot.getName(),
- new ArrayList<>(), null), REJECTED, DEFAULT_WAIT_TIME.toMillis());
+ new ArrayList<>(), null), REJECTED, defaultWaitTime);
}
// Check again that request is still in queued status. If it is not queued,
@@ -467,7 +478,7 @@ public class SnapshotDiffManager implements AutoCloseable {
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(), volume, bucket,
fromSnapshot.getName(), toSnapshot.getName(), new ArrayList<>(),
- null), snapDiffJob.getStatus(), DEFAULT_WAIT_TIME.toMillis());
+ null), snapDiffJob.getStatus(), defaultWaitTime);
}
}
@@ -505,7 +516,7 @@ public class SnapshotDiffManager implements AutoCloseable {
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(), volume, bucket,
fromSnapshot.getName(), toSnapshot.getName(), new ArrayList<>(),
- null), IN_PROGRESS, DEFAULT_WAIT_TIME.toMillis());
+ null), IN_PROGRESS, defaultWaitTime);
} catch (RejectedExecutionException exception) {
// Remove the entry from job table so that client can retry.
// If entry is not removed, client has to wait till cleanup service
@@ -513,11 +524,11 @@ public class SnapshotDiffManager implements AutoCloseable {
// before the cleanup kicks in.
snapDiffJobTable.remove(jobKey);
LOG.info("Exceeded the snapDiff parallel requests progressing " +
- "limit. Please retry after {}.", DEFAULT_WAIT_TIME);
+ "limit. Please retry after {}.", defaultWaitTime);
return new SnapshotDiffResponse(
new SnapshotDiffReportOzone(snapshotRoot.toString(), volume, bucket,
fromSnapshot.getName(), toSnapshot.getName(), new ArrayList<>(),
- null), REJECTED, DEFAULT_WAIT_TIME.toMillis());
+ null), REJECTED, defaultWaitTime);
}
}
@@ -605,8 +616,8 @@ public class SnapshotDiffManager implements AutoCloseable {
toSnapshot.getMetadataManager().getKeyTable(bucketLayout);
boolean useFullDiff = ozoneManager.getConfiguration().getBoolean(
- OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
- OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT);
+ OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
+ OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT);
if (forceFullDiff) {
useFullDiff = true;
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 583e381d25..c3162784b0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
import org.apache.ozone.test.GenericTestUtils;
@@ -74,7 +73,7 @@ public class TestOmSnapshotManager {
// Only allow one entry in cache so each new one causes an eviction
configuration.setInt(
- OzoneConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, 1);
+ OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, 1);
OmTestManagers omTestManagers = new OmTestManagers(configuration);
om = omTestManagers.getOzoneManager();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java
index 6af8b0733e..eb7b77f328 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java
@@ -18,15 +18,9 @@
package org.apache.hadoop.ozone.om.service;
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.IntegerCodec;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
@@ -50,8 +44,21 @@ import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
-import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.DEFAULT_COLUMN_FAMILY_NAME;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK_DEFAULT;
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.FAILED;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS;
@@ -86,6 +93,8 @@ public class TestSnapshotDiffCleanupService {
private SnapshotDiffCleanupService diffCleanupService;
@Mock
private OzoneManager ozoneManager;
+ @Mock
+ private OzoneConfiguration config;
@BeforeAll
public static void staticInit() throws RocksDBException {
@@ -131,7 +140,18 @@ public class TestSnapshotDiffCleanupService {
@BeforeEach
public void init() throws RocksDBException, IOException {
MockitoAnnotations.initMocks(this);
- when(ozoneManager.isLeaderReady()).thenReturn(true);
+ when(config.getLong(
+ OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK,
+ OZONE_OM_SNAPSHOT_DIFF_MAX_JOBS_PURGE_PER_TASK_DEFAULT)
+ ).thenReturn(1000L);
+
+ when(config.getTimeDuration(
+ OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME,
+ OZONE_OM_SNAPSHOT_DIFF_JOB_REPORT_PERSISTENT_TIME_DEFAULT,
+ TimeUnit.MILLISECONDS)
+ ).thenReturn(TimeUnit.DAYS.toMillis(7));
+
+ when(ozoneManager.getConfiguration()).thenReturn(config);
jobTableCfd = new ColumnFamilyDescriptor(jobTableNameBytes,
columnFamilyOptions);
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org