Posted to commits@ozone.apache.org by ad...@apache.org on 2022/07/02 20:32:46 UTC
[ozone] branch master updated: HDDS-6723. Close Rocks objects properly in OzoneManager (#3400)
This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 179755eab0 HDDS-6723. Close Rocks objects properly in OzoneManager (#3400)
179755eab0 is described below
commit 179755eab0913ffa8fe32f3ccb3316f8bfc19486
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Sat Jul 2 13:32:41 2022 -0700
HDDS-6723. Close Rocks objects properly in OzoneManager (#3400)
---
.../hadoop/hdds/utils/db/RDBSstFileWriter.java | 4 +-
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 45 ++-
.../apache/hadoop/hdds/utils/db/RocksDatabase.java | 20 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 435 ++++++++++++---------
.../ozone/om/request/file/OMFileRequest.java | 36 +-
.../org/apache/hadoop/ozone/debug/ListTables.java | 7 +-
.../apache/hadoop/ozone/debug/RocksDBUtils.java | 5 +-
7 files changed, 316 insertions(+), 236 deletions(-)
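Editor's note on the thread running through all seven files: RocksJava classes such as Options, EnvOptions, SstFileWriter, TransactionLogIterator, WriteBatch, and the table iterators wrap C++ objects whose memory lives outside the JVM heap, so each instance must be closed explicitly; they all implement AutoCloseable. A minimal sketch of the before/after pattern the commit applies throughout — the class name and scratch DB path below are illustrative, not part of the patch:

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public final class CloseRocksObjectsSketch {
      static {
        RocksDB.loadLibrary(); // harmless if the JNI library is already loaded
      }

      public static void main(String[] args) throws RocksDBException {
        // Leak-prone form (what the patch removes): the anonymous Options
        // wraps off-heap native memory the garbage collector does not track:
        //   RocksDB.open(new Options().setCreateIfMissing(true), "/tmp/demo-db");

        // Corrected form: try-with-resources releases the native handles
        // deterministically, in reverse order of acquisition.
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/demo-db")) {
          db.put("key".getBytes(StandardCharsets.UTF_8),
              "value".getBytes(StandardCharsets.UTF_8));
        }
      }
    }

The hunks below apply this same discipline to less obvious lifetimes: fields, per-iteration objects, and iterators created under locks.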
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
index 367c235f73..d440df1748 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
@@ -37,10 +37,11 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
private SstFileWriter sstFileWriter;
private File sstFile;
private AtomicLong keyCounter;
+ private Options emptyOption = new Options();
public RDBSstFileWriter() {
EnvOptions envOptions = new EnvOptions();
- this.sstFileWriter = new SstFileWriter(envOptions, new Options());
+ this.sstFileWriter = new SstFileWriter(envOptions, emptyOption);
this.keyCounter = new AtomicLong(0);
}
@@ -83,6 +84,7 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
} finally {
sstFileWriter.close();
sstFileWriter = null;
+ emptyOption.close();
}
keyCounter.set(0);
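This first hunk promotes the anonymous `new Options()` (previously leaked on every writer) to a field so it can be closed in the same finally block that closes the SstFileWriter. A trimmed-down analogue of the resulting shape, with hypothetical names rather than Ozone's:

    import java.io.File;
    import org.rocksdb.EnvOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileWriter;

    public final class SstWriterSketch implements AutoCloseable {
      static {
        RocksDB.loadLibrary(); // harmless if already loaded
      }

      // Held as fields so close() can reach them; an inline "new Options()"
      // would be unreachable by the time cleanup runs.
      private final EnvOptions envOptions = new EnvOptions();
      private final Options options = new Options();
      private final SstFileWriter writer = new SstFileWriter(envOptions, options);

      public void open(File sstFile) throws RocksDBException {
        writer.open(sstFile.getAbsolutePath());
      }

      public void put(byte[] key, byte[] value) throws RocksDBException {
        writer.put(key, value);
      }

      @Override
      public void close() throws RocksDBException {
        try {
          writer.finish(); // flush and seal the SST file
        } finally {
          writer.close();  // release native handles, writer first
          options.close();
          envOptions.close();
        }
      }
    }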
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index baaa17e49a..e32987c17d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -283,9 +283,8 @@ public class RDBStore implements DBStore {
throw new IllegalArgumentException("Illegal count for getUpdatesSince.");
}
DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
- try {
- TransactionLogIterator transactionLogIterator =
- db.getUpdatesSince(sequenceNumber);
+ try (TransactionLogIterator transactionLogIterator =
+ db.getUpdatesSince(sequenceNumber)) {
// Only the first record needs to be checked if its seq number <
// ( 1 + passed_in_sequence_number). For example, if seqNumber passed
@@ -298,24 +297,28 @@ public class RDBStore implements DBStore {
while (transactionLogIterator.isValid()) {
TransactionLogIterator.BatchResult result =
transactionLogIterator.getBatch();
- long currSequenceNumber = result.sequenceNumber();
- if (checkValidStartingSeqNumber &&
- currSequenceNumber > 1 + sequenceNumber) {
- throw new SequenceNumberNotFoundException("Unable to read data from" +
- " RocksDB wal to get delta updates. It may have already been" +
- "flushed to SSTs.");
- }
- // If the above condition was not satisfied, then it is OK to reset
- // the flag.
- checkValidStartingSeqNumber = false;
- if (currSequenceNumber <= sequenceNumber) {
- transactionLogIterator.next();
- continue;
- }
- dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
- result.sequenceNumber());
- if (currSequenceNumber - sequenceNumber >= limitCount) {
- break;
+ try {
+ long currSequenceNumber = result.sequenceNumber();
+ if (checkValidStartingSeqNumber &&
+ currSequenceNumber > 1 + sequenceNumber) {
+            throw new SequenceNumberNotFoundException("Unable to read data from"
+                + " RocksDB wal to get delta updates. It may have already been"
+                + " flushed to SSTs.");
+ }
+ // If the above condition was not satisfied, then it is OK to reset
+ // the flag.
+ checkValidStartingSeqNumber = false;
+ if (currSequenceNumber <= sequenceNumber) {
+ transactionLogIterator.next();
+ continue;
+ }
+ dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
+ result.sequenceNumber());
+ if (currSequenceNumber - sequenceNumber >= limitCount) {
+ break;
+ }
+ } finally {
+ result.writeBatch().close();
}
transactionLogIterator.next();
}
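Two lifetimes are fixed in this hunk: the TransactionLogIterator moves into a try-with-resources header, and each batch's WriteBatch is closed in a finally block before the iterator advances, since each batch retrieved from the WAL wraps its own native WriteBatch. The loop shape, condensed (class and method names are illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.TransactionLogIterator;
    import org.rocksdb.WriteBatch;

    public final class WalTailSketch {
      private WalTailSketch() { }

      /** Count WAL batches newer than sequenceNumber, leaking nothing. */
      public static long countUpdatesSince(RocksDB db, long sequenceNumber)
          throws RocksDBException {
        long batches = 0;
        try (TransactionLogIterator logIterator =
                 db.getUpdatesSince(sequenceNumber)) {
          while (logIterator.isValid()) {
            TransactionLogIterator.BatchResult result = logIterator.getBatch();
            WriteBatch batch = result.writeBatch();
            try {
              byte[] data = batch.data(); // copy out before closing; real code
              batches++;                  // would forward data, as above
            } finally {
              batch.close(); // free this iteration's native WriteBatch
            }
            logIterator.next();
          }
        }
        return batches;
      }
    }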
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 4ac83c9122..1289260259 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -73,12 +73,11 @@ public final class RocksDatabase {
* Read DB and return existing column families.
*
* @return a list of column families.
- * @see RocksDB#listColumnFamilies(Options, String)
*/
private static List<TableConfig> getColumnFamilies(File file)
throws RocksDBException {
- final List<TableConfig> columnFamilies = RocksDB.listColumnFamilies(
- new Options(), file.getAbsolutePath())
+ final List<TableConfig> columnFamilies = listColumnFamiliesEmptyOptions(
+ file.getAbsolutePath())
.stream()
.map(TableConfig::newTableConfig)
.collect(Collectors.toList());
@@ -88,6 +87,21 @@ public final class RocksDatabase {
return columnFamilies;
}
+  /**
+   * Read DB column families without a caller-supplied Options instance;
+   * a short-lived Options is created and closed internally.
+   * @param path path of the RocksDB instance to inspect
+   * @return a list of column family names
+   * @throws RocksDBException if the column families cannot be read
+   * @see RocksDB#listColumnFamilies(Options, String)
+   */
+ public static List<byte[]> listColumnFamiliesEmptyOptions(final String path)
+ throws RocksDBException {
+ try (Options emptyOptions = new Options()) {
+ return RocksDB.listColumnFamilies(emptyOptions, path);
+ }
+ }
+
static RocksDatabase open(File dbFile, DBOptions dbOptions,
WriteOptions writeOptions, Set<TableConfig> families,
boolean readOnly) throws IOException {
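The new helper confines the short-lived Options to a try-with-resources block, so the three former call sites of RocksDB.listColumnFamilies (here, plus ListTables and RocksDBUtils below) no longer each allocate one that nothing closes. A usage sketch; the DB path is a placeholder:

    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import org.apache.hadoop.hdds.utils.db.RocksDatabase;
    import org.rocksdb.RocksDBException;

    public final class ListFamiliesSketch {
      public static void main(String[] args) throws RocksDBException {
        // The helper owns the Options lifecycle; callers see only the result.
        List<byte[]> families =
            RocksDatabase.listColumnFamiliesEmptyOptions("/path/to/om.db");
        for (byte[] name : families) {
          System.out.println(new String(name, StandardCharsets.UTF_8));
        }
      }
    }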
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index cbc492161f..3ff8029c4e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1578,8 +1578,47 @@ public class KeyManagerImpl implements KeyManager {
bucketName);
Table<String, OmKeyInfo> keyTable = metadataManager
.getKeyTable(getBucketLayout(metadataManager, volName, buckName));
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator;
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = getIteratorForKeyInTableCache(recursive, startKey,
+ volumeName, bucketName, cacheKeyMap, keyArgs, keyTable)) {
+ findKeyInDbWithIterator(recursive, startKey, numEntries, volumeName,
+ bucketName, keyName, cacheKeyMap, keyArgs, keyTable, iterator);
+ }
+ int countEntries;
+
+ countEntries = 0;
+ // Convert results in cacheKeyMap to List
+ for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
+ // No need to check if a key is deleted or not here, this is handled
+ // when adding entries to cacheKeyMap from DB.
+ fileStatusList.add(fileStatus);
+ countEntries++;
+ if (countEntries >= numEntries) {
+ break;
+ }
+ }
+ // Clean up temp map and set
+ cacheKeyMap.clear();
+
+ List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
+ fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
+ if (args.getLatestVersionLocation()) {
+ slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
+ }
+ refreshPipeline(keyInfoList);
+
+ if (args.getSortDatanodes()) {
+ sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+ }
+ return fileStatusList;
+ }
+
+ private TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ getIteratorForKeyInTableCache(
+ boolean recursive, String startKey, String volumeName, String bucketName,
+ TreeMap<String, OzoneFileStatus> cacheKeyMap, String keyArgs,
+ Table<String, OmKeyInfo> keyTable) {
+ TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
try {
Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
cacheIter = keyTable.cacheIterator();
@@ -1595,7 +1634,17 @@ public class KeyManagerImpl implements KeyManager {
metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
bucketName);
}
+ return iterator;
+ }
+ @SuppressWarnings("parameternumber")
+ private void findKeyInDbWithIterator(boolean recursive, String startKey,
+ long numEntries, String volumeName, String bucketName, String keyName,
+ TreeMap<String, OzoneFileStatus> cacheKeyMap, String keyArgs,
+ Table<String, OmKeyInfo> keyTable,
+ TableIterator<String,
+ ? extends Table.KeyValue<String, OmKeyInfo>> iterator)
+ throws IOException {
// Then, find key in DB
String seekKeyInDb =
metadataManager.getOzoneKey(volumeName, bucketName, startKey);
@@ -1662,32 +1711,6 @@ public class KeyManagerImpl implements KeyManager {
}
}
}
-
- countEntries = 0;
- // Convert results in cacheKeyMap to List
- for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
- // No need to check if a key is deleted or not here, this is handled
- // when adding entries to cacheKeyMap from DB.
- fileStatusList.add(fileStatus);
- countEntries++;
- if (countEntries >= numEntries) {
- break;
- }
- }
- // Clean up temp map and set
- cacheKeyMap.clear();
-
- List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
- fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
- if (args.getLatestVersionLocation()) {
- slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
- }
- refreshPipeline(keyInfoList);
-
- if (args.getSortDatanodes()) {
- sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
- }
- return fileStatusList;
}
@SuppressWarnings("methodlength")
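The listStatus refactoring above splits iterator creation (getIteratorForKeyInTableCache, which returns it) from iterator consumption (findKeyInDbWithIterator), so the caller can hold the close in a single try-with-resources block. The same ownership-transfer shape, reduced to plain RocksDB types with illustrative names:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    public final class IteratorOwnershipSketch {
      private IteratorOwnershipSketch() { }

      // Creator: positions the iterator, then hands ownership to the caller.
      static RocksIterator openSeekedIterator(RocksDB db, byte[] seekKey) {
        RocksIterator it = db.newIterator();
        it.seek(seekKey);
        return it;
      }

      // Consumer: uses the iterator but never closes it.
      static long countFrom(RocksIterator it) {
        long n = 0;
        while (it.isValid()) {
          n++;
          it.next();
        }
        return n;
      }

      // Owner: the only place that closes, so nothing escapes on exceptions.
      public static long countKeysFrom(RocksDB db, byte[] seekKey) {
        try (RocksIterator it = openSeekedIterator(db, seekKey)) {
          return countFrom(it);
        }
      }
    }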
@@ -1741,162 +1764,169 @@ public class KeyManagerImpl implements KeyManager {
TreeMap<String, OzoneFileStatus> tempCacheDirMap = new TreeMap<>();
TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator;
+ iterator = null;
- if (Strings.isNullOrEmpty(startKey)) {
- OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
- if (fileStatus.isFile()) {
- return Collections.singletonList(fileStatus);
- }
+ try {
+ if (Strings.isNullOrEmpty(startKey)) {
+ OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
+ if (fileStatus.isFile()) {
+ return Collections.singletonList(fileStatus);
+ }
- // Not required to search in DeletedTable because all the deleted
- // keys will be marked directly in dirTable or in keyTable by
- // breaking the pointer to its sub-dirs and sub-files. So, there is no
- // issue of inconsistency.
+ // Not required to search in DeletedTable because all the deleted
+ // keys will be marked directly in dirTable or in keyTable by
+ // breaking the pointer to its sub-dirs and sub-files. So, there is no
+ // issue of inconsistency.
+
+ /*
+ * keyName is a directory.
+ * Say, "/a" is the dir name and its objectID is 1024, then seek
+ * will be doing with "1024/" to get all immediate descendants.
+ */
+ if (fileStatus.getKeyInfo() != null) {
+ prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
+ } else {
+ // list root directory.
+ prefixKeyInDB = bucketId;
+ }
+ seekFileInDB = metadataManager.getOzonePathKey(
+ volumeId, bucketId, prefixKeyInDB, "");
+ seekDirInDB = metadataManager.getOzonePathKey(
+ volumeId, bucketId, prefixKeyInDB, "");
+
+ // Order of seek ->
+ // (1)Seek files in fileTable
+ // (2)Seek dirs in dirTable
+
+ // First under lock obtain both entries from dir/file cache and generate
+ // entries marked for delete.
+ metadataManager.getLock()
+ .acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
+ try {
+ BucketLayout bucketLayout = getBucketLayout(
+ metadataManager, volumeName, bucketName);
+ iterator = metadataManager.getKeyTable(bucketLayout).iterator();
+ countEntries =
+ getFilesAndDirsFromCacheWithBucket(volumeName, bucketName,
+ cacheFileMap, tempCacheDirMap, deletedKeySet, prefixKeyInDB,
+ seekFileInDB, seekDirInDB, prefixPath, startKey, countEntries,
+ numEntries);
+
+ } finally {
+ metadataManager.getLock()
+ .releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
+ }
+ countEntries =
+ getFilesFromDirectory(cacheFileMap, seekFileInDB, prefixPath,
+ prefixKeyInDB, countEntries, numEntries, deletedKeySet,
+ iterator);
- /*
- * keyName is a directory.
- * Say, "/a" is the dir name and its objectID is 1024, then seek
- * will be doing with "1024/" to get all immediate descendants.
- */
- if (fileStatus.getKeyInfo() != null) {
- prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
} else {
- // list root directory.
- prefixKeyInDB = bucketId;
- }
- seekFileInDB = metadataManager.getOzonePathKey(
- volumeId, bucketId, prefixKeyInDB, "");
- seekDirInDB = metadataManager.getOzonePathKey(
- volumeId, bucketId, prefixKeyInDB, "");
-
- // Order of seek ->
- // (1)Seek files in fileTable
- // (2)Seek dirs in dirTable
-
-
- // First under lock obtain both entries from dir/file cache and generate
- // entries marked for delete.
- metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
- bucketName);
- try {
- BucketLayout bucketLayout =
- getBucketLayout(metadataManager, volumeName, bucketName);
- iterator = metadataManager.getKeyTable(bucketLayout).iterator();
- countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
- bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
- prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath, startKey,
- countEntries, numEntries);
-
- } finally {
- metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
- bucketName);
- }
- countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
- prefixPath, prefixKeyInDB, countEntries, numEntries, deletedKeySet,
- iterator);
-
- } else {
- /*
- * startKey will be used in iterator seek and sets the beginning point
- * for key traversal.
- * keyName will be used as parentID where the user has requested to
- * list the keys from.
- *
- * When recursive flag=false, parentID won't change between two pages.
- * For example: OM has a namespace like,
- * /a/1...1M files and /a/b/1...1M files.
- * /a/1...1M directories and /a/b/1...1M directories.
- * Listing "/a", will always have the parentID as "a" irrespective of
- * the startKey value.
- */
-
- // Check startKey is an immediate child of keyName. For example,
- // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
- if (StringUtils.isNotBlank(keyName) &&
- !OzoneFSUtils.isImmediateChild(keyName, startKey)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
- "Returns empty list", startKey, keyName);
+ /*
+ * startKey will be used in iterator seek and sets the beginning point
+ * for key traversal.
+ * keyName will be used as parentID where the user has requested to
+ * list the keys from.
+ *
+ * When recursive flag=false, parentID won't change between two pages.
+ * For example: OM has a namespace like,
+ * /a/1...1M files and /a/b/1...1M files.
+ * /a/1...1M directories and /a/b/1...1M directories.
+ * Listing "/a", will always have the parentID as "a" irrespective of
+ * the startKey value.
+ */
+
+ // Check startKey is an immediate child of keyName. For example,
+ // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+ if (StringUtils.isNotBlank(keyName) && !OzoneFSUtils
+ .isImmediateChild(keyName, startKey)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("StartKey {} is not an immediate child of keyName {}. "
+ + "Returns empty list", startKey, keyName);
+ }
+ return Collections.emptyList();
}
- return Collections.emptyList();
- }
- // assign startKeyPath if prefixPath is empty string.
- if (StringUtils.isBlank(prefixPath)) {
- prefixPath = OzoneFSUtils.getParentDir(startKey);
- }
+ // assign startKeyPath if prefixPath is empty string.
+ if (StringUtils.isBlank(prefixPath)) {
+ prefixPath = OzoneFSUtils.getParentDir(startKey);
+ }
- OmKeyArgs startKeyArgs = args.toBuilder()
- .setKeyName(startKey)
- .setSortDatanodesInPipeline(false)
- .build();
- OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(startKeyArgs,
- null, true);
+ OmKeyArgs startKeyArgs = args.toBuilder()
+ .setKeyName(startKey)
+ .setSortDatanodesInPipeline(false)
+ .build();
+ OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(startKeyArgs,
+ null, true);
- if (fileStatusInfo != null) {
- prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+ if (fileStatusInfo != null) {
+ prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
- if (fileStatusInfo.isDirectory()) {
- seekDirInDB = metadataManager.getOzonePathKey(
- volumeId, bucketId, prefixKeyInDB,
- fileStatusInfo.getKeyInfo().getFileName());
+ if (fileStatusInfo.isDirectory()) {
+ seekDirInDB = metadataManager.getOzonePathKey(
+ volumeId, bucketId, prefixKeyInDB,
+ fileStatusInfo.getKeyInfo().getFileName());
- // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
- // the order of search is, first seek into fileTable and then
- // dirTable. So, its not required to search again in the fileTable.
+ // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
+ // the order of search is, first seek into fileTable and then
+              // dirTable. So, it's not required to search again in the fileTable.
- // Seek the given key in dirTable.
- metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+ // Seek the given key in dirTable.
+ metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
bucketName);
- try {
- listStatusFindDirsInTableCache(tempCacheDirMap,
- metadataManager.getDirectoryTable(),
- prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
- bucketName, countEntries, numEntries, deletedKeySet);
- } finally {
- metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+ try {
+ listStatusFindDirsInTableCache(tempCacheDirMap,
+ metadataManager.getDirectoryTable(),
+ prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+ bucketName, countEntries, numEntries, deletedKeySet);
+ } finally {
+ metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+
+ } else {
+ seekFileInDB = metadataManager.getOzonePathKey(
+ volumeId, bucketId, prefixKeyInDB,
+ fileStatusInfo.getKeyInfo().getFileName());
+ // begins from the first sub-dir under the parent dir
+ seekDirInDB = metadataManager.getOzonePathKey(
+ volumeId, bucketId, prefixKeyInDB, "");
+
+ // First under lock obtain both entries from dir/file cache and
+ // generate entries marked for delete.
+ metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
bucketName);
- }
+ try {
+ BucketLayout bucketLayout =
+ getBucketLayout(metadataManager, volumeName, bucketName);
+ iterator = metadataManager.getKeyTable(bucketLayout)
+ .iterator();
+ countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
+ bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
+ prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath,
+ startKey, countEntries, numEntries);
+ } finally {
+ metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+ // 1. Seek the given key in key table.
+ countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
+ prefixPath, prefixKeyInDB, countEntries, numEntries,
+ deletedKeySet, iterator);
+ }
} else {
- seekFileInDB = metadataManager.getOzonePathKey(
- volumeId, bucketId, prefixKeyInDB,
- fileStatusInfo.getKeyInfo().getFileName());
- // begins from the first sub-dir under the parent dir
- seekDirInDB = metadataManager.getOzonePathKey(
- volumeId, bucketId, prefixKeyInDB, "");
-
- // First under lock obtain both entries from dir/file cache and
- // generate entries marked for delete.
- metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
- bucketName);
- try {
- BucketLayout bucketLayout =
- getBucketLayout(metadataManager, volumeName, bucketName);
- iterator = metadataManager.getKeyTable(bucketLayout)
- .iterator();
- countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
- bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
- prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath, startKey,
- countEntries, numEntries);
- } finally {
- metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
- bucketName);
+          // TODO: HDDS-4364: startKey can be a non-existent key
+ if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existent key; returning empty "
+ + "list", startKey);
}
-
- // 1. Seek the given key in key table.
- countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
- prefixPath, prefixKeyInDB, countEntries, numEntries,
- deletedKeySet, iterator);
- }
- } else {
- // TODO: HDDS-4364: startKey can be a non-existed key
- if (LOG.isDebugEnabled()) {
- LOG.debug("StartKey {} is a non-existed key and returning empty " +
- "list", startKey);
+ return Collections.emptyList();
}
- return Collections.emptyList();
+ }
+ } finally {
+ if (iterator != null) {
+ iterator.close();
}
}
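In listStatusFSO the iterator is created inside branch- and lock-scoped code, where a try-with-resources header cannot reach, so the hunk falls back to null initialization plus a null-checked close in finally, which covers every branch including the early returns. The shape in isolation (illustrative names):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    public final class BranchScopedCloseSketch {
      private BranchScopedCloseSketch() { }

      public static long scan(RocksDB db, boolean fromStart, byte[] startKey) {
        RocksIterator iterator = null;
        try {
          if (fromStart) {
            iterator = db.newIterator();
            iterator.seekToFirst();
          } else if (startKey != null) {
            iterator = db.newIterator();
            iterator.seek(startKey);
          } else {
            return 0; // this branch never allocates an iterator
          }
          long n = 0;
          while (iterator.isValid()) {
            n++;
            iterator.next();
          }
          return n;
        } finally {
          if (iterator != null) { // close whichever branch created it
            iterator.close();
          }
        }
      }
    }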
@@ -2028,9 +2058,25 @@ public class KeyManagerImpl implements KeyManager {
throws IOException {
Table dirTable = metadataManager.getDirectoryTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
- iterator = dirTable.iterator();
+ try (TableIterator<String,
+ ? extends Table.KeyValue<String, OmDirectoryInfo>>
+ iterator = dirTable.iterator()) {
+
+ return getDirectoriesWithIterator(cacheKeyMap, seekDirInDB, prefixPath,
+ prefixKeyInDB, countEntries, numEntries, recursive, volumeName,
+ bucketName, deletedKeySet, iterator);
+ }
+ }
+ @SuppressWarnings("parameternumber")
+ private int getDirectoriesWithIterator(
+ TreeMap<String, OzoneFileStatus> cacheKeyMap, String seekDirInDB,
+ String prefixPath, long prefixKeyInDB, int countEntries, long numEntries,
+ boolean recursive, String volumeName, String bucketName,
+ Set<String> deletedKeySet,
+ TableIterator<String,
+ ? extends Table.KeyValue<String, OmDirectoryInfo>> iterator)
+ throws IOException {
iterator.seek(seekDirInDB);
while (iterator.hasNext() && numEntries - countEntries > 0) {
@@ -2409,15 +2455,27 @@ public class KeyManagerImpl implements KeyManager {
@Override
public List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long bucketId,
OmKeyInfo parentInfo, long numEntries) throws IOException {
- List<OmKeyInfo> directories = new ArrayList<>();
String seekDirInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
parentInfo.getObjectID(), "");
long countEntries = 0;
Table dirTable = metadataManager.getDirectoryTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
- iterator = dirTable.iterator();
+ try (TableIterator<String,
+ ? extends Table.KeyValue<String, OmDirectoryInfo>>
+ iterator = dirTable.iterator()) {
+ return gatherSubDirsWithIterator(parentInfo, numEntries,
+ seekDirInDB, countEntries, iterator);
+ }
+
+ }
+ private List<OmKeyInfo> gatherSubDirsWithIterator(OmKeyInfo parentInfo,
+ long numEntries, String seekDirInDB,
+ long countEntries,
+ TableIterator<String,
+ ? extends Table.KeyValue<String, OmDirectoryInfo>> iterator)
+ throws IOException {
+ List<OmKeyInfo> directories = new ArrayList<>();
iterator.seek(seekDirInDB);
while (iterator.hasNext() && numEntries - countEntries > 0) {
@@ -2449,25 +2507,26 @@ public class KeyManagerImpl implements KeyManager {
long countEntries = 0;
Table fileTable = metadataManager.getFileTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator = fileTable.iterator();
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = fileTable.iterator()) {
- iterator.seek(seekFileInDB);
+ iterator.seek(seekFileInDB);
- while (iterator.hasNext() && numEntries - countEntries > 0) {
- Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
- OmKeyInfo fileInfo = entry.getValue();
- if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
- parentInfo.getObjectID())) {
- break;
- }
- fileInfo.setFileName(fileInfo.getKeyName());
- String fullKeyPath = OMFileRequest.getAbsolutePath(
- parentInfo.getKeyName(), fileInfo.getKeyName());
- fileInfo.setKeyName(fullKeyPath);
+ while (iterator.hasNext() && numEntries - countEntries > 0) {
+ Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
+ OmKeyInfo fileInfo = entry.getValue();
+ if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
+ parentInfo.getObjectID())) {
+ break;
+ }
+ fileInfo.setFileName(fileInfo.getKeyName());
+ String fullKeyPath = OMFileRequest.getAbsolutePath(
+ parentInfo.getKeyName(), fileInfo.getKeyName());
+ fileInfo.setKeyName(fullKeyPath);
- files.add(fileInfo);
- countEntries++;
+ files.add(fileInfo);
+ countEntries++;
+ }
}
return files;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index e5733d6712..88ffee1a6e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -887,16 +887,19 @@ public final class OMFileRequest {
// Check dirTable entries for any sub paths.
String seekDirInDB = metaMgr.getOzonePathKey(volumeId, bucketId,
omKeyInfo.getObjectID(), "");
- TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
- iterator = dirTable.iterator();
+ try (TableIterator<String, ? extends
+ Table.KeyValue<String, OmDirectoryInfo>>
+ iterator = dirTable.iterator()) {
- iterator.seek(seekDirInDB);
+ iterator.seek(seekDirInDB);
+
+ if (iterator.hasNext()) {
+ Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
+ OmDirectoryInfo dirInfo = entry.getValue();
+ return isImmediateChild(dirInfo.getParentObjectID(),
+ omKeyInfo.getObjectID());
+ }
- if (iterator.hasNext()) {
- Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
- OmDirectoryInfo dirInfo = entry.getValue();
- return isImmediateChild(dirInfo.getParentObjectID(),
- omKeyInfo.getObjectID());
}
return false; // no sub paths found
}
@@ -931,16 +934,17 @@ public final class OMFileRequest {
// Check fileTable entries for any sub paths.
String seekFileInDB = metaMgr.getOzonePathKey(volumeId, bucketId,
omKeyInfo.getObjectID(), "");
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator = fileTable.iterator();
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = fileTable.iterator()) {
- iterator.seek(seekFileInDB);
+ iterator.seek(seekFileInDB);
- if (iterator.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
- OmKeyInfo fileInfo = entry.getValue();
- return isImmediateChild(fileInfo.getParentObjectID(),
- omKeyInfo.getObjectID()); // found a sub path file
+ if (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
+ OmKeyInfo fileInfo = entry.getValue();
+ return isImmediateChild(fileInfo.getParentObjectID(),
+ omKeyInfo.getObjectID()); // found a sub path file
+ }
}
return false; // no sub paths found
}
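Both OMFileRequest checks follow a seek-once, peek-once pattern: position the iterator at the parent's path prefix and test whether the first entry found is an immediate child. With the iterator in try-with-resources, it is now released even on the early return inside the block. A generic version of such a probe — the key layout here is illustrative, not OM's actual path encoding:

    import java.util.Arrays;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    public final class PrefixProbeSketch {
      private PrefixProbeSketch() { }

      /** True if any key in the DB starts with the given prefix. */
      public static boolean hasEntryWithPrefix(RocksDB db, byte[] prefix) {
        try (RocksIterator it = db.newIterator()) {
          it.seek(prefix); // lands on the first key >= prefix, if any
          if (!it.isValid()) {
            return false;
          }
          byte[] key = it.key();
          // Only a match if the found key actually begins with the prefix.
          return key.length >= prefix.length
              && Arrays.equals(Arrays.copyOf(key, prefix.length), prefix);
        }
      }
    }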
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
index be1cd592d7..494f42e587 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
@@ -24,9 +24,8 @@ import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.kohsuke.MetaInfServices;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
import picocli.CommandLine;
/**
@@ -45,8 +44,8 @@ public class ListTables implements Callable<Void>, SubcommandWithParent {
@Override
public Void call() throws Exception {
- List<byte[]> columnFamilies = RocksDB.listColumnFamilies(new Options(),
- parent.getDbPath());
+ List<byte[]> columnFamilies = RocksDatabase.listColumnFamiliesEmptyOptions(
+ parent.getDbPath());
for (byte[] b : columnFamilies) {
System.out.println(new String(b, StandardCharsets.UTF_8));
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
index 24f6e21833..cb780da450 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
@@ -18,9 +18,8 @@
package org.apache.hadoop.ozone.debug;
+import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import java.util.ArrayList;
@@ -38,7 +37,7 @@ public final class RocksDBUtils {
public static List<ColumnFamilyDescriptor> getColumnFamilyDescriptors(
String dbPath) throws RocksDBException {
List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
- List<byte[]> cfList = RocksDB.listColumnFamilies(new Options(), dbPath);
+ List<byte[]> cfList = RocksDatabase.listColumnFamiliesEmptyOptions(dbPath);
if (cfList != null) {
for (byte[] b : cfList) {
cfs.add(new ColumnFamilyDescriptor(b));