Posted to commits@ozone.apache.org by si...@apache.org on 2022/08/15 18:08:34 UTC
[ozone] branch master updated: HDDS-6726. Close RocksObject in Recon and tools (#3678)
This is an automated email from the ASF dual-hosted git repository.
siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 5ba6728f7c HDDS-6726. Close RocksObject in Recon and tools (#3678)
5ba6728f7c is described below
commit 5ba6728f7c2535d4ada5a78a99504424e3d7bd4a
Author: Duong Nguyen <du...@gmail.com>
AuthorDate: Mon Aug 15 11:08:29 2022 -0700
HDDS-6726. Close RocksObject in Recon and tools (#3678)
---
.../datanode/metadata/DatanodeCRLStoreImpl.java | 13 +-
.../background/BlockDeletingService.java | 23 ++-
.../java/org/apache/hadoop/hdds/utils/HAUtils.java | 30 ++--
.../hadoop/hdds/utils/db/RDBCheckpointManager.java | 8 +-
.../hadoop/hdds/utils/db/RDBSstFileWriter.java | 18 +-
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 3 +-
.../apache/hadoop/hdds/utils/db/RocksDatabase.java | 8 +-
.../db/managed/ManagedColumnFamilyOptions.java | 2 +-
.../hdds/scm/ha/SCMHADBTransactionBufferImpl.java | 3 +
.../hadoop/hdds/scm/server/SCMCertStore.java | 5 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 49 +++---
.../hadoop/ozone/recon/api/NSSummaryEndpoint.java | 2 +-
.../ozone/recon/api/handlers/EntityHandler.java | 47 ++---
.../ozone/recon/api/handlers/FSOBucketHandler.java | 112 ++++++------
.../hadoop/ozone/recon/scm/ReconNodeManager.java | 18 +-
.../scm/ReconStorageContainerManagerFacade.java | 11 +-
.../impl/ReconContainerMetadataManagerImpl.java | 191 +++++++++++----------
.../ozone/recon/spi/impl/ReconDBProvider.java | 11 +-
.../ozone/recon/tasks/ContainerKeyMapperTask.java | 89 +++++-----
.../hadoop/ozone/recon/tasks/NSSummaryTask.java | 23 +--
.../ozone/recon/tasks/NSSummaryTaskWithFSO.java | 29 ++--
.../freon/containergenerator/GeneratorOm.java | 14 +-
22 files changed, 379 insertions(+), 330 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/datanode/metadata/DatanodeCRLStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/datanode/metadata/DatanodeCRLStoreImpl.java
index 67827b962e..0926a79295 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/datanode/metadata/DatanodeCRLStoreImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/datanode/metadata/DatanodeCRLStoreImpl.java
@@ -105,13 +105,14 @@ public class DatanodeCRLStoreImpl implements DatanodeCRLStore {
@Override
public List<CRLInfo> getPendingCRLs() throws IOException {
- TableIterator<Long, ? extends Table.KeyValue<Long, CRLInfo>> iter =
- pendingCRLsTable.iterator();
- List<CRLInfo> pendingCRLs = new ArrayList<>();
- while (iter.hasNext()) {
- pendingCRLs.add(iter.next().getValue());
+ try (TableIterator<Long, ? extends Table.KeyValue<Long, CRLInfo>> iter =
+ pendingCRLsTable.iterator()) {
+ List<CRLInfo> pendingCRLs = new ArrayList<>();
+ while (iter.hasNext()) {
+ pendingCRLs.add(iter.next().getValue());
+ }
+ return pendingCRLs;
}
- return pendingCRLs;
}
private void checkTableStatus(Table table, String name) throws IOException {
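The pattern this commit applies throughout is plain try-with-resources over RocksDB's JNI-backed objects: a RocksIterator (which Ozone's TableIterator wraps) holds native memory the Java GC never reclaims, so it must be closed on every path, including when the loop body throws. A minimal, self-contained sketch of the same shape against the raw org.rocksdb API rather than Ozone's Table wrappers; the DB path, class name, and helper name are illustrative, not from the Ozone code base:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.RocksIterator;

    import java.util.ArrayList;
    import java.util.List;

    public final class IteratorScan {
      // RocksIterator wraps native memory, so try-with-resources closes it
      // even if the loop throws. Returning from inside the block is fine.
      public static List<byte[]> readAllValues(RocksDB db) {
        List<byte[]> values = new ArrayList<>();
        try (RocksIterator iter = db.newIterator()) {
          for (iter.seekToFirst(); iter.isValid(); iter.next()) {
            values.add(iter.value());
          }
          return values;
        }
      }

      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // Illustrative path; the DB directory is created on demand.
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/iterator-scan-demo")) {
          db.put("k1".getBytes(), "v1".getBytes());
          System.out.println(readAllValues(db).size()); // prints 1
        }
      }
    }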
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 1ccca2aa0b..d3599af9e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -428,9 +428,13 @@ public class BlockDeletingService extends BackgroundService {
Table<Long, DeletedBlocksTransaction> deleteTxns =
((DeleteTransactionStore<Long>) meta.getStore())
.getDeleteTransactionTable();
- return deleteViaTransactionStore(
- deleteTxns.iterator(), meta,
- container, dataDir, startTime, schema2Deleter);
+ try (TableIterator<Long,
+ ? extends Table.KeyValue<Long, DeletedBlocksTransaction>>
+ iterator = deleteTxns.iterator()) {
+ return deleteViaTransactionStore(
+ iterator, meta,
+ container, dataDir, startTime, schema2Deleter);
+ }
}
public ContainerBackgroundTaskResult deleteViaSchema3(
@@ -444,12 +448,16 @@ public class BlockDeletingService extends BackgroundService {
Table<String, DeletedBlocksTransaction> deleteTxns =
((DeleteTransactionStore<String>) meta.getStore())
.getDeleteTransactionTable();
- return deleteViaTransactionStore(
- deleteTxns.iterator(containerData.containerPrefix()), meta,
- container, dataDir, startTime, schema3Deleter);
+ try (TableIterator<String,
+ ? extends Table.KeyValue<String, DeletedBlocksTransaction>>
+ iterator = deleteTxns.iterator(containerData.containerPrefix())) {
+ return deleteViaTransactionStore(
+ iterator, meta,
+ container, dataDir, startTime, schema3Deleter);
+ }
}
- public ContainerBackgroundTaskResult deleteViaTransactionStore(
+ private ContainerBackgroundTaskResult deleteViaTransactionStore(
TableIterator<?, ? extends Table.KeyValue<?, DeletedBlocksTransaction>>
iter, DBHandle meta, Container container, File dataDir,
long startTime, Deleter deleter) throws IOException {
@@ -471,7 +479,6 @@ public class BlockDeletingService extends BackgroundService {
numBlocks += delTx.getLocalIDList().size();
delBlocks.add(delTx);
}
- iter.close();
if (delBlocks.isEmpty()) {
LOG.debug("No transaction found in container : {}",
containerData.getContainerID());
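Besides wrapping the iterators, this hunk also moves ownership: deleteViaTransactionStore previously received an open iterator and called iter.close() partway through its body, which leaked the iterator whenever an earlier statement threw. Now each public entry point opens the iterator in try-with-resources and the helper, made private, only consumes it. A sketch of that caller-owns-the-resource split (names are illustrative):

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    import java.util.ArrayList;
    import java.util.List;

    final class CallerOwnsIterator {
      // The public entry point opens and reliably closes the iterator;
      // the private helper only consumes it and never calls close() itself.
      static List<byte[]> collect(RocksDB db, int limit) {
        try (RocksIterator iter = db.newIterator()) {
          return consume(iter, limit);
        }
      }

      private static List<byte[]> consume(RocksIterator iter, int limit) {
        List<byte[]> out = new ArrayList<>();
        for (iter.seekToFirst(); iter.isValid() && out.size() < limit;
             iter.next()) {
          out.add(iter.value());
        }
        return out;
      }
    }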
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index 7f2deeb093..df368e5319 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -251,28 +251,28 @@ public final class HAUtils {
OzoneConfiguration tempConfig, Path dbDir, String dbName,
DBDefinition definition)
throws Exception {
- DBStore dbStore = loadDB(tempConfig, dbDir.toFile(),
- dbName, definition);
- // Get the table name with TransactionInfo as the value. The transaction
- // info table name are different in SCM and SCM.
+ try (DBStore dbStore = loadDB(tempConfig, dbDir.toFile(),
+ dbName, definition)) {
- // In case, a new table gets added where the value is TransactionInfo, this
- // logic may not work.
+ // Get the table name with TransactionInfo as the value. The transaction
+ // info table name are different in SCM and SCM.
+ // In case, a new table gets added where the value is TransactionInfo,
+ // this logic may not work.
- Table<String, TransactionInfo> transactionInfoTable =
- getTransactionInfoTable(dbStore, definition);
+ Table<String, TransactionInfo> transactionInfoTable =
+ getTransactionInfoTable(dbStore, definition);
- TransactionInfo transactionInfo =
- transactionInfoTable.get(TRANSACTION_INFO_KEY);
- dbStore.close();
+ TransactionInfo transactionInfo =
+ transactionInfoTable.get(TRANSACTION_INFO_KEY);
- if (transactionInfo == null) {
- throw new IOException("Failed to read TransactionInfo from DB " +
- definition.getName() + " at " + dbDir);
+ if (transactionInfo == null) {
+ throw new IOException("Failed to read TransactionInfo from DB " +
+ definition.getName() + " at " + dbDir);
+ }
+ return transactionInfo;
}
- return transactionInfo;
}
public static Table<String, TransactionInfo> getTransactionInfoTable(
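Here the manual dbStore.close() sat between the table read and the null check, so the store leaked whenever the read threw or the TransactionInfo was missing. try-with-resources closes it on the return path and the throw path alike. The same shape against a raw RocksDB handle; the key name is illustrative:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    final class ReadThenClose {
      // Before the fix, close() ran only on the success path; here the DB
      // is closed whether we return the value or throw on a missing key.
      static byte[] readRequiredKey(String dbPath) throws RocksDBException {
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, dbPath)) {
          byte[] value = db.get("#TRANSACTIONINFO".getBytes());
          if (value == null) {
            throw new RocksDBException("Missing transaction info in " + dbPath);
          }
          return value;
        }
      }
    }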
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
index 9cc23b53ba..943d58ef1d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.utils.db;
+import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -34,7 +35,7 @@ import org.slf4j.LoggerFactory;
/**
* RocksDB Checkpoint Manager, used to create and cleanup checkpoints.
*/
-public class RDBCheckpointManager {
+public class RDBCheckpointManager implements Closeable {
private final RocksCheckpoint checkpoint;
public static final String RDB_CHECKPOINT_DIR_PREFIX = "checkpoint_";
@@ -90,4 +91,9 @@ public class RDBCheckpointManager {
}
return null;
}
+
+ @Override
+ public void close() throws IOException {
+ checkpoint.close();
+ }
}
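RDBCheckpointManager now implements Closeable and simply delegates to the RocksCheckpoint it owns, so RDBStore.close() (below) can release the whole ownership chain. A sketch of that delegation using org.rocksdb.Checkpoint directly; the class name is illustrative:

    import org.rocksdb.Checkpoint;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    import java.io.Closeable;

    // A wrapper that owns a native Checkpoint object exposes close() so
    // that its own owner can release it deterministically.
    final class CheckpointManagerSketch implements Closeable {
      private final Checkpoint checkpoint;

      CheckpointManagerSketch(RocksDB db) {
        this.checkpoint = Checkpoint.create(db);
      }

      void createCheckpoint(String dir) throws RocksDBException {
        checkpoint.createCheckpoint(dir);
      }

      @Override
      public void close() {
        checkpoint.close(); // releases the native handle
      }
    }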
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
index e9fb83b446..31b2774a12 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
@@ -38,10 +38,10 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
private File sstFile;
private AtomicLong keyCounter;
private ManagedOptions emptyOption = new ManagedOptions();
+ private final ManagedEnvOptions emptyEnvOptions = new ManagedEnvOptions();
public RDBSstFileWriter() {
- ManagedEnvOptions envOptions = new ManagedEnvOptions();
- this.sstFileWriter = new ManagedSstFileWriter(envOptions, emptyOption);
+ this.sstFileWriter = new ManagedSstFileWriter(emptyEnvOptions, emptyOption);
this.keyCounter = new AtomicLong(0);
}
@@ -82,19 +82,23 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
throw toIOException("Failed to finish dumping into file "
+ sstFile.getAbsolutePath(), e);
} finally {
- sstFileWriter.close();
- sstFileWriter = null;
- emptyOption.close();
+ closeResources();
}
keyCounter.set(0);
}
}
+ private void closeResources() {
+ sstFileWriter.close();
+ sstFileWriter = null;
+ emptyOption.close();
+ emptyEnvOptions.close();
+ }
+
private void closeOnFailure() {
if (sstFileWriter != null) {
- sstFileWriter.close();
- sstFileWriter = null;
+ closeResources();
}
}
}
\ No newline at end of file
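The leak fixed here is subtler: ManagedEnvOptions was a constructor-local, so nothing could ever close it. Promoting it to a field and funnelling all cleanup through one closeResources() means the success path and the failure path release exactly the same set of native objects. A one-shot sketch with the raw RocksDB classes:

    import org.rocksdb.EnvOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileWriter;

    final class SstDumpSketch {
      // Every native option object the writer was constructed with is a
      // field, released in a single helper no code path can skip. Like the
      // original, this writer is one-shot: closeResources() nulls it out.
      private final EnvOptions envOptions = new EnvOptions();
      private final Options options = new Options();
      private SstFileWriter writer = new SstFileWriter(envOptions, options);

      void dump(String sstPath, byte[] key, byte[] value)
          throws RocksDBException {
        try {
          writer.open(sstPath);
          writer.put(key, value);
          writer.finish();
        } finally {
          closeResources();
        }
      }

      private void closeResources() {
        if (writer != null) {
          writer.close();
          writer = null;
        }
        options.close();
        envOptions.close();
      }
    }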
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index c18354168b..fed0991734 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -132,13 +132,14 @@ public class RDBStore implements DBStore {
}
@Override
- public void close() {
+ public void close() throws IOException {
if (statMBeanName != null) {
MBeans.unregister(statMBeanName);
statMBeanName = null;
}
RDBMetrics.unRegister();
+ checkPointManager.close();
db.close();
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 9a710ec4ea..6f20dcee7b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -36,6 +36,7 @@ import org.rocksdb.RocksDBException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
@@ -199,7 +200,7 @@ public final class RocksDatabase {
*
* @see ManagedCheckpoint
*/
- final class RocksCheckpoint {
+ final class RocksCheckpoint implements Closeable {
private final ManagedCheckpoint checkpoint;
private RocksCheckpoint() {
@@ -218,6 +219,11 @@ public final class RocksDatabase {
public long getLatestSequenceNumber() {
return RocksDatabase.this.getLatestSequenceNumber();
}
+
+ @Override
+ public void close() throws IOException {
+ checkpoint.close();
+ }
}
/**
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
index 588478ab3a..b29b7f1777 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
@@ -34,7 +34,7 @@ public class ManagedColumnFamilyOptions extends ColumnFamilyOptions {
}
@Override
- public ManagedColumnFamilyOptions setTableFormatConfig(
+ public synchronized ManagedColumnFamilyOptions setTableFormatConfig(
TableFormatConfig tableFormatConfig) {
TableFormatConfig previous = tableFormatConfig();
if (previous instanceof ManagedBlockBasedTableConfig) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
index e7a1256e79..7ad66af5ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
@@ -138,5 +138,8 @@ public class SCMHADBTransactionBufferImpl implements SCMHADBTransactionBuffer {
@Override
public void close() throws IOException {
+ if (currentBatchOperation != null) {
+ currentBatchOperation.close();
+ }
}
}
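close() on the transaction buffer must tolerate a buffer that was never used: the batch is created lazily, so the field can still be null. A sketch of that defensive close:

    import org.rocksdb.WriteBatch;

    import java.io.Closeable;

    // The buffer only holds a batch after its first use, so close() must
    // guard against a null field; nulling it also makes close idempotent.
    final class TransactionBufferSketch implements Closeable {
      private WriteBatch currentBatch; // lazily created elsewhere

      @Override
      public void close() {
        if (currentBatch != null) {
          currentBatch.close();
          currentBatch = null;
        }
      }
    }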
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
index 680bf4384f..f7fca60225 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
@@ -111,9 +111,8 @@ public final class SCMCertStore implements CertificateStore {
public void storeValidScmCertificate(BigInteger serialID,
X509Certificate certificate) throws IOException {
lock.lock();
- try {
- BatchOperation batchOperation =
- scmMetadataStore.getBatchHandler().initBatchOperation();
+ try (BatchOperation batchOperation =
+ scmMetadataStore.getBatchHandler().initBatchOperation()) {
scmMetadataStore.getValidSCMCertsTable().putWithBatch(batchOperation,
serialID, certificate);
scmMetadataStore.getValidCertsTable().putWithBatch(batchOperation,
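Note the ordering when try-with-resources meets an explicit lock: the lock is taken first and the batch is opened inside it, so the native WriteBatch is released before the lock is (the unlock presumably stays in the method's finally block, outside this hunk). A sketch of the same shape with a raw WriteBatch:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    import java.util.concurrent.locks.ReentrantLock;

    final class LockedBatchWrite {
      private final ReentrantLock lock = new ReentrantLock();

      // Lock first, then open the batch in try-with-resources: the batch
      // is freed before the lock is dropped, and both puts land atomically
      // in a single write().
      void storeTwo(RocksDB db, byte[] k1, byte[] k2, byte[] value)
          throws RocksDBException {
        lock.lock();
        try (WriteBatch batch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions()) {
          batch.put(k1, value);
          batch.put(k2, value);
          db.write(writeOptions, batch);
        } finally {
          lock.unlock();
        }
      }
    }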
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 7b03436f59..e6ab0c3f5c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -3172,32 +3172,33 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
// are flushed to the table. This should be acceptable for a list tenant
// request.
- final TableIterator<String, ? extends KeyValue<String, OmDBTenantState>>
- iterator = tenantStateTable.iterator();
-
- final List<TenantState> tenantStateList = new ArrayList<>();
-
- // Iterate table
- while (iterator.hasNext()) {
- final Table.KeyValue<String, OmDBTenantState> dbEntry = iterator.next();
- final String tenantId = dbEntry.getKey();
- final OmDBTenantState omDBTenantState = dbEntry.getValue();
- assert (tenantId.equals(omDBTenantState.getTenantId()));
- tenantStateList.add(TenantState.newBuilder()
- .setTenantId(omDBTenantState.getTenantId())
- .setBucketNamespaceName(omDBTenantState.getBucketNamespaceName())
- .setUserRoleName(omDBTenantState.getUserRoleName())
- .setAdminRoleName(omDBTenantState.getAdminRoleName())
- .setBucketNamespacePolicyName(
- omDBTenantState.getBucketNamespacePolicyName())
- .setBucketPolicyName(omDBTenantState.getBucketPolicyName())
- .build());
- }
+ try (TableIterator<String, ? extends KeyValue<String, OmDBTenantState>>
+ iterator = tenantStateTable.iterator()) {
+
+ final List<TenantState> tenantStateList = new ArrayList<>();
+
+ // Iterate table
+ while (iterator.hasNext()) {
+ final Table.KeyValue<String, OmDBTenantState> dbEntry = iterator.next();
+ final String tenantId = dbEntry.getKey();
+ final OmDBTenantState omDBTenantState = dbEntry.getValue();
+ assert (tenantId.equals(omDBTenantState.getTenantId()));
+ tenantStateList.add(TenantState.newBuilder()
+ .setTenantId(omDBTenantState.getTenantId())
+ .setBucketNamespaceName(omDBTenantState.getBucketNamespaceName())
+ .setUserRoleName(omDBTenantState.getUserRoleName())
+ .setAdminRoleName(omDBTenantState.getAdminRoleName())
+ .setBucketNamespacePolicyName(
+ omDBTenantState.getBucketNamespacePolicyName())
+ .setBucketPolicyName(omDBTenantState.getBucketPolicyName())
+ .build());
+ }
- AUDIT.logReadSuccess(buildAuditMessageForSuccess(
- OMAction.LIST_TENANT, new LinkedHashMap<>()));
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ OMAction.LIST_TENANT, new LinkedHashMap<>()));
- return new TenantStateList(tenantStateList);
+ return new TenantStateList(tenantStateList);
+ }
}
/**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index 7fd3852f6e..aa2699f8bb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -61,7 +61,7 @@ public class NSSummaryEndpoint {
this.omMetadataManager = omMetadataManager;
this.reconSCM = reconSCM;
}
-
+
/**
* This endpoint will return the entity type and aggregate count of objects.
* @param path the request path.
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
index 906a1e1e5a..bddbddb1e5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
@@ -226,15 +226,16 @@ public abstract class EntityHandler {
List<OmVolumeArgs> result = new ArrayList<>();
Table<String, OmVolumeArgs> volumeTable =
omMetadataManager.getVolumeTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmVolumeArgs>>
- iterator = volumeTable.iterator();
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmVolumeArgs>>
+ iterator = volumeTable.iterator()) {
- while (iterator.hasNext()) {
- Table.KeyValue<String, OmVolumeArgs> kv = iterator.next();
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmVolumeArgs> kv = iterator.next();
- OmVolumeArgs omVolumeArgs = kv.getValue();
- if (omVolumeArgs != null) {
- result.add(omVolumeArgs);
+ OmVolumeArgs omVolumeArgs = kv.getValue();
+ if (omVolumeArgs != null) {
+ result.add(omVolumeArgs);
+ }
}
}
return result;
@@ -256,26 +257,28 @@ public abstract class EntityHandler {
Table<String, OmBucketInfo> bucketTable =
omMetadataManager.getBucketTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
- iterator = bucketTable.iterator();
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
+ iterator = bucketTable.iterator()) {
- if (volumeName != null) {
- if (!volumeExists(omMetadataManager, volumeName)) {
- return result;
+ if (volumeName != null) {
+ if (!volumeExists(omMetadataManager, volumeName)) {
+ return result;
+ }
+ seekPrefix = omMetadataManager.getVolumeKey(volumeName + OM_KEY_PREFIX);
}
- seekPrefix = omMetadataManager.getVolumeKey(volumeName + OM_KEY_PREFIX);
- }
- while (iterator.hasNext()) {
- Table.KeyValue<String, OmBucketInfo> kv = iterator.next();
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmBucketInfo> kv = iterator.next();
- String key = kv.getKey();
- OmBucketInfo omBucketInfo = kv.getValue();
+ String key = kv.getKey();
+ OmBucketInfo omBucketInfo = kv.getValue();
- if (omBucketInfo != null) {
- // We should return only the keys, whose keys match with the seek prefix
- if (key.startsWith(seekPrefix)) {
- result.add(omBucketInfo);
+ if (omBucketInfo != null) {
+ // We should return only the keys, whose keys match with
+ // the seek prefix
+ if (key.startsWith(seekPrefix)) {
+ result.add(omBucketInfo);
+ }
}
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
index 28a3d6fabf..ba188b70bd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
@@ -121,29 +121,30 @@ public class FSOBucketHandler extends BucketHandler {
throws IOException {
Table<String, OmKeyInfo> keyTable = getOmMetadataManager().getFileTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator = keyTable.iterator();
-
- String seekPrefix = OM_KEY_PREFIX +
- volumeId +
- OM_KEY_PREFIX +
- bucketId +
- OM_KEY_PREFIX +
- parentId +
- OM_KEY_PREFIX;
- iterator.seek(seekPrefix);
long totalDU = 0L;
- // handle direct keys
- while (iterator.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
- String dbKey = kv.getKey();
- // since the RocksDB is ordered, seek until the prefix isn't matched
- if (!dbKey.startsWith(seekPrefix)) {
- break;
- }
- OmKeyInfo keyInfo = kv.getValue();
- if (keyInfo != null) {
- totalDU += getKeySizeWithReplication(keyInfo);
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = keyTable.iterator()) {
+
+ String seekPrefix = OM_KEY_PREFIX +
+ volumeId +
+ OM_KEY_PREFIX +
+ bucketId +
+ OM_KEY_PREFIX +
+ parentId +
+ OM_KEY_PREFIX;
+ iterator.seek(seekPrefix);
+ // handle direct keys
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
+ String dbKey = kv.getKey();
+ // since the RocksDB is ordered, seek until the prefix isn't matched
+ if (!dbKey.startsWith(seekPrefix)) {
+ break;
+ }
+ OmKeyInfo keyInfo = kv.getValue();
+ if (keyInfo != null) {
+ totalDU += getKeySizeWithReplication(keyInfo);
+ }
}
}
@@ -180,44 +181,45 @@ public class FSOBucketHandler extends BucketHandler {
String normalizedPath) throws IOException {
Table<String, OmKeyInfo> keyTable = getOmMetadataManager().getFileTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator = keyTable.iterator();
-
- String seekPrefix = OM_KEY_PREFIX +
- volumeId +
- OM_KEY_PREFIX +
- bucketId +
- OM_KEY_PREFIX +
- parentId +
- OM_KEY_PREFIX;
- iterator.seek(seekPrefix);
-
long keyDataSizeWithReplica = 0L;
- while (iterator.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
- String dbKey = kv.getKey();
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = keyTable.iterator()) {
- if (!dbKey.startsWith(seekPrefix)) {
- break;
- }
- OmKeyInfo keyInfo = kv.getValue();
- if (keyInfo != null) {
- DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
- String subpath = buildSubpath(normalizedPath,
- keyInfo.getFileName());
- diskUsage.setSubpath(subpath);
- diskUsage.setKey(true);
- diskUsage.setSize(keyInfo.getDataSize());
+ String seekPrefix = OM_KEY_PREFIX +
+ volumeId +
+ OM_KEY_PREFIX +
+ bucketId +
+ OM_KEY_PREFIX +
+ parentId +
+ OM_KEY_PREFIX;
+ iterator.seek(seekPrefix);
- if (withReplica) {
- long keyDU = getKeySizeWithReplication(keyInfo);
- keyDataSizeWithReplica += keyDU;
- diskUsage.setSizeWithReplica(keyDU);
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
+ String dbKey = kv.getKey();
+
+ if (!dbKey.startsWith(seekPrefix)) {
+ break;
}
- // list the key as a subpath
- if (listFile) {
- duData.add(diskUsage);
+ OmKeyInfo keyInfo = kv.getValue();
+ if (keyInfo != null) {
+ DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
+ String subpath = buildSubpath(normalizedPath,
+ keyInfo.getFileName());
+ diskUsage.setSubpath(subpath);
+ diskUsage.setKey(true);
+ diskUsage.setSize(keyInfo.getDataSize());
+
+ if (withReplica) {
+ long keyDU = getKeySizeWithReplication(keyInfo);
+ keyDataSizeWithReplica += keyDU;
+ diskUsage.setSizeWithReplica(keyDU);
+ }
+ // list the key as a subpath
+ if (listFile) {
+ duData.add(diskUsage);
+ }
}
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 5f7af8365f..70a971ea69 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -101,10 +101,9 @@ public class ReconNodeManager extends SCMNodeManager {
}
private void loadExistingNodes() {
- try {
+ try (TableIterator<UUID, ? extends Table.KeyValue<UUID, DatanodeDetails>>
+ iterator = nodeDB.iterator()) {
int nodeCount = 0;
- TableIterator<UUID, ? extends Table.KeyValue<UUID, DatanodeDetails>>
- iterator = nodeDB.iterator();
while (iterator.hasNext()) {
DatanodeDetails datanodeDetails = iterator.next().getValue();
register(datanodeDetails, null, null,
@@ -313,12 +312,13 @@ public class ReconNodeManager extends SCMNodeManager {
@VisibleForTesting
public long getNodeDBKeyCount() throws IOException {
long nodeCount = 0;
- TableIterator<UUID, ? extends Table.KeyValue<UUID, DatanodeDetails>>
- iterator = nodeDB.iterator();
- while (iterator.hasNext()) {
- iterator.next();
- nodeCount++;
+ try (TableIterator<UUID, ? extends Table.KeyValue<UUID, DatanodeDetails>>
+ iterator = nodeDB.iterator()) {
+ while (iterator.hasNext()) {
+ iterator.next();
+ nodeCount++;
+ }
+ return nodeCount;
}
- return nodeCount;
}
}
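Both methods here return from inside the try block; that is safe, since the iterator is closed after the return value is computed but before it leaves the method. A compact sketch of getNodeDBKeyCount's shape:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    final class KeyCounter {
      // The return sits inside the try block, and the iterator is still
      // closed before the method actually returns.
      static long count(RocksDB db) {
        try (RocksIterator iter = db.newIterator()) {
          long n = 0;
          for (iter.seekToFirst(); iter.isValid(); iter.next()) {
            n++;
          }
          return n;
        }
      }
    }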
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index f619e29703..27c474a1bd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -425,11 +425,12 @@ public class ReconStorageContainerManagerFacade
ReconSCMDBDefinition.NODES.getTable(dbStore);
Table<UUID, DatanodeDetails> newNodeTable =
ReconSCMDBDefinition.NODES.getTable(newStore);
- TableIterator<UUID, ? extends KeyValue<UUID,
- DatanodeDetails>> iterator = nodeTable.iterator();
- while (iterator.hasNext()) {
- KeyValue<UUID, DatanodeDetails> keyValue = iterator.next();
- newNodeTable.put(keyValue.getKey(), keyValue.getValue());
+ try (TableIterator<UUID, ? extends KeyValue<UUID,
+ DatanodeDetails>> iterator = nodeTable.iterator()) {
+ while (iterator.hasNext()) {
+ KeyValue<UUID, DatanodeDetails> keyValue = iterator.next();
+ newNodeTable.put(keyValue.getKey(), keyValue.getValue());
+ }
}
sequenceIdGen.reinitialize(
ReconSCMDBDefinition.SEQUENCE_ID.getTable(newStore));
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
index 71bf9b8641..1461437679 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
@@ -216,23 +216,24 @@ public class ReconContainerMetadataManagerImpl
public void batchStoreContainerReplicaHistory(
Map<Long, Map<UUID, ContainerReplicaHistory>> replicaHistoryMap)
throws IOException {
- BatchOperation batchOperation = containerDbStore.initBatchOperation();
-
- for (Map.Entry<Long, Map<UUID, ContainerReplicaHistory>> entry :
- replicaHistoryMap.entrySet()) {
- final long containerId = entry.getKey();
- final Map<UUID, ContainerReplicaHistory> tsMap = entry.getValue();
+ try (BatchOperation batchOperation =
+ containerDbStore.initBatchOperation()) {
+ for (Map.Entry<Long, Map<UUID, ContainerReplicaHistory>> entry :
+ replicaHistoryMap.entrySet()) {
+ final long containerId = entry.getKey();
+ final Map<UUID, ContainerReplicaHistory> tsMap = entry.getValue();
+
+ List<ContainerReplicaHistory> tsList = new ArrayList<>();
+ for (Map.Entry<UUID, ContainerReplicaHistory> e : tsMap.entrySet()) {
+ tsList.add(e.getValue());
+ }
- List<ContainerReplicaHistory> tsList = new ArrayList<>();
- for (Map.Entry<UUID, ContainerReplicaHistory> e : tsMap.entrySet()) {
- tsList.add(e.getValue());
+ containerReplicaHistoryTable.putWithBatch(batchOperation, containerId,
+ new ContainerReplicaHistoryList(tsList));
}
- containerReplicaHistoryTable.putWithBatch(batchOperation, containerId,
- new ContainerReplicaHistoryList(tsList));
+ containerDbStore.commitBatchOperation(batchOperation);
}
-
- containerDbStore.commitBatchOperation(batchOperation);
}
/**
@@ -330,54 +331,57 @@ public class ReconContainerMetadataManagerImpl
long containerId, String prevKeyPrefix) throws IOException {
Map<ContainerKeyPrefix, Integer> prefixes = new LinkedHashMap<>();
- TableIterator<ContainerKeyPrefix, ? extends KeyValue<ContainerKeyPrefix,
- Integer>> containerIterator = containerKeyTable.iterator();
- ContainerKeyPrefix seekKey;
- boolean skipPrevKey = false;
- if (StringUtils.isNotBlank(prevKeyPrefix)) {
- skipPrevKey = true;
- seekKey = new ContainerKeyPrefix(containerId, prevKeyPrefix);
- } else {
- seekKey = new ContainerKeyPrefix(containerId);
- }
- KeyValue<ContainerKeyPrefix, Integer> seekKeyValue =
- containerIterator.seek(seekKey);
-
- // check if RocksDB was able to seek correctly to the given key prefix
- // if not, then return empty result
- // In case of an empty prevKeyPrefix, all the keys in the container are
- // returned
- if (seekKeyValue == null ||
- (StringUtils.isNotBlank(prevKeyPrefix) &&
- !seekKeyValue.getKey().getKeyPrefix().equals(prevKeyPrefix))) {
- return prefixes;
- }
+ try (TableIterator<ContainerKeyPrefix,
+ ? extends KeyValue<ContainerKeyPrefix, Integer>>
+ containerIterator = containerKeyTable.iterator()) {
+ ContainerKeyPrefix seekKey;
+ boolean skipPrevKey = false;
+ if (StringUtils.isNotBlank(prevKeyPrefix)) {
+ skipPrevKey = true;
+ seekKey = new ContainerKeyPrefix(containerId, prevKeyPrefix);
+ } else {
+ seekKey = new ContainerKeyPrefix(containerId);
+ }
+ KeyValue<ContainerKeyPrefix, Integer> seekKeyValue =
+ containerIterator.seek(seekKey);
+
+ // check if RocksDB was able to seek correctly to the given key prefix
+ // if not, then return empty result
+ // In case of an empty prevKeyPrefix, all the keys in the container are
+ // returned
+ if (seekKeyValue == null ||
+ (StringUtils.isNotBlank(prevKeyPrefix) &&
+ !seekKeyValue.getKey().getKeyPrefix().equals(prevKeyPrefix))) {
+ return prefixes;
+ }
- while (containerIterator.hasNext()) {
- KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
- ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
+ while (containerIterator.hasNext()) {
+ KeyValue<ContainerKeyPrefix, Integer> keyValue =
+ containerIterator.next();
+ ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
- // skip the prev key if prev key is present
- if (skipPrevKey &&
- containerKeyPrefix.getKeyPrefix().equals(prevKeyPrefix)) {
- continue;
- }
+ // skip the prev key if prev key is present
+ if (skipPrevKey &&
+ containerKeyPrefix.getKeyPrefix().equals(prevKeyPrefix)) {
+ continue;
+ }
- // The prefix seek only guarantees that the iterator's head will be
- // positioned at the first prefix match. We still have to check the key
- // prefix.
- if (containerKeyPrefix.getContainerId() == containerId) {
- if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) {
- prefixes.put(new ContainerKeyPrefix(containerId,
- containerKeyPrefix.getKeyPrefix(),
- containerKeyPrefix.getKeyVersion()),
- keyValue.getValue());
+ // The prefix seek only guarantees that the iterator's head will be
+ // positioned at the first prefix match. We still have to check the key
+ // prefix.
+ if (containerKeyPrefix.getContainerId() == containerId) {
+ if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) {
+ prefixes.put(new ContainerKeyPrefix(containerId,
+ containerKeyPrefix.getKeyPrefix(),
+ containerKeyPrefix.getKeyVersion()),
+ keyValue.getValue());
+ } else {
+ LOG.warn("Null key prefix returned for containerId = {} ",
+ containerId);
+ }
} else {
- LOG.warn("Null key prefix returned for containerId = {} ",
- containerId);
+ break; //Break when the first mismatch occurs.
}
- } else {
- break; //Break when the first mismatch occurs.
}
}
return prefixes;
@@ -401,43 +405,48 @@ public class ReconContainerMetadataManagerImpl
long prevContainer)
throws IOException {
Map<Long, ContainerMetadata> containers = new LinkedHashMap<>();
- TableIterator<ContainerKeyPrefix, ? extends KeyValue<ContainerKeyPrefix,
- Integer>> containerIterator = containerKeyTable.iterator();
- ContainerKeyPrefix seekKey;
- if (prevContainer > 0L) {
- seekKey = new ContainerKeyPrefix(prevContainer);
- KeyValue<ContainerKeyPrefix,
- Integer> seekKeyValue = containerIterator.seek(seekKey);
- // Check if RocksDB was able to correctly seek to the given
- // prevContainer containerId. If not, then return empty result
- if (seekKeyValue != null &&
- seekKeyValue.getKey().getContainerId() != prevContainer) {
- return containers;
- } else {
- // seek to the prevContainer+1 containerID to start scan
- seekKey = new ContainerKeyPrefix(prevContainer + 1);
- containerIterator.seek(seekKey);
- }
- }
- while (containerIterator.hasNext()) {
- KeyValue<ContainerKeyPrefix, Integer> keyValue = containerIterator.next();
- ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
- Long containerID = containerKeyPrefix.getContainerId();
- Integer numberOfKeys = keyValue.getValue();
-
- // break the loop if limit has been reached
- // and one more new entity needs to be added to the containers map
- if (containers.size() == limit && !containers.containsKey(containerID)) {
- break;
+ try (
+ TableIterator<ContainerKeyPrefix,
+ ? extends KeyValue<ContainerKeyPrefix, Integer>>
+ containerIterator = containerKeyTable.iterator()) {
+ ContainerKeyPrefix seekKey;
+ if (prevContainer > 0L) {
+ seekKey = new ContainerKeyPrefix(prevContainer);
+ KeyValue<ContainerKeyPrefix,
+ Integer> seekKeyValue = containerIterator.seek(seekKey);
+ // Check if RocksDB was able to correctly seek to the given
+ // prevContainer containerId. If not, then return empty result
+ if (seekKeyValue != null &&
+ seekKeyValue.getKey().getContainerId() != prevContainer) {
+ return containers;
+ } else {
+ // seek to the prevContainer+1 containerID to start scan
+ seekKey = new ContainerKeyPrefix(prevContainer + 1);
+ containerIterator.seek(seekKey);
+ }
}
+ while (containerIterator.hasNext()) {
+ KeyValue<ContainerKeyPrefix, Integer> keyValue =
+ containerIterator.next();
+ ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
+ Long containerID = containerKeyPrefix.getContainerId();
+ Integer numberOfKeys = keyValue.getValue();
+
+ // break the loop if limit has been reached
+ // and one more new entity needs to be added to the containers map
+ if (containers.size() == limit &&
+ !containers.containsKey(containerID)) {
+ break;
+ }
- // initialize containerMetadata with 0 as number of keys.
- containers.computeIfAbsent(containerID, ContainerMetadata::new);
- // increment number of keys for the containerID
- ContainerMetadata containerMetadata = containers.get(containerID);
- containerMetadata.setNumberOfKeys(containerMetadata.getNumberOfKeys() +
- numberOfKeys);
- containers.put(containerID, containerMetadata);
+ // initialize containerMetadata with 0 as number of keys.
+ containers.computeIfAbsent(containerID, ContainerMetadata::new);
+ // increment number of keys for the containerID
+ ContainerMetadata containerMetadata = containers.get(containerID);
+ containerMetadata.setNumberOfKeys(containerMetadata.getNumberOfKeys() +
+ numberOfKeys);
+ containers.put(containerID, containerMetadata);
+ }
}
return containers;
}
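The iterator hunks here wrap the seek-and-verify idiom: seek() only positions the iterator at the first key greater than or equal to the target, so every visited key must still be prefix-checked, and because RocksDB keys are sorted the scan can stop at the first mismatch. A sketch of that idiom inside try-with-resources, against raw byte-array keys:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    final class PrefixScan {
      // seek() gives no prefix guarantee, only ordering: verify each key,
      // and break at the first mismatch since later keys cannot match.
      static List<byte[]> scan(RocksDB db, byte[] prefix) {
        List<byte[]> values = new ArrayList<>();
        try (RocksIterator iter = db.newIterator()) {
          for (iter.seek(prefix); iter.isValid(); iter.next()) {
            byte[] key = iter.key();
            if (!startsWith(key, prefix)) {
              break; // sorted keys: first mismatch ends the prefix run
            }
            values.add(iter.value());
          }
        }
        return values;
      }

      private static boolean startsWith(byte[] key, byte[] prefix) {
        return key.length >= prefix.length
            && Arrays.equals(Arrays.copyOf(key, prefix.length), prefix);
      }
    }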
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBProvider.java
index fe4b3ee13a..f3b32a4d19 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBProvider.java
@@ -83,11 +83,12 @@ public class ReconDBProvider {
if (table == null) {
return;
}
- TableIterator<Object, ? extends KeyValue<Object, Object>>
- tableIterator = table.iterator();
- while (tableIterator.hasNext()) {
- KeyValue<Object, Object> entry = tableIterator.next();
- table.delete(entry.getKey());
+ try (TableIterator<Object, ? extends KeyValue<Object, Object>>
+ tableIterator = table.iterator()) {
+ while (tableIterator.hasNext()) {
+ KeyValue<Object, Object> entry = tableIterator.next();
+ table.delete(entry.getKey());
+ }
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
index b88807587c..72fd26296f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
@@ -196,40 +196,42 @@ public class ContainerKeyMapperTask implements ReconOmTask {
Map<Long, Long> containerKeyCountMap,
List<ContainerKeyPrefix> deletedContainerKeyList)
throws IOException {
- RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
- containerKeyMap.keySet().forEach((ContainerKeyPrefix key) -> {
- try {
- reconContainerMetadataManager
- .batchStoreContainerKeyMapping(rdbBatchOperation, key,
- containerKeyMap.get(key));
- } catch (IOException e) {
- LOG.error("Unable to write Container Key Prefix data in Recon DB.",
- e);
- }
- });
+ try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) {
+ containerKeyMap.keySet().forEach((ContainerKeyPrefix key) -> {
+ try {
+ reconContainerMetadataManager
+ .batchStoreContainerKeyMapping(rdbBatchOperation, key,
+ containerKeyMap.get(key));
+ } catch (IOException e) {
+ LOG.error("Unable to write Container Key Prefix data in Recon DB.",
+ e);
+ }
+ });
- containerKeyCountMap.keySet().forEach((Long key) -> {
- try {
- reconContainerMetadataManager
- .batchStoreContainerKeyCounts(rdbBatchOperation, key,
- containerKeyCountMap.get(key));
- } catch (IOException e) {
- LOG.error("Unable to write Container Key Prefix data in Recon DB.",
- e);
- }
- });
- deletedContainerKeyList.forEach((ContainerKeyPrefix key) -> {
- try {
- reconContainerMetadataManager
- .batchDeleteContainerMapping(rdbBatchOperation, key);
- } catch (IOException e) {
- LOG.error("Unable to write Container Key Prefix data in Recon DB.",
- e);
- }
- });
+ containerKeyCountMap.keySet().forEach((Long key) -> {
+ try {
+ reconContainerMetadataManager
+ .batchStoreContainerKeyCounts(rdbBatchOperation, key,
+ containerKeyCountMap.get(key));
+ } catch (IOException e) {
+ LOG.error("Unable to write Container Key Prefix data in Recon DB.",
+ e);
+ }
+ });
+
+ deletedContainerKeyList.forEach((ContainerKeyPrefix key) -> {
+ try {
+ reconContainerMetadataManager
+ .batchDeleteContainerMapping(rdbBatchOperation, key);
+ } catch (IOException e) {
+ LOG.error("Unable to write Container Key Prefix data in Recon DB.",
+ e);
+ }
+ });
- reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation);
+ reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation);
+ }
}
/**
@@ -250,20 +252,19 @@ public class ContainerKeyMapperTask implements ReconOmTask {
List<ContainerKeyPrefix>
deletedContainerKeyList)
throws IOException {
-
- TableIterator<ContainerKeyPrefix, ? extends
- Table.KeyValue<ContainerKeyPrefix, Integer>> containerIterator =
- reconContainerMetadataManager.getContainerTableIterator();
-
Set<ContainerKeyPrefix> keysToBeDeleted = new HashSet<>();
-
- // Check if we have keys in this container in the DB
- while (containerIterator.hasNext()) {
- Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
- containerIterator.next();
- String keyPrefix = keyValue.getKey().getKeyPrefix();
- if (keyPrefix.equals(key)) {
- keysToBeDeleted.add(keyValue.getKey());
+ try (TableIterator<ContainerKeyPrefix,
+ ? extends Table.KeyValue<ContainerKeyPrefix, Integer>> containerIterator
+ = reconContainerMetadataManager.getContainerTableIterator()) {
+
+ // Check if we have keys in this container in the DB
+ while (containerIterator.hasNext()) {
+ Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
+ containerIterator.next();
+ String keyPrefix = keyValue.getKey().getKeyPrefix();
+ if (keyPrefix.equals(key)) {
+ keysToBeDeleted.add(keyValue.getKey());
+ }
}
}
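The Recon tasks stage many puts and deletes into one batch and commit once; making the batch try-with-resources means that if any step throws before the commit, nothing is persisted and the native buffer is still freed. A sketch of that stage-then-commit lifecycle with a raw WriteBatch:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    import java.util.Map;

    final class BatchedUpsert {
      // Stage all entries, commit once, and let try-with-resources free
      // the native batch. A throw before write() persists nothing and
      // leaks nothing; close() simply discards the staged entries.
      static void writeAll(RocksDB db, Map<byte[], byte[]> entries)
          throws RocksDBException {
        try (WriteBatch batch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions()) {
          for (Map.Entry<byte[], byte[]> e : entries.entrySet()) {
            batch.put(e.getKey(), e.getValue());
          }
          db.write(writeOptions, batch); // single atomic commit
        }
      }
    }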
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
index 4c73e92fe1..7baeefdbe4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
@@ -73,17 +73,18 @@ public abstract class NSSummaryTask implements ReconOmTask {
protected void writeNSSummariesToDB(Map<Long, NSSummary> nsSummaryMap)
throws IOException {
- RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
- nsSummaryMap.keySet().forEach((Long key) -> {
- try {
- reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
- key, nsSummaryMap.get(key));
- } catch (IOException e) {
- LOG.error("Unable to write Namespace Summary data in Recon DB.",
- e);
- }
- });
- reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
+ try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) {
+ nsSummaryMap.keySet().forEach((Long key) -> {
+ try {
+ reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
+ key, nsSummaryMap.get(key));
+ } catch (IOException e) {
+ LOG.error("Unable to write Namespace Summary data in Recon DB.",
+ e);
+ }
+ });
+ reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
+ }
}
protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long,
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
index 3a74da45de..1b8a0ce5a5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
@@ -177,25 +177,26 @@ public class NSSummaryTaskWithFSO extends NSSummaryTask {
Table<String, OmDirectoryInfo> dirTable =
omMetadataManager.getDirectoryTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
- dirTableIter = dirTable.iterator();
-
- while (dirTableIter.hasNext()) {
- Table.KeyValue<String, OmDirectoryInfo> kv = dirTableIter.next();
- OmDirectoryInfo directoryInfo = kv.getValue();
- handlePutDirEvent(directoryInfo, nsSummaryMap);
+ try (TableIterator<String,
+ ? extends Table.KeyValue<String, OmDirectoryInfo>>
+ dirTableIter = dirTable.iterator()) {
+ while (dirTableIter.hasNext()) {
+ Table.KeyValue<String, OmDirectoryInfo> kv = dirTableIter.next();
+ OmDirectoryInfo directoryInfo = kv.getValue();
+ handlePutDirEvent(directoryInfo, nsSummaryMap);
+ }
}
// Get fileTable used by FSO
Table<String, OmKeyInfo> keyTable = omMetadataManager.getFileTable();
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- keyTableIter = keyTable.iterator();
-
- while (keyTableIter.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> kv = keyTableIter.next();
- OmKeyInfo keyInfo = kv.getValue();
- handlePutKeyEvent(keyInfo, nsSummaryMap);
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ keyTableIter = keyTable.iterator()) {
+ while (keyTableIter.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> kv = keyTableIter.next();
+ OmKeyInfo keyInfo = kv.getValue();
+ handlePutKeyEvent(keyInfo, nsSummaryMap);
+ }
}
} catch (IOException ioEx) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
index 82862e8e0f..b8509d60c9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
@@ -125,13 +125,15 @@ public class GeneratorOm extends BaseGenerator implements
long containerId = getContainerIdOffset() + index;
int keyPerContainer = getKeysPerContainer(config);
- BatchOperation omKeyTableBatchOperation = omDb.initBatchOperation();
- for (long localId = 0; localId < keyPerContainer; localId++) {
- BlockID blockId = new BlockID(containerId, localId);
- writeOmData(localId, blockId, omKeyTableBatchOperation);
+ try (BatchOperation omKeyTableBatchOperation
+ = omDb.initBatchOperation()) {
+ for (long localId = 0; localId < keyPerContainer; localId++) {
+ BlockID blockId = new BlockID(containerId, localId);
+ writeOmData(localId, blockId, omKeyTableBatchOperation);
+ }
+ commitAndResetOMKeyTableBatchOperation(omKeyTableBatchOperation);
+ return null;
}
- commitAndResetOMKeyTableBatchOperation(omKeyTableBatchOperation);
- return null;
});
}