Posted to commits@ozone.apache.org by ad...@apache.org on 2023/02/16 14:22:34 UTC

[ozone] branch master updated: HDDS-7969. CacheValue should not store value as an Optional. (#4275)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new bd900ae9db HDDS-7969. CacheValue should not store value as an Optional. (#4275)
bd900ae9db is described below

commit bd900ae9db8abf819b28a29551cff87259700afc
Author: Tsz-Wo Nicholas Sze <sz...@apache.org>
AuthorDate: Thu Feb 16 06:22:27 2023 -0800

    HDDS-7969. CacheValue should not store value as an Optional. (#4275)
---
 .../org/apache/hadoop/hdds/utils/db/Table.java     | 11 ++++
 .../hadoop/hdds/utils/db/cache/CacheValue.java     | 32 ++++++++--
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  6 ++
 .../om/request/file/OMDirectoryCreateRequest.java  | 14 ++---
 .../file/OMDirectoryCreateRequestWithFSO.java      |  3 +-
 .../ozone/om/request/file/OMFileCreateRequest.java | 10 +--
 .../request/file/OMFileCreateRequestWithFSO.java   |  4 +-
 .../ozone/om/request/file/OMFileRequest.java       | 72 ++++++++++------------
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  9 +--
 .../ozone/om/request/key/OMKeyCreateRequest.java   | 10 +--
 .../om/request/key/OMKeyCreateRequestWithFSO.java  |  3 +-
 .../S3InitiateMultipartUploadRequestWithFSO.java   |  8 +--
 12 files changed, 94 insertions(+), 88 deletions(-)

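The patch replaces the Guava-Optional-based CacheValue constructor with static factory methods and adds Table#addCacheEntry overloads that take the raw key, so call sites no longer wrap keys and values themselves. A before/after sketch of a typical call site, drawn from the OMKeyCommitRequest hunk further down (dbOzoneKey, omKeyInfo and trxnLogIndex are locals of that request handler, not standalone code):

    // before: key and value wrapped at every call site
    omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(dbOzoneKey),
        new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));

    // after: the Table default method does the wrapping
    omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
        dbOzoneKey, omKeyInfo, trxnLogIndex);
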
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 63d4fc5678..0e0e67be01 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -191,6 +191,17 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
     throw new NotImplementedException("addCacheEntry is not implemented");
   }
 
+  /** Add entry to the table cache with a non-null key and a null value. */
+  default void addCacheEntry(KEY cacheKey, long epoch) {
+    addCacheEntry(new CacheKey<>(cacheKey), CacheValue.get(epoch));
+  }
+
+  /** Add entry to the table cache with a non-null key and a non-null value. */
+  default void addCacheEntry(KEY cacheKey, VALUE value, long epoch) {
+    addCacheEntry(new CacheKey<>(cacheKey),
+        CacheValue.get(epoch, value));
+  }
+
   /**
    * Get the cache value from table cache.
    * @param cacheKey
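
The two default methods above simply wrap the key in a CacheKey and build the CacheValue through the new factories; the single-argument form covers the deleted-entry case that previously required Optional.absent(). A sketch of that case, taken from the open-key cleanup in OMKeyCommitRequest below (dbOpenKey and trxnLogIndex are locals of that handler, not standalone code):

    // before: a delete marker needed a Guava Optional
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(dbOpenKey),
        new CacheValue<>(Optional.absent(), trxnLogIndex));

    // after: omitting the value stores a null-value CacheValue
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        dbOpenKey, trxnLogIndex);
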
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
index de9fe0d95f..f52e739e3e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,24 +20,46 @@ package org.apache.hadoop.hdds.utils.db.cache;
 
 import com.google.common.base.Optional;
 
+import java.util.Objects;
+
 /**
  * CacheValue for the RocksDB Table.
  * @param <VALUE>
  */
 public class CacheValue<VALUE> {
+  /** @return a {@link CacheValue} with a non-null value. */
+  public static <V> CacheValue<V> get(long epoch, V value) {
+    Objects.requireNonNull(value, "value == null");
+    return new CacheValue<>(epoch, value);
+  }
+
+  /** @return a {@link CacheValue} with a null value. */
+  public static <V> CacheValue<V> get(long epoch) {
+    return new CacheValue<>(epoch, null);
+  }
 
-  private Optional<VALUE> value;
+  private final VALUE value;
   // This value is used for evict entries from cache.
   // This value is set with ratis transaction context log entry index.
-  private long epoch;
+  private final long epoch;
 
-  public CacheValue(Optional<VALUE> value, long epoch) {
+  private CacheValue(long epoch, VALUE value) {
     this.value = value;
     this.epoch = epoch;
   }
 
+  /**
+   * @deprecated
+   * use {@link #get(long, Object)} or {@link #get(long)}.
+   */
+  @Deprecated
+  public CacheValue(Optional<VALUE> value, long epoch) {
+    this.value = value.orNull();
+    this.epoch = epoch;
+  }
+
   public VALUE getCacheValue() {
-    return value.orNull();
+    return value;
   }
 
   public long getEpoch() {
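
The new factories can be exercised on their own; a minimal runnable sketch, assuming the hdds-framework jar is on the classpath (the demo class name is made up for illustration):

    import org.apache.hadoop.hdds.utils.db.cache.CacheValue;

    public final class CacheValueDemo {
      public static void main(String[] args) {
        // Non-null value: get(epoch, value) rejects null via requireNonNull.
        CacheValue<String> present = CacheValue.get(1L, "vol1/bucket1/key1");
        // Null value (e.g. a delete marker): use the single-argument factory
        // instead of the deprecated new CacheValue<>(Optional.absent(), epoch).
        CacheValue<String> absent = CacheValue.get(2L);

        System.out.println(present.getCacheValue() + " @ epoch " + present.getEpoch());
        System.out.println(absent.getCacheValue() + " @ epoch " + absent.getEpoch());
      }
    }
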
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 8ec576f8ad..2343532ac4 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -453,6 +453,12 @@ public interface OMMetadataManager extends DBStoreHAManager {
   String getOzonePathKey(long volumeId, long bucketId,
                          long parentObjectId, String pathComponentName);
 
+  default String getOzonePathKey(long volumeId, long bucketId,
+      OmDirectoryInfo dir) {
+    return getOzonePathKey(volumeId, bucketId,
+        dir.getParentObjectID(), dir.getName());
+  }
+
   /**
    * Given ozone path key, component id, return the corresponding 
    * DB path key for delete table.
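
The new default getOzonePathKey overload only saves the caller from unpacking the OmDirectoryInfo; a short sketch of the equivalence, assuming omMetadataManager, volumeId, bucketId and an OmDirectoryInfo named dir are in scope as in the OMFileRequest hunk below:

    // these two calls produce the same DB path key
    String viaDirInfo = omMetadataManager.getOzonePathKey(volumeId, bucketId, dir);
    String viaParts = omMetadataManager.getOzonePathKey(volumeId, bucketId,
        dir.getParentObjectID(), dir.getName());
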
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 780ee9ca72..73f30c80aa 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -26,7 +26,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -74,8 +73,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .Status;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
@@ -219,8 +216,8 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
         omBucketInfo.incrUsedNamespace(numMissingParents + 1L);
 
         OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
-            bucketName, Optional.of(dirKeyInfo),
-            Optional.of(missingParentInfos), trxnLogIndex);
+            bucketName, omBucketInfo.getBucketLayout(),
+            dirKeyInfo, missingParentInfos, trxnLogIndex);
         
         result = Result.SUCCESS;
         omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
@@ -302,10 +299,9 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
 
       missingParentInfos.add(parentKeyInfo);
       omMetadataManager.getKeyTable(BucketLayout.DEFAULT).addCacheEntry(
-          new CacheKey<>(omMetadataManager.getOzoneKey(volumeName,
-              bucketName, parentKeyInfo.getKeyName())),
-          new CacheValue<>(Optional.of(parentKeyInfo),
-              trxnLogIndex));
+          omMetadataManager.getOzoneKey(
+              volumeName, bucketName, parentKeyInfo.getKeyName()),
+          parentKeyInfo, trxnLogIndex);
     }
 
     return missingParentInfos;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
index 9140a89ecd..081efd42e7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om.request.file;
 
-import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -176,7 +175,7 @@ public class OMDirectoryCreateRequestWithFSO extends OMDirectoryCreateRequest {
             OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
         OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
             volumeId, bucketId, trxnLogIndex,
-            Optional.of(missingParentInfos), Optional.of(dirInfo));
+            missingParentInfos, dirInfo);
 
         result = OMDirectoryCreateRequest.Result.SUCCESS;
         omClientResponse =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index d92400af3e..a7fea52cbd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -68,8 +67,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.UniqueId;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
@@ -287,14 +284,13 @@ public class OMFileCreateRequest extends OMKeyRequest {
       // Even if bucket gets deleted, when commitKey we shall identify if
       // bucket gets deleted.
       omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
-          new CacheKey<>(dbOpenKeyName),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+          dbOpenKeyName, omKeyInfo, trxnLogIndex);
 
       // Add cache entries for the prefix directories.
       // Skip adding for the file key itself, until Key Commit.
       OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
-          bucketName, Optional.absent(), Optional.of(missingParentInfos),
-          trxnLogIndex);
+          bucketName, omBucketInfo.getBucketLayout(),
+          null, missingParentInfos, trxnLogIndex);
 
       // Prepare response
       omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
index caeb2b0a92..64dc695911 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om.request.file;
 
-import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -214,8 +213,7 @@ public class OMFileCreateRequestWithFSO extends OMFileCreateRequest {
       // Add cache entries for the prefix directories.
       // Skip adding for the file key itself, until Key Commit.
       OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, volumeId,
-              bucketId, trxnLogIndex, Optional.of(missingParentInfos),
-              Optional.absent());
+              bucketId, trxnLogIndex, missingParentInfos, null);
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 4a273d367c..662780c94d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -27,7 +27,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -410,23 +409,21 @@ public final class OMFileRequest {
    */
   public static void addKeyTableCacheEntries(
       OMMetadataManager omMetadataManager, String volumeName,
-      String bucketName, Optional<OmKeyInfo> keyInfo,
-      Optional<List<OmKeyInfo>> parentInfoList,
+      String bucketName, BucketLayout layout,
+      OmKeyInfo keyInfo, List<OmKeyInfo> parentInfoList,
       long index) throws IOException {
-    for (OmKeyInfo parentInfo : parentInfoList.get()) {
-      omMetadataManager.getKeyTable(
-          getBucketLayout(omMetadataManager, volumeName, bucketName))
-          .addCacheEntry(new CacheKey<>(omMetadataManager
-          .getOzoneKey(volumeName, bucketName, parentInfo.getKeyName())),
-              new CacheValue<>(Optional.of(parentInfo), index));
+    final Table<String, OmKeyInfo> table
+        = omMetadataManager.getKeyTable(layout);
+    for (OmKeyInfo parentInfo : parentInfoList) {
+      table.addCacheEntry(omMetadataManager.getOzoneKey(
+          volumeName, bucketName, parentInfo.getKeyName()),
+          parentInfo, index);
     }
 
-    if (keyInfo.isPresent()) {
-      omMetadataManager.getKeyTable(
-          getBucketLayout(omMetadataManager, volumeName, bucketName))
-          .addCacheEntry(new CacheKey<>(omMetadataManager
-          .getOzoneKey(volumeName, bucketName, keyInfo.get().getKeyName())),
-              new CacheValue<>(keyInfo, index));
+    if (keyInfo != null) {
+      table.addCacheEntry(omMetadataManager.getOzoneKey(
+          volumeName, bucketName, keyInfo.getKeyName()),
+          keyInfo, index);
     }
   }
 
@@ -443,23 +440,21 @@ public final class OMFileRequest {
   public static void addDirectoryTableCacheEntries(
           OMMetadataManager omMetadataManager,
           long volumeId, long bucketId, long trxnLogIndex,
-          Optional<List<OmDirectoryInfo>> missingParentInfos,
-          Optional<OmDirectoryInfo> dirInfo) {
-
-    for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
-      omMetadataManager.getDirectoryTable().addCacheEntry(
-              new CacheKey<>(omMetadataManager.getOzonePathKey(
-                      volumeId, bucketId, subDirInfo.getParentObjectID(),
-                      subDirInfo.getName())),
-              new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+          List<OmDirectoryInfo> missingParentInfos,
+          OmDirectoryInfo dirInfo) {
+
+    final Table<String, OmDirectoryInfo> table
+        = omMetadataManager.getDirectoryTable();
+    for (OmDirectoryInfo subDirInfo : missingParentInfos) {
+      table.addCacheEntry(omMetadataManager.getOzonePathKey(
+          volumeId, bucketId, subDirInfo),
+          subDirInfo, trxnLogIndex);
     }
 
-    if (dirInfo.isPresent()) {
-      omMetadataManager.getDirectoryTable().addCacheEntry(
-              new CacheKey<>(omMetadataManager.getOzonePathKey(
-                      volumeId, bucketId, dirInfo.get().getParentObjectID(),
-                      dirInfo.get().getName())),
-              new CacheValue<>(dirInfo, trxnLogIndex));
+    if (dirInfo != null) {
+      table.addCacheEntry(omMetadataManager.getOzonePathKey(
+          volumeId, bucketId, dirInfo),
+          dirInfo, trxnLogIndex);
     }
   }
 
@@ -476,19 +471,18 @@ public final class OMFileRequest {
           OMMetadataManager omMetadataManager, String dbOpenFileName,
           @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
 
-    Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+    final Table<String, OmKeyInfo> table = omMetadataManager.getOpenKeyTable(
+        BucketLayout.FILE_SYSTEM_OPTIMIZED);
     if (omFileInfo != null) {
       // New key format for the openFileTable.
       // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
       // keyName field stores only the leaf node name, which is 'file1'.
       omFileInfo.setKeyName(fileName);
       omFileInfo.setFileName(fileName);
-      keyInfoOptional = Optional.of(omFileInfo);
+      table.addCacheEntry(dbOpenFileName, omFileInfo, trxnLogIndex);
+    } else {
+      table.addCacheEntry(dbOpenFileName, trxnLogIndex);
     }
-
-    omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
-        .addCacheEntry(new CacheKey<>(dbOpenFileName),
-            new CacheValue<>(keyInfoOptional, trxnLogIndex));
   }
 
   /**
@@ -516,8 +510,7 @@ public final class OMFileRequest {
             omFileInfo.getBucketName());
 
     omMetadataManager.getKeyTable(bucketLayout)
-        .addCacheEntry(new CacheKey<>(dbFileKey),
-            new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+        .addCacheEntry(dbFileKey, omFileInfo, trxnLogIndex);
   }
 
   /**
@@ -532,8 +525,7 @@ public final class OMFileRequest {
           OMMetadataManager omMetadataManager, String dbDeletedKey,
           RepeatedOmKeyInfo keysToDelete, long trxnLogIndex) {
     omMetadataManager.getDeletedTable().addCacheEntry(
-            new CacheKey<>(dbDeletedKey),
-            new CacheValue<>(Optional.of(keysToDelete), trxnLogIndex));
+        dbDeletedKey, keysToDelete, trxnLogIndex);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 45a9e95078..674c4ecfb3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OmUtils;
@@ -63,8 +62,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLoca
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
@@ -255,13 +252,11 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       // Add to cache of open key table and key table.
       if (!isHSync) {
         omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
-            new CacheKey<>(dbOpenKey),
-            new CacheValue<>(Optional.absent(), trxnLogIndex));
+            dbOpenKey, trxnLogIndex);
       }
 
       omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
-          new CacheKey<>(dbOzoneKey),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+          dbOzoneKey, omKeyInfo, trxnLogIndex);
 
       if (oldKeyVersionsToDelete != null) {
         OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index eeb25fb505..73e5586808 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.OmUtils;
@@ -48,8 +47,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
@@ -314,16 +311,15 @@ public class OMKeyCreateRequest extends OMKeyRequest {
         // Add cache entries for the prefix directories.
         // Skip adding for the file key itself, until Key Commit.
         OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
-            bucketName, Optional.absent(), Optional.of(missingParentInfos),
-            trxnLogIndex);
+            bucketName, omBucketInfo.getBucketLayout(),
+            null, missingParentInfos, trxnLogIndex);
       }
 
       // Add to cache entry can be done outside of lock for this openKey.
       // Even if bucket gets deleted, when commitKey we shall identify if
       // bucket gets deleted.
       omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
-          new CacheKey<>(dbOpenKeyName),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+          dbOpenKeyName, omKeyInfo, trxnLogIndex);
 
       // Prepare response
       omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
index e7bf882b7d..d8d567ed91 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
-import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -201,7 +200,7 @@ public class OMKeyCreateRequestWithFSO extends OMKeyCreateRequest {
       // Skip adding for the file key itself, until Key Commit.
       OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
               volumeId, bucketId, trxnLogIndex,
-              Optional.of(missingParentInfos), Optional.absent());
+              missingParentInfos, null);
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index a0b8b45da5..f6b30c63b5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -18,11 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -204,7 +201,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
       // Skip adding for the file key itself, until Key Commit.
       OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
               volumeId, bucketId, transactionLogIndex,
-              Optional.of(missingParentInfos), Optional.absent());
+              missingParentInfos, null);
 
       OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
           multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
@@ -212,8 +209,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
 
       // Add to cache
       omMetadataManager.getMultipartInfoTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
-          new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
+          multipartKey, multipartKeyInfo, transactionLogIndex);
 
       omClientResponse =
           new S3InitiateMultipartUploadResponseWithFSO(


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org