Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2018/09/20 19:16:06 UTC

[2/3] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed by Dinesh Chitlangia.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index ed4536f..4f2b3a2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -116,7 +116,7 @@ public final class KeyValueContainerUtil {
     File chunksPath = new File(containerData.getChunksPath());
 
     // Close the DB connection and remove the DB handler from cache
-    KeyUtils.removeDB(containerData, conf);
+    BlockUtils.removeDB(containerData, conf);
 
     // Delete the Container MetaData path.
     FileUtils.deleteDirectory(containerMetaDataPath);
@@ -175,16 +175,16 @@ public final class KeyValueContainerUtil {
     }
     kvContainerData.setDbFile(dbFile);
 
-    MetadataStore metadata = KeyUtils.getDB(kvContainerData, config);
+    MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
     long bytesUsed = 0;
     List<Map.Entry<byte[], byte[]>> liveKeys = metadata
         .getRangeKVs(null, Integer.MAX_VALUE,
             MetadataKeyFilters.getNormalKeyFilter());
     bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
-      KeyData keyData;
+      BlockData blockData;
       try {
-        keyData = KeyUtils.getKeyData(e.getValue());
-        return keyData.getSize();
+        blockData = BlockUtils.getBlockData(e.getValue());
+        return blockData.getSize();
       } catch (IOException ex) {
         return 0L;
       }
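
For context, the bytes-used computation reads roughly as follows once the rename is applied. This is a sketch only: the terminal .sum() and the surrounding method fall outside the hunk above and are assumed.

    // Sketch, using only the names shown in the hunk above; the
    // terminal .sum() is not visible in the hunk and is an assumption.
    MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
    List<Map.Entry<byte[], byte[]>> liveKeys = metadata
        .getRangeKVs(null, Integer.MAX_VALUE,
            MetadataKeyFilters.getNormalKeyFilter());
    // Sum the sizes of all live blocks; entries that fail to parse
    // contribute 0 bytes.
    long bytesUsed = liveKeys.parallelStream().mapToLong(e -> {
      try {
        return BlockUtils.getBlockData(e.getValue()).getSize();
      } catch (IOException ex) {
        return 0L;
      }
    }).sum();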

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
index df60c60..3495363 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
@@ -69,7 +69,7 @@ public final class SmallFileUtils {
         ContainerProtos.ReadChunkResponseProto.newBuilder();
     readChunkresponse.setChunkData(info.getProtoBufMessage());
     readChunkresponse.setData(ByteString.copyFrom(data));
-    readChunkresponse.setBlockID(msg.getGetSmallFile().getKey().getBlockID());
+    readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID());
 
     ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
         ContainerProtos.GetSmallFileResponseProto.newBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
new file mode 100644
index 0000000..54c15fb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.MetadataStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
+
+/**
+ * This class is for performing block related operations on the KeyValue
+ * Container.
+ */
+public class BlockManagerImpl implements BlockManager {
+
+  static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+
+  private Configuration config;
+
+  /**
+   * Constructs a Block Manager.
+   *
+   * @param conf - Ozone configuration
+   */
+  public BlockManagerImpl(Configuration conf) {
+    Preconditions.checkNotNull(conf, "Config cannot be null");
+    this.config = conf;
+  }
+
+  /**
+   * Puts or overwrites a block.
+   *
+   * @param container - Container to which the block needs to be added.
+   * @param data     - BlockData.
+   * @return length of the block.
+   * @throws IOException
+   */
+  public long putBlock(Container container, BlockData data) throws IOException {
+    Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+        "operation.");
+    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+        "cannot be negative");
+    // We are not locking the block manager since LevelDb serializes all actions
+    // against a single DB. We rely on DB level locking to avoid conflicts.
+    MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container
+        .getContainerData(), config);
+
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
+        .toByteArray());
+
+    // Increment keycount here
+    container.getContainerData().incrKeyCount();
+    return data.getSize();
+  }
+
+  /**
+   * Gets an existing block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the block.
+   * @return Block Data.
+   * @throws IOException
+   */
+  public BlockData getBlock(Container container, BlockID blockID)
+      throws IOException {
+    Preconditions.checkNotNull(blockID,
+        "BlockID cannot be null in GetBlock request");
+    Preconditions.checkNotNull(blockID.getContainerID(),
+        "Container name cannot be null");
+
+    KeyValueContainerData containerData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(containerData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    ContainerProtos.BlockData blockData =
+        ContainerProtos.BlockData.parseFrom(kData);
+    return BlockData.getFromProtoBuf(blockData);
+  }
+
+  /**
+   * Returns the length of the committed block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the block.
+   * @return length of the block.
+   * @throws IOException in case, the block key does not exist in db.
+   */
+  @Override
+  public long getCommittedBlockLength(Container container, BlockID blockID)
+      throws IOException {
+    KeyValueContainerData containerData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(containerData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    ContainerProtos.BlockData blockData =
+        ContainerProtos.BlockData.parseFrom(kData);
+    return blockData.getSize();
+  }
+
+  /**
+   * Deletes an existing block.
+   *
+   * @param container - Container from which the block needs to be deleted.
+   * @param blockID - ID of the block.
+   * @throws StorageContainerException
+   */
+  public void deleteBlock(Container container, BlockID blockID) throws
+      IOException {
+    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
+    Preconditions.checkState(blockID.getContainerID() >= 0,
+        "Container ID cannot be negative.");
+    Preconditions.checkState(blockID.getLocalID() >= 0,
+        "Local ID cannot be negative.");
+
+    KeyValueContainerData cData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(cData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    // Note : There is a race condition here, since get and delete
+    // are not atomic. Leaving it here since the impact is refusing
+    // to delete a Block which might have just gotten inserted after
+    // the get check.
+    byte[] kKey = Longs.toByteArray(blockID.getLocalID());
+    byte[] kData = db.get(kKey);
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    db.delete(kKey);
+
+    // Decrement blockcount here
+    container.getContainerData().decrKeyCount();
+  }
+
+  /**
+   * List blocks in a container.
+   *
+   * @param container - Container from which blocks need to be listed.
+   * @param startLocalID  - Block to start from, 0 to begin.
+   * @param count    - Number of blocks to return.
+   * @return List of Blocks that match the criteria.
+   */
+  @Override
+  public List<BlockData> listBlock(Container container, long startLocalID, int
+      count) throws IOException {
+    Preconditions.checkNotNull(container, "container cannot be null");
+    Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
+        "negative");
+    Preconditions.checkArgument(count > 0,
+        "Count must be a positive number.");
+    container.readLock();
+    List<BlockData> result = null;
+    KeyValueContainerData cData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(cData, config);
+    result = new ArrayList<>();
+    byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
+    List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
+        startKeyInBytes, count, null);
+    for (Map.Entry<byte[], byte[]> entry : range) {
+      BlockData value = BlockUtils.getBlockData(entry.getValue());
+      BlockData data = new BlockData(value.getBlockID());
+      result.add(data);
+    }
+    return result;
+  }
+
+  /**
+   * Shutdown BlockManagerImpl.
+   */
+  public void shutdown() {
+    BlockUtils.shutdownCache(ContainerCache.getInstance(config));
+  }
+
+}
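
A minimal sketch of the renamed call sites, assuming a container created as in TestBlockManagerImpl further down; the helper method itself is hypothetical and not part of this commit.

    // Hypothetical helper: shows the putKey/getKey/deleteKey ->
    // putBlock/getBlock/deleteBlock rename on the new interface.
    static void roundTrip(Container container, Configuration config)
        throws IOException {
      BlockManager blockManager = new BlockManagerImpl(config);
      BlockID blockID = new BlockID(1L, 1L);        // containerID, localID
      BlockData blockData = new BlockData(blockID);
      blockManager.putBlock(container, blockData);            // was putKey
      BlockData fetched = blockManager.getBlock(container, blockID); // was getKey
      long len = blockManager.getCommittedBlockLength(container, blockID);
      blockManager.deleteBlock(container, blockID);           // was deleteKey
    }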

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
deleted file mode 100644
index 6370f8e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_KEY;
-
-/**
- * This class is for performing key related operations on the KeyValue
- * Container.
- */
-public class KeyManagerImpl implements KeyManager {
-
-  static final Logger LOG = LoggerFactory.getLogger(KeyManagerImpl.class);
-
-  private Configuration config;
-
-  /**
-   * Constructs a key Manager.
-   *
-   * @param conf - Ozone configuration
-   */
-  public KeyManagerImpl(Configuration conf) {
-    Preconditions.checkNotNull(conf, "Config cannot be null");
-    this.config = conf;
-  }
-
-  /**
-   * Puts or overwrites a key.
-   *
-   * @param container - Container for which key need to be added.
-   * @param data     - Key Data.
-   * @return length of the key.
-   * @throws IOException
-   */
-  public long putKey(Container container, KeyData data) throws IOException {
-    Preconditions.checkNotNull(data, "KeyData cannot be null for put " +
-        "operation.");
-    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
-        "cannot be negative");
-    // We are not locking the key manager since LevelDb serializes all actions
-    // against a single DB. We rely on DB level locking to avoid conflicts.
-    MetadataStore db = KeyUtils.getDB((KeyValueContainerData) container
-        .getContainerData(), config);
-
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
-        .toByteArray());
-
-    // Increment keycount here
-    container.getContainerData().incrKeyCount();
-    return data.getSize();
-  }
-
-  /**
-   * Gets an existing key.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the key.
-   * @return Key Data.
-   * @throws IOException
-   */
-  public KeyData getKey(Container container, BlockID blockID)
-      throws IOException {
-    Preconditions.checkNotNull(blockID,
-        "BlockID cannot be null in GetKet request");
-    Preconditions.checkNotNull(blockID.getContainerID(),
-        "Container name cannot be null");
-
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(containerData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
-    return KeyData.getFromProtoBuf(keyData);
-  }
-
-  /**
-   * Returns the length of the committed block.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the key.
-   * @return length of the block.
-   * @throws IOException in case, the block key does not exist in db.
-   */
-  @Override
-  public long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException {
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(containerData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
-    return keyData.getSize();
-  }
-
-  /**
-   * Deletes an existing Key.
-   *
-   * @param container - Container from which key need to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  public void deleteKey(Container container, BlockID blockID) throws
-      IOException {
-    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
-    Preconditions.checkState(blockID.getContainerID() >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(blockID.getLocalID() >= 0,
-        "Local ID cannot be negative.");
-
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(cData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    // Note : There is a race condition here, since get and delete
-    // are not atomic. Leaving it here since the impact is refusing
-    // to delete a key which might have just gotten inserted after
-    // the get check.
-    byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-    byte[] kData = db.get(kKey);
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    db.delete(kKey);
-
-    // Decrement keycount here
-    container.getContainerData().decrKeyCount();
-  }
-
-  /**
-   * List keys in a container.
-   *
-   * @param container - Container from which keys need to be listed.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of keys to return.
-   * @return List of Keys that match the criteria.
-   */
-  @Override
-  public List<KeyData> listKey(Container container, long startLocalID, int
-      count) throws IOException {
-    Preconditions.checkNotNull(container, "container cannot be null");
-    Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
-        "negative");
-    Preconditions.checkArgument(count > 0,
-        "Count must be a positive number.");
-    container.readLock();
-    List<KeyData> result = null;
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(cData, config);
-    result = new ArrayList<>();
-    byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
-    List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
-        startKeyInBytes, count, null);
-    for (Map.Entry<byte[], byte[]> entry : range) {
-      KeyData value = KeyUtils.getKeyData(entry.getValue());
-      KeyData data = new KeyData(value.getBlockID());
-      result.add(data);
-    }
-    return result;
-  }
-
-  /**
-   * Shutdown KeyValueContainerManager.
-   */
-  public void shutdown() {
-    KeyUtils.shutdownCache(ContainerCache.getInstance(config));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
index 525d51b..564b50e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
@@ -17,6 +17,5 @@
  */
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 /**
- This package contains chunk manager and key manager implementation for
- keyvalue container type.
- **/
\ No newline at end of file
+ * Chunk manager and block manager implementations for keyvalue container type.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
new file mode 100644
index 0000000..35ed22a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * BlockManager is for performing block related operations on the container.
+ */
+public interface BlockManager {
+
+  /**
+   * Puts or overwrites a block.
+   *
+   * @param container - Container to which the block needs to be added.
+   * @param data     - Block Data.
+   * @return length of the Block.
+   * @throws IOException
+   */
+  long putBlock(Container container, BlockData data) throws IOException;
+
+  /**
+   * Gets an existing block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the Block.
+   * @return Block Data.
+   * @throws IOException
+   */
+  BlockData getBlock(Container container, BlockID blockID) throws IOException;
+
+  /**
+   * Deletes an existing block.
+   *
+   * @param container - Container from which the block needs to be deleted.
+   * @param blockID - ID of the block.
+   * @throws StorageContainerException
+   */
+  void deleteBlock(Container container, BlockID blockID) throws IOException;
+
+  /**
+   * List blocks in a container.
+   *
+   * @param container - Container from which blocks need to be listed.
+   * @param startLocalID  - Block to start from, 0 to begin.
+   * @param count    - Number of blocks to return.
+   * @return List of Blocks that match the criteria.
+   */
+  List<BlockData> listBlock(Container container, long startLocalID, int count)
+      throws IOException;
+
+  /**
+   * Returns the last committed block length for the block.
+   * @param blockID blockId
+   */
+  long getCommittedBlockLength(Container container, BlockID blockID)
+      throws IOException;
+
+  /**
+   * Shutdown the BlockManager.
+   */
+  void shutdown();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
deleted file mode 100644
index 84f771a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KeyManager is for performing key related operations on the container.
- */
-public interface KeyManager {
-
-  /**
-   * Puts or overwrites a key.
-   *
-   * @param container - Container for which key need to be added.
-   * @param data     - Key Data.
-   * @return length of the Key.
-   * @throws IOException
-   */
-  long putKey(Container container, KeyData data) throws IOException;
-
-  /**
-   * Gets an existing key.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the Key.
-   * @return Key Data.
-   * @throws IOException
-   */
-  KeyData getKey(Container container, BlockID blockID) throws IOException;
-
-  /**
-   * Deletes an existing Key.
-   *
-   * @param container - Container from which key need to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  void deleteKey(Container container, BlockID blockID) throws IOException;
-
-  /**
-   * List keys in a container.
-   *
-   * @param container - Container from which keys need to be listed.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of keys to return.
-   * @return List of Keys that match the criteria.
-   */
-  List<KeyData> listKey(Container container, long startLocalID, int count)
-      throws IOException;
-
-  /**
-   * Returns the last committed block length for the block.
-   * @param blockID blockId
-   */
-  long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException;
-
-  /**
-   * Shutdown ContainerManager.
-   */
-  void shutdown();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
new file mode 100644
index 0000000..5129094
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+/**
+ * Chunk manager and block manager interfaces for keyvalue container type.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 51eed7f..d96fbfa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.ratis.shaded.com.google.protobuf
     .InvalidProtocolBufferException;
@@ -72,7 +72,7 @@ public class BlockDeletingService extends BackgroundService{
   private static final Logger LOG =
       LoggerFactory.getLogger(BlockDeletingService.class);
 
-  ContainerSet containerSet;
+  private ContainerSet containerSet;
   private ContainerDeletionChoosingPolicy containerDeletionPolicy;
   private final Configuration conf;
 
@@ -185,7 +185,7 @@ public class BlockDeletingService extends BackgroundService{
       ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
       long startTime = Time.monotonicNow();
       // Scan container's db and get list of under deletion blocks
-      MetadataStore meta = KeyUtils.getDB(
+      MetadataStore meta = BlockUtils.getDB(
           (KeyValueContainerData) containerData, conf);
       // # of blocks to delete is throttled
       KeyPrefixFilter filter =
@@ -211,8 +211,8 @@ public class BlockDeletingService extends BackgroundService{
         String blockName = DFSUtil.bytes2String(entry.getKey());
         LOG.debug("Deleting block {}", blockName);
         try {
-          ContainerProtos.KeyData data =
-              ContainerProtos.KeyData.parseFrom(entry.getValue());
+          ContainerProtos.BlockData data =
+              ContainerProtos.BlockData.parseFrom(entry.getValue());
           for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
             File chunkFile = dataDir.toPath()
                 .resolve(chunkInfo.getChunkName()).toFile();
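
After this hunk, each under-deletion entry is decoded as ContainerProtos.BlockData rather than KeyData. A sketch of the decode loop follows; the hunk ends before the deletion itself, so the deleteQuietly call is an assumption about the surrounding code.

    // Sketch: decode the under-deletion entry with the renamed proto
    // message, then remove each chunk file backing the block.
    ContainerProtos.BlockData data =
        ContainerProtos.BlockData.parseFrom(entry.getValue());
    for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
      File chunkFile = dataDir.toPath()
          .resolve(chunkInfo.getChunkName()).toFile();
      // Assumed cleanup step, not visible in the hunk above.
      org.apache.commons.io.FileUtils.deleteQuietly(chunkFile);
    }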

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
new file mode 100644
index 0000000..6fe6d81
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.volume
+    .RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+
+/**
+ * This class is used to test block related operations on the container.
+ */
+public class TestBlockManagerImpl {
+
+  private OzoneConfiguration config;
+  private String scmId = UUID.randomUUID().toString();
+  private VolumeSet volumeSet;
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+  private KeyValueContainerData keyValueContainerData;
+  private KeyValueContainer keyValueContainer;
+  private BlockData blockData;
+  private BlockManagerImpl blockManager;
+  private BlockID blockID;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+
+  @Before
+  public void setUp() throws Exception {
+    config = new OzoneConfiguration();
+
+    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
+        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
+        .toString()).build();
+
+    volumeSet = mock(VolumeSet.class);
+
+    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
+    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
+        .thenReturn(hddsVolume);
+
+    keyValueContainerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(5));
+
+    keyValueContainer = new KeyValueContainer(
+        keyValueContainerData, config);
+
+    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+    // Creating BlockData
+    blockID = new BlockID(1L, 1L);
+    blockData = new BlockData(blockID);
+    blockData.addMetadata("VOLUME", "ozone");
+    blockData.addMetadata("OWNER", "hdfs");
+    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+        .getLocalID(), 0), 0, 1024);
+    chunkList.add(info.getProtoBufMessage());
+    blockData.setChunks(chunkList);
+
+    // Create BlockManagerImpl
+    blockManager = new BlockManagerImpl(config);
+
+  }
+
+  @Test
+  public void testPutAndGetBlock() throws Exception {
+    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+
+    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
+    //Get Block
+    BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
+        blockData.getBlockID());
+
+    assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
+    assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
+    assertEquals(blockData.getChunks().size(),
+        fromGetBlockData.getChunks().size());
+    assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata()
+        .size());
+
+  }
+
+
+  @Test
+  public void testDeleteBlock() throws Exception {
+    try {
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Put Block
+      blockManager.putBlock(keyValueContainer, blockData);
+      assertEquals(1,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Delete Block
+      blockManager.deleteBlock(keyValueContainer, blockID);
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      try {
+        blockManager.getBlock(keyValueContainer, blockID);
+        fail("testDeleteBlock");
+      } catch (StorageContainerException ex) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to find the block", ex);
+      }
+    } catch (IOException ex) {
+      fail("testDeleteBlock failed");
+    }
+  }
+
+  @Test
+  public void testListBlock() throws Exception {
+    try {
+      blockManager.putBlock(keyValueContainer, blockData);
+      List<BlockData> listBlockData = blockManager.listBlock(
+          keyValueContainer, 1, 10);
+      assertNotNull(listBlockData);
+      assertTrue(listBlockData.size() == 1);
+
+      for (long i = 2; i <= 10; i++) {
+        blockID = new BlockID(1L, i);
+        blockData = new BlockData(blockID);
+        blockData.addMetadata("VOLUME", "ozone");
+        blockData.addMetadata("OWNER", "hdfs");
+        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+            .getLocalID(), 0), 0, 1024);
+        chunkList.add(info.getProtoBufMessage());
+        blockData.setChunks(chunkList);
+        blockManager.putBlock(keyValueContainer, blockData);
+      }
+
+      listBlockData = blockManager.listBlock(
+          keyValueContainer, 1, 10);
+      assertNotNull(listBlockData);
+      assertTrue(listBlockData.size() == 10);
+
+    } catch (IOException ex) {
+      fail("testListBlock failed");
+    }
+  }
+
+  @Test
+  public void testGetNoSuchBlock() throws Exception {
+    try {
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Put Block
+      blockManager.putBlock(keyValueContainer, blockData);
+      assertEquals(1,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Delete Block
+      blockManager.deleteBlock(keyValueContainer, blockID);
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      try {
+        //Since the block has been deleted, we should not be able to find it
+        blockManager.getBlock(keyValueContainer, blockID);
+        fail("testGetNoSuchBlock failed");
+      } catch (StorageContainerException ex) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to find the block", ex);
+        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
+      }
+    } catch (IOException ex) {
+      fail("testGetNoSuchBlock failed");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
index 9664052..3c0876b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
@@ -88,7 +88,7 @@ public class TestChunkManagerImpl {
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
     data = "testing write chunks".getBytes();
-    // Creating KeyData
+    // Creating BlockData
     blockID = new BlockID(1L, 1L);
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, data.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
deleted file mode 100644
index b05dbca..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test key related operations on the container.
- */
-public class TestKeyManagerImpl {
-
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private KeyData keyData;
-  private KeyManagerImpl keyManager;
-  private BlockID blockID;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5));
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    // Creating KeyData
-    blockID = new BlockID(1L, 1L);
-    keyData = new KeyData(blockID);
-    keyData.addMetadata("VOLUME", "ozone");
-    keyData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, 1024);
-    chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-
-    // Create KeyValueContainerManager
-    keyManager = new KeyManagerImpl(config);
-
-  }
-
-  @Test
-  public void testPutAndGetKey() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-    //Put Key
-    keyManager.putKey(keyValueContainer, keyData);
-
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-    //Get Key
-    KeyData fromGetKeyData = keyManager.getKey(keyValueContainer,
-        keyData.getBlockID());
-
-    assertEquals(keyData.getContainerID(), fromGetKeyData.getContainerID());
-    assertEquals(keyData.getLocalID(), fromGetKeyData.getLocalID());
-    assertEquals(keyData.getChunks().size(), fromGetKeyData.getChunks().size());
-    assertEquals(keyData.getMetadata().size(), fromGetKeyData.getMetadata()
-        .size());
-
-  }
-
-
-  @Test
-  public void testDeleteKey() throws Exception {
-    try {
-      assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-      //Put Key
-      keyManager.putKey(keyValueContainer, keyData);
-      assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-      //Delete Key
-      keyManager.deleteKey(keyValueContainer, blockID);
-      assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-      try {
-        keyManager.getKey(keyValueContainer, blockID);
-        fail("testDeleteKey");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains("Unable to find the key", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteKey failed");
-    }
-  }
-
-  @Test
-  public void testListKey() throws Exception {
-    try {
-      keyManager.putKey(keyValueContainer, keyData);
-      List<KeyData> listKeyData = keyManager.listKey(
-          keyValueContainer, 1, 10);
-      assertNotNull(listKeyData);
-      assertTrue(listKeyData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        keyData = new KeyData(blockID);
-        keyData.addMetadata("VOLUME", "ozone");
-        keyData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        keyData.setChunks(chunkList);
-        keyManager.putKey(keyValueContainer, keyData);
-      }
-
-      listKeyData = keyManager.listKey(
-          keyValueContainer, 1, 10);
-      assertNotNull(listKeyData);
-      assertTrue(listKeyData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListKey failed");
-    }
-  }
-
-  @Test
-  public void testGetNoSuchKey() throws Exception {
-    try {
-      keyData = new KeyData(new BlockID(1L, 2L));
-      keyManager.getKey(keyValueContainer, new BlockID(1L, 2L));
-      fail("testGetNoSuchKey failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Unable to find the key.", ex);
-      assertEquals(ContainerProtos.Result.NO_SUCH_KEY, ex.getResult());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index f1fe88e..fbc5ad0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -114,8 +114,8 @@ public class TestKeyValueBlockIterator {
 
     int counter = 0;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
 
     assertFalse(keyValueBlockIterator.hasNext());
@@ -123,8 +123,8 @@ public class TestKeyValueBlockIterator {
     keyValueBlockIterator.seekToFirst();
     counter = 0;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
     assertFalse(keyValueBlockIterator.hasNext());
 
@@ -214,8 +214,8 @@ public class TestKeyValueBlockIterator {
 
     int counter = 5;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
   }
 
@@ -250,7 +250,7 @@ public class TestKeyValueBlockIterator {
     container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
         .randomUUID().toString());
-    MetadataStore metadataStore = KeyUtils.getDB(containerData, conf);
+    MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
 
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
@@ -258,18 +258,18 @@ public class TestKeyValueBlockIterator {
 
     for (int i=0; i<normalBlocks; i++) {
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+      BlockData blockData = new BlockData(blockID);
+      blockData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
 
     for (int i=normalBlocks; i<deletedBlocks; i++) {
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.setChunks(chunkList);
+      BlockData blockData = new BlockData(blockID);
+      blockData.setChunks(chunkList);
       metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
-          .DELETING_KEY_PREFIX + blockID.getLocalID()), keyData
+          .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
   }
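
For readers tracking the rename, the storage pattern this test drives is unchanged: each block is a BlockData protobuf stored under its local ID, and a block queued for deletion carries the same payload under a prefixed key. A minimal sketch of that write path, using only calls visible in the hunk above; BlockID's import path is an assumption, as this diff does not show it:

import java.util.LinkedList;
import java.util.List;

import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.client.BlockID; // import path assumed
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.utils.MetadataStore;

public final class BlockWriteSketch {
  // Persist one live block and one block marked for deletion, mirroring
  // the block-creation loops in the test above.
  static void writeBlocks(MetadataStore store, long containerId)
      throws Exception {
    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
    chunkList.add(new ChunkInfo("chunkfile", 0, 1024).getProtoBufMessage());

    // Live block: keyed by the raw local-ID bytes.
    BlockData live = new BlockData(new BlockID(containerId, 0L));
    live.setChunks(chunkList);
    store.put(Longs.toByteArray(0L), live.getProtoBufMessage().toByteArray());

    // Deletion-pending block: same payload, key carries the deleting prefix.
    BlockData pending = new BlockData(new BlockID(containerId, 1L));
    pending.setChunks(chunkList);
    store.put(DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + 1L),
        pending.getProtoBufMessage().toByteArray());
  }
}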

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index f84ba7d..bf6b8b0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -28,14 +28,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume
     .RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.utils.MetadataStore;
@@ -117,11 +117,11 @@ public class TestKeyValueContainer {
     addBlocks(blockCount);
     blockIterator = keyValueContainer.blockIterator();
     assertTrue(blockIterator.hasNext());
-    KeyData keyData;
+    BlockData blockData;
     int blockCounter = 0;
     while(blockIterator.hasNext()) {
-      keyData = blockIterator.nextBlock();
-      assertEquals(blockCounter++, keyData.getBlockID().getLocalID());
+      blockData = blockIterator.nextBlock();
+      assertEquals(blockCounter++, blockData.getBlockID().getLocalID());
     }
     assertEquals(blockCount, blockCounter);
   }
@@ -129,20 +129,20 @@ public class TestKeyValueContainer {
   private void addBlocks(int count) throws Exception {
     long containerId = keyValueContainerData.getContainerID();
 
-    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainer
+    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf);
     for (int i=0; i < count; i++) {
-      // Creating KeyData
+      // Creating BlockData
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.addMetadata("VOLUME", "ozone");
-      keyData.addMetadata("OWNER", "hdfs");
+      BlockData blockData = new BlockData(blockID);
+      blockData.addMetadata("VOLUME", "ozone");
+      blockData.addMetadata("OWNER", "hdfs");
       List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, 1024);
       chunkList.add(info.getProtoBufMessage());
-      keyData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+      blockData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
 
@@ -189,7 +189,7 @@ public class TestKeyValueContainer {
 
     int numberOfKeysToWrite = 12;
     // write a few keys to check the key count after import
-    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, conf);
+    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
     for (int i = 0; i < numberOfKeysToWrite; i++) {
       metadataStore.put(("test" + i).getBytes(), "test".getBytes());
     }
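
Reading a record back inverts that write: the stored bytes parse as a ContainerProtos.BlockData message, which wraps back into the helper type. A sketch, where the getFromProtoBuf factory is assumed by analogy with the old KeyData helper and is not shown in this patch:

import java.io.IOException;

import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.utils.MetadataStore;

public final class BlockReadSketch {
  // Decode one stored value back into the BlockData helper type.
  static BlockData readBlock(MetadataStore store, long localId)
      throws IOException {
    byte[] raw = store.get(Longs.toByteArray(localId));
    ContainerProtos.BlockData proto = ContainerProtos.BlockData.parseFrom(raw);
    return BlockData.getFromProtoBuf(proto); // factory assumed, see note above
  }
}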

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index d91bbf7..e1904c1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -142,31 +142,31 @@ public class TestKeyValueHandler {
     Mockito.verify(handler, times(1)).handleCloseContainer(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Put Key Request handling
-    ContainerCommandRequestProto putKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutKey);
-    dispatcher.dispatch(putKeyRequest);
-    Mockito.verify(handler, times(1)).handlePutKey(
+    // Test Put Block Request handling
+    ContainerCommandRequestProto putBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
+    dispatcher.dispatch(putBlockRequest);
+    Mockito.verify(handler, times(1)).handlePutBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Get Key Request handling
-    ContainerCommandRequestProto getKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetKey);
-    dispatcher.dispatch(getKeyRequest);
-    Mockito.verify(handler, times(1)).handleGetKey(
+    // Test Get Block Request handling
+    ContainerCommandRequestProto getBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
+    dispatcher.dispatch(getBlockRequest);
+    Mockito.verify(handler, times(1)).handleGetBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Delete Key Request handling
-    ContainerCommandRequestProto deleteKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteKey);
-    dispatcher.dispatch(deleteKeyRequest);
-    Mockito.verify(handler, times(1)).handleDeleteKey(
+    // Test Delete Block Request handling
+    ContainerCommandRequestProto deleteBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
+    dispatcher.dispatch(deleteBlockRequest);
+    Mockito.verify(handler, times(1)).handleDeleteBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test List Key Request handling
-    ContainerCommandRequestProto listKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListKey);
-    dispatcher.dispatch(listKeyRequest);
+    // Test List Block Request handling
+    ContainerCommandRequestProto listBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
+    dispatcher.dispatch(listBlockRequest);
     Mockito.verify(handler, times(2)).handleUnsupportedOp(
         any(ContainerCommandRequestProto.class));
 

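The dispatch test builds one dummy request per renamed command type; note that ListBlock still routes to handleUnsupportedOp, which is why the hunk verifies that counter rather than a dedicated list handler. A sketch of what such a dummy PutBlock request can look like, assuming this branch's proto2 required fields (the DatanodeBlockID stub exists only to satisfy them):

import java.util.UUID;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;

public final class PutBlockRequestSketch {
  static ContainerCommandRequestProto dummyPutBlock(long containerId,
      String datanodeUuid) {
    return ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.PutBlock)
        .setContainerID(containerId)
        .setPutBlock(ContainerProtos.PutBlockRequestProto.newBuilder()
            .setBlockData(ContainerProtos.BlockData.newBuilder()
                .setBlockID(ContainerProtos.DatanodeBlockID.newBuilder()
                    .setContainerID(containerId)
                    .setLocalID(1L))))
        .setTraceID(UUID.randomUUID().toString())
        .setDatanodeUuid(datanodeUuid)
        .build();
  }
}

Dispatching such a request and then verifying handlePutBlock follows exactly the pattern in the hunk above.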
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 94966f6..2b10578 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -281,10 +281,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         groupInputStream.streamOffset[i] = length;
         ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
             .getDatanodeBlockIDProtobuf();
-        ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
-            .getKey(xceiverClient, datanodeBlockID, requestId);
+        ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
+            .getBlock(xceiverClient, datanodeBlockID, requestId);
         List<ContainerProtos.ChunkInfo> chunks =
-            response.getKeyData().getChunksList();
+            response.getBlockData().getChunksList();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           length += chunk.getLen();
         }
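
The client-side change is mechanical but worth spelling out: a block's logical length is not stored directly; it is the sum of the chunk lengths returned in the GetBlock response. A sketch of that computation, using only the proto accessors visible above:

import java.util.List;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

public final class BlockLengthSketch {
  // A block's logical length is the sum of its chunk lengths.
  static long blockLength(ContainerProtos.GetBlockResponseProto response) {
    long length = 0;
    List<ContainerProtos.ChunkInfo> chunks =
        response.getBlockData().getChunksList();
    for (ContainerProtos.ChunkInfo chunk : chunks) {
      length += chunk.getLen();
    }
    return length;
  }
}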

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index d0e173c..0537f8a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -165,10 +165,10 @@ public class TestStorageContainerManagerHelper {
     DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
-    KeyValueContainerData containerData = (KeyValueContainerData) containerServer
-        .getContainerSet()
+    KeyValueContainerData containerData =
+        (KeyValueContainerData) containerServer.getContainerSet()
         .getContainer(containerID).getContainerData();
-    return KeyUtils.getDB(containerData, conf);
+    return BlockUtils.getDB(containerData, conf);
   }
 
   private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
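
This helper's only substantive change is routing DB lookups through BlockUtils. A sketch of that resolution step, assuming a plain Hadoop Configuration is what the surrounding test passes through as conf:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.utils.MetadataStore;

public final class ContainerDbSketch {
  // Resolve a container's block table with the renamed helper.
  static MetadataStore blockTable(KeyValueContainerData data,
      Configuration conf) throws Exception {
    return BlockUtils.getDB(data, conf);
  }
}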

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index ddff0c5..b4a0ba7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.junit.AfterClass;
@@ -469,9 +469,9 @@ public class TestOzoneRestClient {
         containerID, new File(containerPath));
     long valueLength = 0;
     while (keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      if (keyData.getBlockID().getLocalID() == localID) {
-        List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      if (blockData.getBlockID().getLocalID() == localID) {
+        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           valueLength += chunk.getLen();
         }
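
Both this REST test and the RPC test in the next file verify a written key the same way: iterate the container's block table, match the local ID, and total the chunk bytes. One consolidated sketch of that pattern, assuming the iterator construction shown above:

import java.io.File;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;

public final class FindBlockSketch {
  // Scan a container for one block by local ID and sum its chunk bytes.
  static long valueLength(long containerID, File containerPath, long localID)
      throws Exception {
    KeyValueBlockIterator it =
        new KeyValueBlockIterator(containerID, containerPath);
    long valueLength = 0;
    while (it.hasNext()) {
      BlockData blockData = it.nextBlock();
      if (blockData.getBlockID().getLocalID() == localID) {
        for (ContainerProtos.ChunkInfo chunk : blockData.getChunks()) {
          valueLength += chunk.getLen();
        }
      }
    }
    return valueLength;
  }
}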

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index bf1eba6..cc045d0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -603,10 +603,10 @@ public class TestOzoneRpcClient {
     KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
         containerID, new File(containerPath));
     while (keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      if (keyData.getBlockID().getLocalID() == localID) {
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      if (blockData.getBlockID().getLocalID() == localID) {
         long length = 0;
-        List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           length += chunk.getLen();
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 0c86828..f278479 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -241,18 +241,18 @@ public final class ContainerTestHelper {
     setDataChecksum(info, data);
 
 
-    ContainerProtos.PutKeyRequestProto.Builder putRequest =
-        ContainerProtos.PutKeyRequestProto.newBuilder();
+    ContainerProtos.PutBlockRequestProto.Builder putRequest =
+        ContainerProtos.PutBlockRequestProto.newBuilder();
 
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(info.getProtoBufMessage());
-    keyData.setChunks(newList);
-    putRequest.setKeyData(keyData.getProtoBufMessage());
+    blockData.setChunks(newList);
+    putRequest.setBlockData(blockData.getProtoBufMessage());
 
     smallFileRequest.setChunkInfo(info.getProtoBufMessage());
     smallFileRequest.setData(ByteString.copyFrom(data));
-    smallFileRequest.setKey(putRequest);
+    smallFileRequest.setBlock(putRequest);
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -266,17 +266,17 @@ public final class ContainerTestHelper {
 
 
   public static ContainerCommandRequestProto getReadSmallFileRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKey)
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putKey)
       throws Exception {
     ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest =
         ContainerProtos.GetSmallFileRequestProto.newBuilder();
-    ContainerCommandRequestProto getKey = getKeyRequest(pipeline, putKey);
-    smallFileRequest.setKey(getKey.getGetKey());
+    ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey);
+    smallFileRequest.setBlock(getKey.getGetBlock());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.GetSmallFile);
-    request.setContainerID(getKey.getGetKey().getBlockID().getContainerID());
+    request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID());
     request.setGetSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -421,58 +421,58 @@ public final class ContainerTestHelper {
   }
 
   /**
-   * Returns the PutKeyRequest for test purpose.
+   * Returns the PutBlockRequest for test purposes.
    * @param pipeline - pipeline.
    * @param writeRequest - Write Chunk Request.
    * @return - Request
    */
-  public static ContainerCommandRequestProto getPutKeyRequest(
+  public static ContainerCommandRequestProto getPutBlockRequest(
       Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) {
-    LOG.trace("putKey: {} to pipeline={}",
+    LOG.trace("putBlock: {} to pipeline={}",
         writeRequest.getBlockID());
 
-    ContainerProtos.PutKeyRequestProto.Builder putRequest =
-        ContainerProtos.PutKeyRequestProto.newBuilder();
+    ContainerProtos.PutBlockRequestProto.Builder putRequest =
+        ContainerProtos.PutBlockRequestProto.newBuilder();
 
-    KeyData keyData = new KeyData(
+    BlockData blockData = new BlockData(
         BlockID.getFromProtobuf(writeRequest.getBlockID()));
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(writeRequest.getChunkData());
-    keyData.setChunks(newList);
-    putRequest.setKeyData(keyData.getProtoBufMessage());
+    blockData.setChunks(newList);
+    putRequest.setBlockData(blockData.getProtoBufMessage());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutKey);
-    request.setContainerID(keyData.getContainerID());
-    request.setPutKey(putRequest);
+    request.setCmdType(ContainerProtos.Type.PutBlock);
+    request.setContainerID(blockData.getContainerID());
+    request.setPutBlock(putRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
   /**
-   * Gets a GetKeyRequest for test purpose.
+   * Gets a GetBlockRequest for test purposes.
    * @param  pipeline - pipeline
-   * @param putKeyRequest - putKeyRequest.
+   * @param putBlockRequest - putBlockRequest.
   * @return - Request
    */
-  public static ContainerCommandRequestProto getKeyRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
+  public static ContainerCommandRequestProto getBlockRequest(
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
     ContainerProtos.DatanodeBlockID blockID =
-        putKeyRequest.getKeyData().getBlockID();
+        putBlockRequest.getBlockData().getBlockID();
     LOG.trace("getKey: blockID={}", blockID);
 
-    ContainerProtos.GetKeyRequestProto.Builder getRequest =
-        ContainerProtos.GetKeyRequestProto.newBuilder();
+    ContainerProtos.GetBlockRequestProto.Builder getRequest =
+        ContainerProtos.GetBlockRequestProto.newBuilder();
     getRequest.setBlockID(blockID);
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.GetKey);
+    request.setCmdType(ContainerProtos.Type.GetBlock);
     request.setContainerID(blockID.getContainerID());
-    request.setGetKey(getRequest);
+    request.setGetBlock(getRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
@@ -484,32 +484,32 @@ public final class ContainerTestHelper {
    * @param request - Request
    * @param response - Response
    */
-  public static void verifyGetKey(ContainerCommandRequestProto request,
+  public static void verifyGetBlock(ContainerCommandRequestProto request,
       ContainerCommandResponseProto response, int expectedChunksCount) {
     Assert.assertEquals(request.getTraceID(), response.getTraceID());
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
     Assert.assertEquals(expectedChunksCount,
-        response.getGetKey().getKeyData().getChunksCount());
+        response.getGetBlock().getBlockData().getChunksCount());
   }
 
   /**
    * @param pipeline - pipeline.
-   * @param putKeyRequest - putKeyRequest.
+   * @param putBlockRequest - putBlockRequest.
    * @return - Request
    */
-  public static ContainerCommandRequestProto getDeleteKeyRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    ContainerProtos.DatanodeBlockID blockID = putKeyRequest.getKeyData()
+  public static ContainerCommandRequestProto getDeleteBlockRequest(
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
+    ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData()
         .getBlockID();
-    LOG.trace("deleteKey: name={}", blockID);
-    ContainerProtos.DeleteKeyRequestProto.Builder delRequest =
-        ContainerProtos.DeleteKeyRequestProto.newBuilder();
+    LOG.trace("deleteBlock: name={}", blockID);
+    ContainerProtos.DeleteBlockRequestProto.Builder delRequest =
+        ContainerProtos.DeleteBlockRequestProto.newBuilder();
     delRequest.setBlockID(blockID);
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteKey);
+    request.setCmdType(ContainerProtos.Type.DeleteBlock);
     request.setContainerID(blockID.getContainerID());
-    request.setDeleteKey(delRequest);
+    request.setDeleteBlock(delRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
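
The renamed helpers compose: one PutBlock request feeds the matching GetBlock and DeleteBlock builders, since each derives its DatanodeBlockID (and with it the containerID) from the put's BlockData. A sketch of the chain, assuming the static helpers defined above:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.ozone.container.ContainerTestHelper;

public final class BlockRequestChainSketch {
  // Build put/get/delete requests against the same block; all three carry
  // the containerID derived from the put request's BlockData.
  static ContainerCommandRequestProto[] blockRequestChain(Pipeline pipeline,
      ContainerProtos.WriteChunkRequestProto writeChunk) {
    ContainerCommandRequestProto put =
        ContainerTestHelper.getPutBlockRequest(pipeline, writeChunk);
    ContainerCommandRequestProto get =
        ContainerTestHelper.getBlockRequest(pipeline, put.getPutBlock());
    ContainerCommandRequestProto del =
        ContainerTestHelper.getDeleteBlockRequest(pipeline, put.getPutBlock());
    return new ContainerCommandRequestProto[] {put, get, del};
  }
}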

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 7391b25..52cebb3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -100,17 +100,20 @@ public class TestContainerReplication {
 
     DatanodeBlockID blockID = requestProto.getWriteChunk().getBlockID();
 
-    // Put Key to the test container
-    ContainerCommandRequestProto putKeyRequest = ContainerTestHelper
-        .getPutKeyRequest(sourcePipelines, requestProto.getWriteChunk());
+    // Put Block to the test container
+    ContainerCommandRequestProto putBlockRequest = ContainerTestHelper
+        .getPutBlockRequest(sourcePipelines, requestProto.getWriteChunk());
 
-    ContainerProtos.KeyData keyData = putKeyRequest.getPutKey().getKeyData();
+    ContainerProtos.BlockData blockData =
+        putBlockRequest.getPutBlock().getBlockData();
 
-    ContainerCommandResponseProto response = client.sendCommand(putKeyRequest);
+    ContainerCommandResponseProto response =
+        client.sendCommand(putBlockRequest);
 
     Assert.assertNotNull(response);
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+    Assert.assertTrue(
+        putBlockRequest.getTraceID().equals(response.getTraceID()));
 
     HddsDatanodeService destinationDatanode =
         chooseDatanodeWithoutContainer(sourcePipelines,
@@ -147,8 +150,8 @@ public class TestContainerReplication {
     KeyValueHandler handler = (KeyValueHandler) ozoneContainer.getDispatcher()
         .getHandler(ContainerType.KeyValueContainer);
 
-    KeyData key = handler.getKeyManager()
-        .getKey(container, BlockID.getFromProtobuf(blockID));
+    BlockData key = handler.getBlockManager()
+        .getBlock(container, BlockID.getFromProtobuf(blockID));
 
     Assert.assertNotNull(key);
     Assert.assertEquals(1, key.getChunks().size());
@@ -164,7 +167,8 @@ public class TestContainerReplication {
         return datanode;
       }
     }
-    throw new AssertionError("No datanode outside of the pipeline");
+    throw new AssertionError(
+        "No datanode outside of the pipeline");
   }
 
   static OzoneConfiguration newOzoneConfiguration() {
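
On the destination datanode, the renamed accessor chain is handler.getBlockManager().getBlock(...) in place of getKeyManager().getKey(...). A sketch of the post-replication check, assuming handler, container, and blockID are obtained as in the test above; BlockID's import path is again an assumption:

import org.apache.hadoop.hdds.client.BlockID; // import path assumed
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;

public final class ReplicatedBlockCheckSketch {
  // Resolve a replicated block through the handler's BlockManager and
  // report how many chunks arrived with it.
  static int replicatedChunkCount(KeyValueHandler handler,
      Container container, BlockID blockID) throws Exception {
    BlockData block = handler.getBlockManager().getBlock(container, blockID);
    return block.getChunks().size();
  }
}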

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 25c8c6b..7e30c5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -33,9 +34,8 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
     .BlockDeletingService;
@@ -117,13 +117,13 @@ public class TestBlockDeletingService {
       containerSet.addContainer(container);
       data = (KeyValueContainerData) containerSet.getContainer(
           containerID).getContainerData();
-      MetadataStore metadata = KeyUtils.getDB(data, conf);
+      MetadataStore metadata = BlockUtils.getDB(data, conf);
       for (int j = 0; j<numOfBlocksPerContainer; j++) {
         BlockID blockID =
             ContainerTestHelper.getTestBlockID(containerID);
         String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
             blockID.getLocalID();
-        KeyData kd = new KeyData(blockID);
+        BlockData kd = new BlockData(blockID);
         List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
         for (int k = 0; k<numOfChunksPerBlock; k++) {
           // offset doesn't matter here
@@ -200,7 +200,7 @@ public class TestBlockDeletingService {
     containerSet.listContainer(0L, 1, containerData);
     Assert.assertEquals(1, containerData.size());
 
-    MetadataStore meta = KeyUtils.getDB(
+    MetadataStore meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf);
     Map<Long, Container> containerMap = containerSet.getContainerMap();
     // NOTE: this test assumes that all the containers are KeyValueContainers and
@@ -309,7 +309,7 @@ public class TestBlockDeletingService {
     // get container meta data
     List<ContainerData> containerData = Lists.newArrayList();
     containerSet.listContainer(0L, 1, containerData);
-    MetadataStore meta = KeyUtils.getDB(
+    MetadataStore meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf);
 
     LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
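
The deleting service separates live blocks from queued ones purely by key prefix within one MetadataStore. A sketch that counts the queued entries; the manual prefix comparison is deliberate, so nothing beyond MetadataStore's range scan is assumed:

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.utils.MetadataStore;

public final class PendingDeleteCountSketch {
  // Count entries whose key carries the deleting prefix; live blocks keep
  // raw local-ID keys, so the prefix alone separates the two populations.
  static int pendingDeletes(MetadataStore meta) throws IOException {
    int pending = 0;
    List<Map.Entry<byte[], byte[]>> all =
        meta.getRangeKVs(null, Integer.MAX_VALUE);
    for (Map.Entry<byte[], byte[]> kv : all) {
      if (DFSUtil.bytes2String(kv.getKey())
          .startsWith(OzoneConsts.DELETING_KEY_PREFIX)) {
        pending++;
      }
    }
    return pending;
  }
}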

