Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2018/07/05 22:30:55 UTC

[2/4] hadoop git commit: HDDS-182: CleanUp Reimplemented classes. Contributed by Hanisha Koneru

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
deleted file mode 100644
index 9de84da..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-
-/**
- * Chunk Manager allows read, write, delete and listing of chunks in
- * a container.
- */
-public interface ChunkManager {
-
-  /**
-   * writes a given chunk.
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @param stage - Chunk Stage write.
-   * @throws StorageContainerException
-   */
-  void writeChunk(BlockID blockID,
-      ChunkInfo info, byte[] data, ContainerProtos.Stage stage)
-      throws StorageContainerException;
-
-  /**
-   * reads the data defined by a chunk.
-   * @param blockID - ID of the block.
-   * @param info - ChunkInfo.
-   * @return  byte array
-   * @throws StorageContainerException
-   *
-   * TODO: Right now we do not support partial reads and writes of chunks.
-   * TODO: Explore if we need to do that for ozone.
-   */
-  byte[] readChunk(BlockID blockID, ChunkInfo info) throws
-      StorageContainerException;
-
-  /**
-   * Deletes a given chunk.
-   * @param blockID - ID of the block.
-   * @param info  - Chunk Info
-   * @throws StorageContainerException
-   */
-  void deleteChunk(BlockID blockID, ChunkInfo info) throws
-      StorageContainerException;
-
-  // TODO : Support list operations.
-
-  /**
-   * Shutdown the chunkManager.
-   */
-  void shutdown();
-
-}
\ No newline at end of file

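For reference, the deleted interface above defined the chunk I/O contract: write a chunk at a given stage, read it back, delete it. A minimal caller sketch against that contract, assuming it sits in the same package as the interface; the chunkManager, blockID, info and data values are placeholders, not part of the patch:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers
        .StorageContainerException;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

    public class ChunkRoundTrip {
      // Writes a chunk in the COMBINED stage (data + commit in one step),
      // then reads the same chunk back.
      static byte[] roundTrip(ChunkManager chunkManager, BlockID blockID,
          ChunkInfo info, byte[] data) throws StorageContainerException {
        chunkManager.writeChunk(blockID, info, data,
            ContainerProtos.Stage.COMBINED);
        return chunkManager.readChunk(blockID, info);
      }
    }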
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index a5559aa..f0f1b37 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
index 1ed50fb..2538368 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 
 import java.util.List;
 import java.util.Map;
@@ -28,6 +28,7 @@ import java.util.Map;
  * This interface is used for choosing desired containers for
  * block deletion.
  */
+// TODO: Fix ContainerDeletionChoosingPolicy to work with new StorageLayer
 public interface ContainerDeletionChoosingPolicy {
 
   /**

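The retained List and Map imports suggest the policy method receives a count plus a map of candidate containers. A trivial hypothetical implementation, assuming a signature along the lines of chooseContainerForBlockDeletion(int, Map<Long, ContainerData>) — the exact signature is not shown in this hunk:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hdds.scm.container.common.helpers
        .StorageContainerException;
    import org.apache.hadoop.ozone.container.common.impl.ContainerData;

    // Hypothetical policy: pick the first `count` candidates in map order.
    public class FirstComeFirstServedPolicy
        implements ContainerDeletionChoosingPolicy {
      @Override
      public List<ContainerData> chooseContainerForBlockDeletion(int count,
          Map<Long, ContainerData> candidateContainers)
          throws StorageContainerException {
        List<ContainerData> result = new ArrayList<>();
        for (ContainerData data : candidateContainers.values()) {
          if (result.size() >= count) {
            break;
          }
          result.add(data);
        }
        return result;
      }
    }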
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
deleted file mode 100644
index 49b68dc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.util.RwLock;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-
-import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
-import java.util.List;
-
-/**
- * Interface for container operations.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface ContainerManager extends RwLock {
-
-  /**
-   * Init call that sets up a container Manager.
-   *
-   * @param config        - Configuration.
-   * @param containerDirs - List of Metadata Container locations.
-   * @param datanodeDetails - DatanodeDetails
-   * @throws StorageContainerException
-   */
-  void init(Configuration config, List<StorageLocation> containerDirs,
-            DatanodeDetails datanodeDetails) throws IOException;
-
-  /**
-   * Creates a container with the given name.
-   *
-   * @param containerData - Container Name and metadata.
-   * @throws StorageContainerException
-   */
-  void createContainer(ContainerData containerData)
-      throws StorageContainerException;
-
-  /**
-   * Deletes an existing container.
-   *
-   * @param containerID - ID of the container.
-   * @param forceDelete   - whether this container should be deleted forcibly.
-   * @throws StorageContainerException
-   */
-  void deleteContainer(long containerID,
-      boolean forceDelete) throws StorageContainerException;
-
-  /**
-   * Update an existing container.
-   *
-   * @param containerID ID of the container
-   * @param data container data
-   * @param forceUpdate if true, update container forcibly.
-   * @throws StorageContainerException
-   */
-  void updateContainer(long containerID, ContainerData data,
-      boolean forceUpdate) throws StorageContainerException;
-
-  /**
-   * As simple interface for container Iterations.
-   *
-   * @param startContainerID -  Return containers with ID >= startContainerID.
-   * @param count - how many to return
-   * @param data - Actual containerData
-   * @throws StorageContainerException
-   */
-  void listContainer(long startContainerID, long count,
-      List<ContainerData> data) throws StorageContainerException;
-
-  /**
-   * Choose containers for block deletion.
-   *
-   * @param count   - how many to return
-   * @throws StorageContainerException
-   */
-  List<ContainerData> chooseContainerForBlockDeletion(int count)
-      throws StorageContainerException;
-
-  /**
-   * Get metadata about a specific container.
-   *
-   * @param containerID - ID of the container.
-   * @return ContainerData - Container Data.
-   * @throws StorageContainerException
-   */
-  ContainerData readContainer(long containerID)
-      throws StorageContainerException;
-
-  /**
-   * Closes a open container, if it is already closed or does not exist a
-   * StorageContainerException is thrown.
-   * @param containerID - ID of the container.
-   * @throws StorageContainerException
-   */
-  void closeContainer(long containerID)
-      throws StorageContainerException, NoSuchAlgorithmException;
-
-  /**
-   * Checks if a container exists.
-   * @param containerID - ID of the container.
-   * @return true if the container is open false otherwise.
-   * @throws StorageContainerException  - Throws Exception if we are not
-   * able to find the container.
-   */
-  boolean isOpen(long containerID) throws StorageContainerException;
-
-  /**
-   * Supports clean shutdown of container.
-   *
-   * @throws StorageContainerException
-   */
-  void shutdown() throws IOException;
-
-  /**
-   * Sets the Chunk Manager.
-   *
-   * @param chunkManager - ChunkManager.
-   */
-  void setChunkManager(ChunkManager chunkManager);
-
-  /**
-   * Gets the Chunk Manager.
-   *
-   * @return ChunkManager.
-   */
-  ChunkManager getChunkManager();
-
-  /**
-   * Sets the Key Manager.
-   *
-   * @param keyManager - Key Manager.
-   */
-  void setKeyManager(KeyManager keyManager);
-
-  /**
-   * Gets the Key Manager.
-   *
-   * @return KeyManager.
-   */
-  KeyManager getKeyManager();
-
-  /**
-   * Get the Node Report of container storage usage.
-   * @return node report.
-   */
-  NodeReportProto getNodeReport() throws IOException;
-
-  /**
-   * Gets container report.
-   * @return container report.
-   * @throws IOException
-   */
-  ContainerReportsProto getContainerReport() throws IOException;
-
-  /**
-   * Gets container reports.
-   * @return List of all closed containers.
-   * @throws IOException
-   */
-  List<ContainerData> getClosedContainerReports() throws IOException;
-
-  /**
-   * Increase pending deletion blocks count number of specified container.
-   *
-   * @param numBlocks
-   *          increment  count number
-   * @param containerId
-   *          container id
-   */
-  void incrPendingDeletionBlocks(int numBlocks, long containerId);
-
-  /**
-   * Decrease pending deletion blocks count number of specified container.
-   *
-   * @param numBlocks
-   *          decrement count number
-   * @param containerId
-   *          container id
-   */
-  void decrPendingDeletionBlocks(int numBlocks, long containerId);
-
-  /**
-   * Increase the read count of the container.
-   * @param containerId - ID of the container.
-   */
-  void incrReadCount(long containerId);
-
-  /**
-   * Increse the read counter for bytes read from the container.
-   * @param containerId - ID of the container.
-   * @param readBytes - bytes read from the container.
-   */
-  void incrReadBytes(long containerId, long readBytes);
-
-
-  /**
-   * Increase the write count of the container.
-   * @param containerId - ID of the container.
-   */
-  void incrWriteCount(long containerId);
-
-  /**
-   * Increase the write counter for bytes write into the container.
-   * @param containerId - ID of the container.
-   * @param writeBytes - bytes write into the container.
-   */
-  void incrWriteBytes(long containerId, long writeBytes);
-
-  /**
-   * Increase the bytes used by the container.
-   * @param containerId - ID of the container.
-   * @param used - additional bytes used by the container.
-   * @return the current bytes used.
-   */
-  long incrBytesUsed(long containerId, long used);
-
-  /**
-   * Decrease the bytes used by the container.
-   * @param containerId - ID of the container.
-   * @param used - additional bytes reclaimed by the container.
-   * @return the current bytes used.
-   */
-  long decrBytesUsed(long containerId, long used);
-
-  /**
-   * Get the bytes used by the container.
-   * @param containerId - ID of the container.
-   * @return the current bytes used by the container.
-   */
-  long getBytesUsed(long containerId);
-
-  /**
-   * Get the number of keys in the container.
-   * @param containerId - ID of the container.
-   * @return the current key count.
-   */
-  long getNumKeys(long containerId);
-
-}

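Before its removal, a typical read path through this interface took the manager's read lock (via the inherited RwLock), checked state, then fetched metadata. A minimal sketch of such a caller, assuming same-package placement; the manager instance is a placeholder and error handling is trimmed:

    import org.apache.hadoop.hdds.scm.container.common.helpers
        .StorageContainerException;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerData;

    public class ContainerReadExample {
      static ContainerData readIfOpen(ContainerManager manager,
          long containerID) throws StorageContainerException {
        manager.readLock();  // RwLock inherited from org.apache.hadoop.hdfs.util
        try {
          // readContainer throws StorageContainerException if lookup fails.
          return manager.isOpen(containerID)
              ? manager.readContainer(containerID)
              : null;        // caller decides how to treat non-open containers
        } finally {
          manager.readUnlock();
        }
      }
    }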
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
deleted file mode 100644
index 158ce38..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KeyManager deals with Key Operations in the container Level.
- */
-public interface KeyManager {
-  /**
-   * Puts or overwrites a key.
-   *
-   * @param data     - Key Data.
-   * @throws IOException
-   */
-  void putKey(KeyData data) throws IOException;
-
-  /**
-   * Gets an existing key.
-   *
-   * @param data - Key Data.
-   * @return Key Data.
-   * @throws IOException
-   */
-  KeyData getKey(KeyData data) throws IOException;
-
-  /**
-   * Deletes an existing Key.
-   *
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  void deleteKey(BlockID blockID)
-      throws IOException;
-
-  /**
-   * List keys in a container.
-   *
-   * @param containerID - ID of the container.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of keys to return.
-   * @return List of Keys that match the criteria.
-   */
-  List<KeyData> listKey(long containerID, long startLocalID,
-      int count) throws IOException;
-
-  /**
-   * Shutdown keyManager.
-   */
-  void shutdown();
-}

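The deleted KeyManager mirrored the ChunkManager shape at the key level. A minimal put-then-get sketch against the removed contract, again assuming same-package placement; keyManager and data are placeholders:

    import java.io.IOException;

    import org.apache.hadoop.ozone.container.common.helpers.KeyData;

    public class KeyRoundTrip {
      // Writes key metadata, then reads it back through the same manager.
      static KeyData putThenGet(KeyManager keyManager, KeyData data)
          throws IOException {
        keyManager.putKey(data);
        return keyManager.getKey(data);
      }
    }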
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
deleted file mode 100644
index 52cf2e0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ /dev/null
@@ -1,247 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.background;
-
-import com.google.common.collect.Lists;
-import org.apache.ratis.shaded.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BackgroundTask;
-import org.apache.hadoop.utils.BackgroundTaskQueue;
-import org.apache.hadoop.utils.BackgroundTaskResult;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
-
-/**
- * A per-datanode container block deleting service takes in charge
- * of deleting staled ozone blocks.
- */
-public class BlockDeletingService extends BackgroundService{
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BlockDeletingService.class);
-
-  private final ContainerManager containerManager;
-  private final Configuration conf;
-
-  // Throttle number of blocks to delete per task,
-  // set to 1 for testing
-  private final int blockLimitPerTask;
-
-  // Throttle the number of containers to process concurrently at a time,
-  private final int containerLimitPerInterval;
-
-  // Task priority is useful when a to-delete block has weight.
-  private final static int TASK_PRIORITY_DEFAULT = 1;
-  // Core pool size for container tasks
-  private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
-
-  public BlockDeletingService(ContainerManager containerManager,
-      long serviceInterval, long serviceTimeout, TimeUnit unit,
-      Configuration conf) {
-    super("BlockDeletingService", serviceInterval, unit,
-        BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
-    this.containerManager = containerManager;
-    this.conf = conf;
-    this.blockLimitPerTask = conf.getInt(
-        OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
-        OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
-    this.containerLimitPerInterval = conf.getInt(
-        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
-        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
-  }
-
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    List<ContainerData> containers = Lists.newArrayList();
-    try {
-      // We at most list a number of containers a time,
-      // in case there are too many containers and start too many workers.
-      // We must ensure there is no empty container in this result.
-      // The chosen result depends on what container deletion policy is
-      // configured.
-      containers = containerManager.chooseContainerForBlockDeletion(
-          containerLimitPerInterval);
-      LOG.info("Plan to choose {} containers for block deletion, "
-          + "actually returns {} valid containers.",
-          containerLimitPerInterval, containers.size());
-
-      for(ContainerData container : containers) {
-        BlockDeletingTask containerTask =
-            new BlockDeletingTask(container, TASK_PRIORITY_DEFAULT);
-        queue.add(containerTask);
-      }
-    } catch (StorageContainerException e) {
-      LOG.warn("Failed to initiate block deleting tasks, "
-          + "caused by unable to get containers info. "
-          + "Retry in next interval. ", e);
-    } catch (Exception e) {
-      // In case listContainer call throws any uncaught RuntimeException.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unexpected error occurs during deleting blocks.", e);
-      }
-    }
-    return queue;
-  }
-
-  private static class ContainerBackgroundTaskResult
-      implements BackgroundTaskResult {
-    private List<String> deletedBlockIds;
-
-    ContainerBackgroundTaskResult() {
-      deletedBlockIds = new LinkedList<>();
-    }
-
-    public void addBlockId(String blockId) {
-      deletedBlockIds.add(blockId);
-    }
-
-    public void addAll(List<String> blockIds) {
-      deletedBlockIds.addAll(blockIds);
-    }
-
-    public List<String> getDeletedBlocks() {
-      return deletedBlockIds;
-    }
-
-    @Override
-    public int getSize() {
-      return deletedBlockIds.size();
-    }
-  }
-
-  private class BlockDeletingTask
-      implements BackgroundTask<BackgroundTaskResult> {
-
-    private final int priority;
-    private final ContainerData containerData;
-
-    BlockDeletingTask(ContainerData containerName, int priority) {
-      this.priority = priority;
-      this.containerData = containerName;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
-      long startTime = Time.monotonicNow();
-      // Scan container's db and get list of under deletion blocks
-      MetadataStore meta = KeyUtils.getDB(containerData, conf);
-      // # of blocks to delete is throttled
-      KeyPrefixFilter filter =
-          new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-      List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
-          meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
-      if (toDeleteBlocks.isEmpty()) {
-        LOG.debug("No under deletion block found in container : {}",
-            containerData.getContainerID());
-      }
-
-      List<String> succeedBlocks = new LinkedList<>();
-      LOG.debug("Container : {}, To-Delete blocks : {}",
-          containerData.getContainerID(), toDeleteBlocks.size());
-      File dataDir = ContainerUtils.getDataDirectory(containerData).toFile();
-      if (!dataDir.exists() || !dataDir.isDirectory()) {
-        LOG.error("Invalid container data dir {} : "
-            + "not exist or not a directory", dataDir.getAbsolutePath());
-        return crr;
-      }
-
-      toDeleteBlocks.forEach(entry -> {
-        String blockName = DFSUtil.bytes2String(entry.getKey());
-        LOG.debug("Deleting block {}", blockName);
-        try {
-          ContainerProtos.KeyData data =
-              ContainerProtos.KeyData.parseFrom(entry.getValue());
-          for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
-            File chunkFile = dataDir.toPath()
-                .resolve(chunkInfo.getChunkName()).toFile();
-            if (FileUtils.deleteQuietly(chunkFile)) {
-              LOG.debug("block {} chunk {} deleted", blockName,
-                  chunkFile.getAbsolutePath());
-            }
-          }
-          succeedBlocks.add(blockName);
-        } catch (InvalidProtocolBufferException e) {
-          LOG.error("Failed to parse block info for block {}", blockName, e);
-        }
-      });
-
-      // Once files are deleted... replace deleting entries with deleted entries
-      BatchOperation batch = new BatchOperation();
-      succeedBlocks.forEach(entry -> {
-        String blockId =
-            entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
-        String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
-        batch.put(DFSUtil.string2Bytes(deletedEntry),
-            DFSUtil.string2Bytes(blockId));
-        batch.delete(DFSUtil.string2Bytes(entry));
-      });
-      meta.writeBatch(batch);
-      // update count of pending deletion blocks in in-memory container status
-      containerManager.decrPendingDeletionBlocks(succeedBlocks.size(),
-          containerData.getContainerID());
-
-      if (!succeedBlocks.isEmpty()) {
-        LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
-            containerData.getContainerID(), succeedBlocks.size(),
-            Time.monotonicNow() - startTime);
-      }
-      crr.addAll(succeedBlocks);
-      return crr;
-    }
-
-    @Override
-    public int getPriority() {
-      return priority;
-    }
-  }
-}

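Both throttles in the removed service are plain Configuration integers, so a test could tighten them without touching the service itself. A sketch using only the config keys from the imports above:

    import org.apache.hadoop.conf.Configuration;

    import static org.apache.hadoop.ozone.OzoneConfigKeys
        .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
    import static org.apache.hadoop.ozone.OzoneConfigKeys
        .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;

    public class DeletionThrottleConfig {
      static Configuration tightThrottle() {
        Configuration conf = new Configuration();
        // At most one block deleted per task, as the comment above
        // suggests for testing.
        conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1);
        // At most two containers processed per service interval.
        conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 2);
        return conf;
      }
    }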
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java
deleted file mode 100644
index a9e202e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.background;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index e35becd..692a9d1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -86,20 +86,17 @@ public class VolumeSet {
    */
   private final AutoCloseableLock volumeSetLock;
 
-  private final DatanodeDetails dnDetails;
-  private String datanodeUuid;
+  private final String datanodeUuid;
   private String clusterID;
 
-  public VolumeSet(DatanodeDetails datanodeDetails, Configuration conf)
+  public VolumeSet(String dnUuid, Configuration conf)
       throws DiskOutOfSpaceException {
-    this(datanodeDetails, null, conf);
+    this(dnUuid, null, conf);
   }
 
-  public VolumeSet(DatanodeDetails datanodeDetails, String clusterID,
-      Configuration conf)
+  public VolumeSet(String dnUuid, String clusterID, Configuration conf)
       throws DiskOutOfSpaceException {
-    this.dnDetails = datanodeDetails;
-    this.datanodeUuid = datanodeDetails.getUuidString();
+    this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
     this.conf = conf;
     this.volumeSetLock = new AutoCloseableLock(

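The constructor now takes the datanode UUID string directly rather than the whole DatanodeDetails, so existing call sites migrate by passing getUuidString(). A sketch of the migrated call, assuming same-package placement; datanodeDetails and conf are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

    public class VolumeSetMigration {
      // Old call sites passed DatanodeDetails; new ones pass its UUID string.
      static VolumeSet create(DatanodeDetails datanodeDetails,
          Configuration conf) throws DiskOutOfSpaceException {
        return new VolumeSet(datanodeDetails.getUuidString(), conf);
      }
    }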
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 474c625..95621e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -112,7 +114,7 @@ public class KeyValueContainer implements Container {
           .getVolumesList(), maxSize);
       String containerBasePath = containerVolume.getHddsRootDir().toString();
 
-      long containerId = containerData.getContainerId();
+      long containerId = containerData.getContainerID();
       String containerName = Long.toString(containerId);
 
       containerMetaDataPath = KeyValueContainerLocationUtil
@@ -127,7 +129,7 @@ public class KeyValueContainer implements Container {
           containerMetaDataPath, containerName);
 
       // Check if it is new Container.
-      KeyValueContainerUtil.verifyIsNewContainer(containerMetaDataPath);
+      ContainerUtils.verifyIsNewContainer(containerMetaDataPath);
 
       //Create Metadata path chunks path and metadata db
       KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath,
@@ -184,7 +186,7 @@ public class KeyValueContainer implements Container {
     File tempCheckSumFile = null;
     FileOutputStream containerCheckSumStream = null;
     Writer writer = null;
-    long containerId = containerData.getContainerId();
+    long containerId = containerData.getContainerID();
     try {
       tempContainerFile = createTempFile(containerFile);
       tempCheckSumFile = createTempFile(containerCheckSumFile);
@@ -238,7 +240,7 @@ public class KeyValueContainer implements Container {
 
     File containerBkpFile = null;
     File checkSumBkpFile = null;
-    long containerId = containerData.getContainerId();
+    long containerId = containerData.getContainerID();
 
     try {
       if (containerFile.exists() && containerCheckSumFile.exists()) {
@@ -251,8 +253,8 @@ public class KeyValueContainer implements Container {
       } else {
         containerData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
         throw new StorageContainerException("Container is an Inconsistent " +
-            "state, missing required files(.container, .chksm)",
-            INVALID_CONTAINER_STATE);
+            "state, missing required files(.container, .chksm). ContainerID: " +
+            containerId, INVALID_CONTAINER_STATE);
       }
     } catch (StorageContainerException ex) {
       throw ex;
@@ -303,7 +305,7 @@ public class KeyValueContainer implements Container {
   @Override
   public void delete(boolean forceDelete)
       throws StorageContainerException {
-    long containerId = containerData.getContainerId();
+    long containerId = containerData.getContainerID();
     try {
       KeyValueContainerUtil.removeContainer(containerData, config, forceDelete);
     } catch (StorageContainerException ex) {
@@ -326,11 +328,11 @@ public class KeyValueContainer implements Container {
     // complete this action
     try {
       writeLock();
-      long containerId = containerData.getContainerId();
+      long containerId = containerData.getContainerID();
       if(!containerData.isValid()) {
         LOG.debug("Invalid container data. Container Id: {}", containerId);
-        throw new StorageContainerException("Invalid container data. Name : " +
-            containerId, INVALID_CONTAINER_STATE);
+        throw new StorageContainerException("Invalid container data. " +
+            "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
       }
       containerData.closeContainer();
       File containerFile = getContainerFile();
@@ -380,16 +382,16 @@ public class KeyValueContainer implements Container {
     // TODO: Now, when writing the updated data to .container file, we are
     // holding lock and writing data to disk. We can have async implementation
     // to flush the update container data to disk.
-    long containerId = containerData.getContainerId();
+    long containerId = containerData.getContainerID();
     if(!containerData.isValid()) {
-      LOG.debug("Invalid container data. ID: {}", containerId);
+      LOG.debug("Invalid container data. ContainerID: {}", containerId);
       throw new StorageContainerException("Invalid container data. " +
-          "Container Name : " + containerId, INVALID_CONTAINER_STATE);
+          "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
     }
     if (!forceUpdate && !containerData.isOpen()) {
       throw new StorageContainerException(
-          "Updating a closed container is not allowed. ID: " + containerId,
-          UNSUPPORTED_REQUEST);
+          "Updating a closed container without force option is not allowed. " +
+              "ContainerID: " + containerId, UNSUPPORTED_REQUEST);
     }
     try {
       for (Map.Entry<String, String> entry : metadata.entrySet()) {
@@ -482,7 +484,7 @@ public class KeyValueContainer implements Container {
    */
   private File getContainerFile() {
     return new File(containerData.getMetadataPath(), containerData
-        .getContainerId() + OzoneConsts.CONTAINER_EXTENSION);
+        .getContainerID() + OzoneConsts.CONTAINER_EXTENSION);
   }
 
   /**
@@ -491,7 +493,7 @@ public class KeyValueContainer implements Container {
    */
   private File getContainerCheckSumFile() {
     return new File(containerData.getMetadataPath(), containerData
-        .getContainerId() + OzoneConsts.CONTAINER_FILE_CHECKSUM_EXTENSION);
+        .getContainerID() + OzoneConsts.CONTAINER_FILE_CHECKSUM_EXTENSION);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index ed2c6af..d9ae38a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -18,16 +18,29 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.yaml.snakeyaml.nodes.Tag;
 
 
 import java.io.File;
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
+import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
+import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB;
+import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
+import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
+import static org.apache.hadoop.ozone.OzoneConsts.STATE;
+
 /**
  * This class represents the KeyValueContainer metadata, which is the
  * in-memory representation of container metadata and is represented on disk
@@ -36,12 +49,20 @@ import java.util.Map;
 public class KeyValueContainerData extends ContainerData {
 
   // Yaml Tag used for KeyValueContainerData.
-  public static final Tag YAML_TAG = new Tag("KeyValueContainerData");
+  public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData");
 
   // Fields need to be stored in .container file.
-  private static final List<String> YAML_FIELDS = Lists.newArrayList(
-      "containerType", "containerId", "layOutVersion", "state", "metadata",
-      "metadataPath", "chunksPath", "containerDBType", "maxSizeGB");
+  private static final List<String> YAML_FIELDS =
+      Lists.newArrayList(
+          CONTAINER_TYPE,
+          CONTAINER_ID,
+          LAYOUTVERSION,
+          STATE,
+          METADATA,
+          METADATA_PATH,
+          CHUNKS_PATH,
+          CONTAINER_DB_TYPE,
+          MAX_SIZE_GB);
 
   // Path to Container metadata Level DB/RocksDB Store and .container file.
   private String metadataPath;
@@ -96,11 +117,11 @@ public class KeyValueContainerData extends ContainerData {
   public File getDbFile() {
     return dbFile;
   }
+
   /**
    * Returns container metadata path.
-   *
-   * @return - path
    */
+  @Override
   public String getMetadataPath() {
     return metadataPath;
   }
@@ -123,6 +144,14 @@ public class KeyValueContainerData extends ContainerData {
   }
 
   /**
+   * Returns container chunks path.
+   */
+  @Override
+  public String getDataPath() {
+    return chunksPath;
+  }
+
+  /**
    * Set chunks Path.
    * @param chunkPath - File path.
    */
@@ -181,7 +210,7 @@ public class KeyValueContainerData extends ContainerData {
   public ContainerProtos.ContainerData getProtoBufMessage() {
     ContainerProtos.ContainerData.Builder builder = ContainerProtos
         .ContainerData.newBuilder();
-    builder.setContainerID(this.getContainerId());
+    builder.setContainerID(this.getContainerID());
     builder.setDbPath(this.getDbFile().getPath());
     builder.setContainerPath(this.getMetadataPath());
     builder.setState(this.getState());
@@ -211,4 +240,41 @@ public class KeyValueContainerData extends ContainerData {
   public static List<String> getYamlFields() {
     return YAML_FIELDS;
   }
+
+  /**
+   * Constructs a KeyValueContainerData object from ProtoBuf classes.
+   *
+   * @param protoData - ProtoBuf Message
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public static KeyValueContainerData getFromProtoBuf(
+      ContainerProtos.ContainerData protoData) throws IOException {
+    // TODO: Add containerMaxSize to ContainerProtos.ContainerData
+    KeyValueContainerData data = new KeyValueContainerData(
+        protoData.getContainerID(),
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    for (int x = 0; x < protoData.getMetadataCount(); x++) {
+      data.addMetadata(protoData.getMetadata(x).getKey(),
+          protoData.getMetadata(x).getValue());
+    }
+
+    if (protoData.hasContainerPath()) {
+      data.setContainerPath(protoData.getContainerPath());
+    }
+
+    if (protoData.hasState()) {
+      data.setState(protoData.getState());
+    }
+
+    if (protoData.hasBytesUsed()) {
+      data.setBytesUsed(protoData.getBytesUsed());
+    }
+
+    if(protoData.hasContainerDBType()) {
+      data.setContainerDBType(protoData.getContainerDBType());
+    }
+
+    return data;
+  }
 }

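The new getFromProtoBuf factory rebuilds a KeyValueContainerData from its wire form, copying each optional field only when the message actually carries it; since maximum size is not yet on the wire (see the TODO in the patch), it falls back to OZONE_SCM_CONTAINER_SIZE_DEFAULT. A minimal usage sketch:

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    public class ProtoRoundTrip {
      static KeyValueContainerData fromWire(
          ContainerProtos.ContainerData proto) throws IOException {
        // Metadata, container path, state, bytes used and DB type are
        // copied only if present on the message.
        return KeyValueContainerData.getFromProtoBuf(proto);
      }
    }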
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 8166a83..b2c82f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -288,7 +288,7 @@ public class KeyValueHandler extends Handler {
             DELETE_ON_OPEN_CONTAINER);
       } else {
         containerSet.removeContainer(
-            kvContainer.getContainerData().getContainerId());
+            kvContainer.getContainerData().getContainerID());
         // Release the lock first.
         // Avoid holding write locks for disk operations
         kvContainer.writeUnlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 3529af8..62e328e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.util.Time;
@@ -229,17 +229,16 @@ public final class ChunkUtils {
    * Validates chunk data and returns a file object to Chunk File that we are
    * expected to write data to.
    *
-   * @param data - container data.
+   * @param chunkFile - chunkFile to write data into.
    * @param info - chunk info.
-   * @return File
+   * @return boolean isOverwrite
    * @throws StorageContainerException
    */
-  public static File validateChunk(KeyValueContainerData data, ChunkInfo info)
-      throws StorageContainerException {
+  public static boolean validateChunkForOverwrite(File chunkFile,
+      ChunkInfo info) throws StorageContainerException {
 
     Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
 
-    File chunkFile = getChunkFile(data, info);
     if (isOverWriteRequested(chunkFile, info)) {
       if (!isOverWritePermitted(info)) {
         log.error("Rejecting write chunk request. Chunk overwrite " +
@@ -248,8 +247,9 @@ public final class ChunkUtils {
             "OverWrite flag required." + info.toString(),
             OVERWRITE_FLAG_REQUIRED);
       }
+      return true;
     }
-    return chunkFile;
+    return false;
   }
 
   /**
@@ -340,8 +340,8 @@ public final class ChunkUtils {
   public static ContainerCommandResponseProto getReadChunkResponse(
       ContainerCommandRequestProto msg, byte[] data, ChunkInfo info) {
     Preconditions.checkNotNull(msg);
-    Preconditions.checkNotNull("Chunk data is null", data);
-    Preconditions.checkNotNull("Chunk Info is null", info);
+    Preconditions.checkNotNull(data, "Chunk data is null");
+    Preconditions.checkNotNull(info, "Chunk Info is null");
 
     ReadChunkResponseProto.Builder response =
         ReadChunkResponseProto.newBuilder();

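The two-line change in getReadChunkResponse is a real bug fix, not just a cleanup: Guava's Preconditions.checkNotNull takes the reference first and the error message second, so the old calls null-checked the string literal (always non-null) and silently let a null data or info through. A small demonstration of the corrected idiom:

    import com.google.common.base.Preconditions;

    public class CheckNotNullOrder {
      static void validate(byte[] data, Object info) {
        // Reference first, message second: throws NullPointerException
        // with the given message when the reference is null.
        Preconditions.checkNotNull(data, "Chunk data is null");
        Preconditions.checkNotNull(info, "Chunk Info is null");
      }
    }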
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
index 714f445..5845fae 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
@@ -57,24 +57,25 @@ public final class KeyUtils {
    * add into cache. This function is called with containerManager
    * ReadLock held.
    *
-   * @param container container.
+   * @param containerData containerData.
    * @param conf configuration.
    * @return MetadataStore handle.
    * @throws StorageContainerException
    */
-  public static MetadataStore getDB(KeyValueContainerData container,
+  public static MetadataStore getDB(KeyValueContainerData containerData,
                                     Configuration conf) throws
       StorageContainerException {
-    Preconditions.checkNotNull(container);
+    Preconditions.checkNotNull(containerData);
     ContainerCache cache = ContainerCache.getInstance(conf);
     Preconditions.checkNotNull(cache);
-    Preconditions.checkNotNull(container.getDbFile());
+    Preconditions.checkNotNull(containerData.getDbFile());
     try {
-      return cache.getDB(container.getContainerId(), container
-          .getContainerDBType(), container.getDbFile().getAbsolutePath());
+      return cache.getDB(containerData.getContainerID(), containerData
+          .getContainerDBType(), containerData.getDbFile().getAbsolutePath());
     } catch (IOException ex) {
-      String message = String.format("Unable to open DB Path: " +
-          "%s. ex: %s", container.getDbFile(), ex.getMessage());
+      String message = String.format("Error opening DB. Container:%s " +
+          "ContainerPath:%s", containerData.getContainerID(), containerData
+          .getDbFile().getPath());
       throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
     }
   }
@@ -89,7 +90,7 @@ public final class KeyUtils {
     Preconditions.checkNotNull(container);
     ContainerCache cache = ContainerCache.getInstance(conf);
     Preconditions.checkNotNull(cache);
-    cache.removeDB(container.getContainerId());
+    cache.removeDB(container.getContainerID());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 4c17dce..3c3c9cb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -68,19 +69,6 @@ public final class KeyValueContainerUtil {
   private static final Logger LOG = LoggerFactory.getLogger(
       KeyValueContainerUtil.class);
 
-
-  public static void verifyIsNewContainer(File containerFile) throws
-      FileAlreadyExistsException {
-    Preconditions.checkNotNull(containerFile, "containerFile Should not be " +
-        "null");
-    if (containerFile.getParentFile().exists()) {
-      LOG.error("container already exists on disk. File: {}", containerFile
-          .toPath());
-      throw new FileAlreadyExistsException("container already exists on " +
-            "disk.");
-    }
-  }
-
   /**
    * creates metadata path, chunks path and  metadata DB for the specified
    * container.
@@ -271,7 +259,7 @@ public final class KeyValueContainerUtil {
     Preconditions.checkNotNull(dbFile, "dbFile cannot be null");
     Preconditions.checkNotNull(config, "ozone config cannot be null");
 
-    long containerId = containerData.getContainerId();
+    long containerId = containerData.getContainerID();
     String containerName = String.valueOf(containerId);
     File metadataPath = new File(containerData.getMetadataPath());
 
@@ -282,7 +270,7 @@ public final class KeyValueContainerUtil {
 
     // Verify Checksum
     String checksum = KeyValueContainerUtil.computeCheckSum(
-        containerData.getContainerId(), containerFile);
+        containerData.getContainerID(), containerFile);
     KeyValueContainerUtil.verifyCheckSum(containerId, checksumFile, checksum);
 
     containerData.setDbFile(dbFile);
@@ -305,4 +293,34 @@ public final class KeyValueContainerUtil {
     containerData.setKeyCount(liveKeys.size());
   }
 
+  /**
+   * Returns the path where data or chunks live for a given container.
+   *
+   * @param kvContainerData - KeyValueContainerData
+   * @return - Path to the chunks directory
+   */
+  public static Path getDataDirectory(KeyValueContainerData kvContainerData) {
+
+    String chunksPath = kvContainerData.getChunksPath();
+    Preconditions.checkNotNull(chunksPath);
+
+    return Paths.get(chunksPath);
+  }
+
+  /**
+   * Container metadata directory -- here is where the level DB and
+   * .container file lives.
+   *
+   * @param kvContainerData - KeyValueContainerData
+   * @return Path to the metadata directory
+   */
+  public static Path getMetadataDirectory(
+      KeyValueContainerData kvContainerData) {
+
+    String metadataPath = kvContainerData.getMetadataPath();
+    Preconditions.checkNotNull(metadataPath);
+
+    return Paths.get(metadataPath);
+
+  }
 }

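The two new helpers wrap the container's configured chunk and metadata locations as java.nio.file.Path values. A usage sketch, assuming same-package placement; kvContainerData is a placeholder:

    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    public class ContainerPaths {
      static boolean layoutExists(KeyValueContainerData kvContainerData) {
        Path data = KeyValueContainerUtil.getDataDirectory(kvContainerData);
        Path meta = KeyValueContainerUtil.getMetadataDirectory(kvContainerData);
        // Both directories must exist for a well-formed container on disk.
        return Files.isDirectory(data) && Files.isDirectory(meta);
      }
    }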
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
index c3160a8..ce317bd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
@@ -76,7 +76,10 @@ public class ChunkManagerImpl implements ChunkManager {
       HddsVolume volume = containerData.getVolume();
       VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
 
-      File chunkFile = ChunkUtils.validateChunk(containerData, info);
+      File chunkFile = ChunkUtils.getChunkFile(containerData, info);
+
+      boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(
+          chunkFile, info);
       File tmpChunkFile = getTmpChunkFile(chunkFile, info);
 
       LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file",
@@ -101,8 +104,9 @@ public class ChunkManagerImpl implements ChunkManager {
       case COMBINED:
         // directly write to the chunk file
         ChunkUtils.writeData(chunkFile, info, data, volumeIOStats);
-        // Increment container stats here, as we directly write to chunk file.
-        containerData.incrBytesUsed(info.getLen());
+        if (!isOverwrite) {
+          containerData.incrBytesUsed(info.getLen());
+        }
         containerData.incrWriteCount();
         containerData.incrWriteBytes(info.getLen());
         break;

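The isOverwrite guard keeps bytesUsed honest: overwriting an existing chunk replaces bytes on disk rather than adding new ones, so only first-time writes should grow that counter, while write count and write bytes still track every operation. A self-contained sketch of the accounting rule, with hypothetical counters standing in for the ContainerData fields:

    import java.util.concurrent.atomic.AtomicLong;

    public class ChunkAccounting {
      private final AtomicLong bytesUsed = new AtomicLong();
      private final AtomicLong writeCount = new AtomicLong();
      private final AtomicLong writeBytes = new AtomicLong();

      void recordWrite(long chunkLen, boolean isOverwrite) {
        if (!isOverwrite) {
          bytesUsed.addAndGet(chunkLen); // disk usage grows only on first write
        }
        writeCount.incrementAndGet();    // every write is still an operation
        writeBytes.addAndGet(chunkLen);  // and still moves chunkLen bytes
      }
    }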
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
new file mode 100644
index 0000000..6aa54d1
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -0,0 +1,248 @@
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.ratis.shaded.com.google.protobuf
+    .InvalidProtocolBufferException;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BackgroundTask;
+import org.apache.hadoop.utils.BackgroundTaskQueue;
+import org.apache.hadoop.utils.BackgroundTaskResult;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.utils.MetadataStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
+
+/**
+ * A per-datanode background service that takes charge of deleting stale
+ * ozone blocks from containers.
+ */
+// TODO: Fix BlockDeletingService to work with new StorageLayer
+public class BlockDeletingService extends BackgroundService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BlockDeletingService.class);
+
+  private final ContainerSet containerSet;
+  private final Configuration conf;
+
+  // Throttles the number of blocks deleted per task;
+  // tests set this to 1.
+  private final int blockLimitPerTask;
+
+  // Throttles the number of containers processed in each interval.
+  private final int containerLimitPerInterval;
+
+  // Task priority becomes relevant once to-delete blocks carry weights.
+  private static final int TASK_PRIORITY_DEFAULT = 1;
+  // Core pool size for container tasks.
+  private static final int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
+
+  public BlockDeletingService(ContainerSet containerSet,
+      long serviceInterval, long serviceTimeout, Configuration conf) {
+    super("BlockDeletingService", serviceInterval,
+        TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
+        serviceTimeout);
+    this.containerSet = containerSet;
+    this.conf = conf;
+    this.blockLimitPerTask = conf.getInt(
+        OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
+        OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
+    this.containerLimitPerInterval = conf.getInt(
+        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
+        OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    List<ContainerData> containers = Lists.newArrayList();
+    try {
+      // List at most a limited number of containers per interval so that
+      // too many workers are not started when there are many containers.
+      // The result must not include empty containers; which containers
+      // are chosen depends on the configured container deletion policy.
+      containers = containerSet.chooseContainerForBlockDeletion(
+          containerLimitPerInterval);
+      LOG.info("Requested {} containers for block deletion; "
+          + "the policy returned {} valid containers.",
+          containerLimitPerInterval, containers.size());
+
+      for (ContainerData container : containers) {
+        BlockDeletingTask containerTask =
+            new BlockDeletingTask(container, TASK_PRIORITY_DEFAULT);
+        queue.add(containerTask);
+      }
+    } catch (StorageContainerException e) {
+      LOG.warn("Failed to initiate block deleting tasks because "
+          + "container info could not be retrieved. "
+          + "Will retry in the next interval.", e);
+    } catch (Exception e) {
+      // Guards against any uncaught RuntimeException thrown by the
+      // container listing call.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unexpected error occurred while deleting blocks.", e);
+      }
+    }
+    return queue;
+  }
+
+  private static class ContainerBackgroundTaskResult
+      implements BackgroundTaskResult {
+    private List<String> deletedBlockIds;
+
+    ContainerBackgroundTaskResult() {
+      deletedBlockIds = new LinkedList<>();
+    }
+
+    public void addBlockId(String blockId) {
+      deletedBlockIds.add(blockId);
+    }
+
+    public void addAll(List<String> blockIds) {
+      deletedBlockIds.addAll(blockIds);
+    }
+
+    public List<String> getDeletedBlocks() {
+      return deletedBlockIds;
+    }
+
+    @Override
+    public int getSize() {
+      return deletedBlockIds.size();
+    }
+  }
+
+  private class BlockDeletingTask
+      implements BackgroundTask<BackgroundTaskResult> {
+
+    private final int priority;
+    private final ContainerData containerData;
+
+    BlockDeletingTask(ContainerData containerData, int priority) {
+      this.priority = priority;
+      this.containerData = containerData;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
+      long startTime = Time.monotonicNow();
+      // Scan the container's DB for the list of blocks pending deletion.
+      MetadataStore meta = KeyUtils.getDB(
+          (KeyValueContainerData) containerData, conf);
+      // The number of blocks deleted per task is throttled.
+      KeyPrefixFilter filter =
+          new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
+      List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
+          meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
+      if (toDeleteBlocks.isEmpty()) {
+        LOG.debug("No blocks pending deletion found in container: {}",
+            containerData.getContainerID());
+      }
+
+      List<String> succeedBlocks = new LinkedList<>();
+      LOG.debug("Container: {}, to-delete blocks: {}",
+          containerData.getContainerID(), toDeleteBlocks.size());
+      File dataDir = new File(containerData.getDataPath());
+      if (!dataDir.exists() || !dataDir.isDirectory()) {
+        LOG.error("Invalid container data dir {}: "
+            + "it does not exist or is not a directory",
+            dataDir.getAbsolutePath());
+        return crr;
+      }
+
+      toDeleteBlocks.forEach(entry -> {
+        String blockName = DFSUtil.bytes2String(entry.getKey());
+        LOG.debug("Deleting block {}", blockName);
+        try {
+          ContainerProtos.KeyData data =
+              ContainerProtos.KeyData.parseFrom(entry.getValue());
+          for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
+            File chunkFile = dataDir.toPath()
+                .resolve(chunkInfo.getChunkName()).toFile();
+            if (FileUtils.deleteQuietly(chunkFile)) {
+              LOG.debug("block {} chunk {} deleted", blockName,
+                  chunkFile.getAbsolutePath());
+            }
+          }
+          succeedBlocks.add(blockName);
+        } catch (InvalidProtocolBufferException e) {
+          LOG.error("Failed to parse block info for block {}", blockName, e);
+        }
+      });
+
+      // Once the chunk files are deleted, replace each 'deleting' entry
+      // with a 'deleted' entry in the container DB.
+      BatchOperation batch = new BatchOperation();
+      succeedBlocks.forEach(entry -> {
+        String blockId =
+            entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
+        String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
+        batch.put(DFSUtil.string2Bytes(deletedEntry),
+            DFSUtil.string2Bytes(blockId));
+        batch.delete(DFSUtil.string2Bytes(entry));
+      });
+      meta.writeBatch(batch);
+      // Update the count of pending-deletion blocks in the in-memory
+      // container state.
+      containerData.decrPendingDeletionBlocks(succeedBlocks.size());
+
+      if (!succeedBlocks.isEmpty()) {
+        LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
+            containerData.getContainerID(), succeedBlocks.size(),
+            Time.monotonicNow() - startTime);
+      }
+      crr.addAll(succeedBlocks);
+      return crr;
+    }
+
+    @Override
+    public int getPriority() {
+      return priority;
+    }
+  }
+}
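
For context, a minimal sketch of wiring this service up with the two throttles it reads. The constructor signature and configuration keys are taken from the file above; the limit, interval, and timeout values are illustrative only.

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
        .BlockDeletingService;

    import static org.apache.hadoop.ozone.OzoneConfigKeys
        .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
    import static org.apache.hadoop.ozone.OzoneConfigKeys
        .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;

    public final class BlockDeletingServiceWiring {

      public static BlockDeletingService create(ContainerSet containerSet) {
        Configuration conf = new Configuration();
        // Throttle: at most 2 blocks per task, 1 container per interval.
        conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
        conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1);
        // Illustrative interval/timeout values.
        long intervalMs = TimeUnit.SECONDS.toMillis(10);
        long timeoutMs = TimeUnit.MINUTES.toMillis(5);
        return new BlockDeletingService(containerSet, intervalMs, timeoutMs,
            conf);
      }
    }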

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
new file mode 100644
index 0000000..69d8042
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 9e25c59..667ea5c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -73,7 +73,7 @@ public class OzoneContainer {
       conf) throws IOException {
     this.dnDetails = datanodeDetails;
     this.config = conf;
-    this.volumeSet = new VolumeSet(datanodeDetails, conf);
+    this.volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
     this.containerSet = new ContainerSet();
     boolean useGrpc = this.config.getBoolean(
         ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index 16c4c2a..42db66d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -47,7 +47,7 @@ public class TestKeyValueContainerData {
         MAXSIZE);
 
     assertEquals(containerType, kvData.getContainerType());
-    assertEquals(containerId, kvData.getContainerId());
+    assertEquals(containerId, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
         .getState());
     assertEquals(0, kvData.getMetadata().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 41d8315..eed5606 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -64,7 +64,7 @@ public class TestContainerDataYaml {
     // Read from .container file, and verify data.
     KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
         .readContainerFile(containerFile);
-    assertEquals(Long.MAX_VALUE, kvData.getContainerId());
+    assertEquals(Long.MAX_VALUE, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
     assertEquals("RocksDB", kvData.getContainerDBType());
@@ -92,7 +92,7 @@ public class TestContainerDataYaml {
         containerFile);
 
     // verify data.
-    assertEquals(Long.MAX_VALUE, kvData.getContainerId());
+    assertEquals(Long.MAX_VALUE, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
     assertEquals("RocksDB", kvData.getContainerDBType());
@@ -150,7 +150,7 @@ public class TestContainerDataYaml {
       assertEquals("RocksDB", kvData.getContainerDBType());
       assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
           .getContainerType());
-      assertEquals(9223372036854775807L, kvData.getContainerId());
+      assertEquals(9223372036854775807L, kvData.getContainerID());
       assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData
           .getChunksPath());
       assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index 6ec1fe4..ae670e0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -74,7 +74,7 @@ public class TestContainerSet {
         .getContainer(containerId);
     KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
         container.getContainerData();
-    assertEquals(containerId, keyValueContainerData.getContainerId());
+    assertEquals(containerId, keyValueContainerData.getContainerID());
     assertEquals(state, keyValueContainerData.getState());
     assertNull(containerSet.getContainer(1000L));
 
@@ -97,7 +97,7 @@ public class TestContainerSet {
     while(containerIterator.hasNext()) {
       Container kv = containerIterator.next();
       ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerId();
+      long containerId = containerData.getContainerID();
       if (containerId%2 == 0) {
         assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
             containerData.getState());
@@ -117,7 +117,7 @@ public class TestContainerSet {
     while (containerMapIterator.hasNext()) {
       Container kv = containerMapIterator.next().getValue();
       ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerId();
+      long containerId = containerData.getContainerID();
       if (containerId%2 == 0) {
         assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
             containerData.getState());
@@ -155,8 +155,8 @@ public class TestContainerSet {
     assertEquals(5, result.size());
 
     for(ContainerData containerData : result) {
-      assertTrue(containerData.getContainerId() >=2 && containerData
-          .getContainerId()<=6);
+      assertTrue(containerData.getContainerID() >=2 && containerData
+          .getContainerID()<=6);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 41610af..a45a639 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -54,11 +54,7 @@ public class TestRoundRobinVolumeChoosingPolicy {
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
     policy = ReflectionUtils.newInstance(
         RoundRobinVolumeChoosingPolicy.class, null);
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setIpAddress(DUMMY_IP_ADDR)
-        .build();
-    VolumeSet volumeSet = new VolumeSet(datanodeDetails, conf);
+    VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
     volumes = volumeSet.getVolumesList();
   }
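
The VolumeSet constructor now takes the datanode's UUID string instead of a full DatanodeDetails object, as the test changes in this patch show. A minimal sketch of the new call site, assuming the constructor throws IOException as OzoneContainer's own constructor suggests:

    import java.io.IOException;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;

    public final class VolumeSetWiring {

      // New signature: datanode UUID string instead of DatanodeDetails.
      public static VolumeSet newVolumeSet(Configuration conf)
          throws IOException {
        return new VolumeSet(UUID.randomUUID().toString(), conf);
      }
    }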
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 61383de..41f75bd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -53,11 +53,7 @@ public class TestVolumeSet {
   private static final String DUMMY_IP_ADDR = "0.0.0.0";
 
   private void initializeVolumeSet() throws Exception {
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setIpAddress(DUMMY_IP_ADDR)
-        .build();
-    volumeSet = new VolumeSet(datanodeDetails, conf);
+    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
   }
 
   @Rule

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index e55ea57..4f00507 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -245,24 +245,6 @@ public class TestKeyValueContainer {
   }
 
   @Test
-  public void testUpdateContainerInvalidMetadata() throws IOException {
-    try {
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      Map<String, String> metadata = new HashMap<>();
-      metadata.put("VOLUME", "ozone");
-      keyValueContainer.update(metadata, true);
-      //Trying to update again with same metadata
-      keyValueContainer.update(metadata, true);
-      fail("testUpdateContainerInvalidMetadata failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Container Metadata update " +
-          "error", ex);
-      assertEquals(ContainerProtos.Result.CONTAINER_METADATA_ERROR, ex
-          .getResult());
-    }
-  }
-
-  @Test
   public void testUpdateContainerUnsupportedRequest() throws Exception {
     try {
       keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
@@ -275,7 +257,7 @@ public class TestKeyValueContainer {
       fail("testUpdateContainerUnsupportedRequest failed");
     } catch (StorageContainerException ex) {
       GenericTestUtils.assertExceptionContains("Updating a closed container " +
-          "is not allowed", ex);
+          "without force option is not allowed", ex);
       assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex
           .getResult());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 947ad51..a997145 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -30,6 +31,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -71,12 +73,7 @@ public class TestKeyValueHandler {
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume);
 
     this.containerSet = new ContainerSet();
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(DATANODE_UUID)
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .build();
-    this.volumeSet = new VolumeSet(datanodeDetails, conf);
+    this.volumeSet = new VolumeSet(DATANODE_UUID, conf);
 
     this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
     this.handler = (KeyValueHandler) dispatcher.getHandler(
@@ -246,7 +243,7 @@ public class TestKeyValueHandler {
 
     // Verify that new container is added to containerSet.
     Container container = containerSet.getContainer(contId);
-    Assert.assertEquals(contId, container.getContainerData().getContainerId());
+    Assert.assertEquals(contId, container.getContainerData().getContainerID());
     Assert.assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN,
         container.getContainerState());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 26e1c77..27c6528 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -62,7 +62,7 @@ public class TestOzoneContainer {
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
         .getAbsolutePath() + "," + folder.newFolder().getAbsolutePath());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath());
-    volumeSet = new VolumeSet(datanodeDetails, conf);
+    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
     volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
 
     for (int i=0; i<10; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index 7c12945..a87f655 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.container.testutils;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.ozone.container.common.statemachine.background
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
     .BlockDeletingService;
 
 import java.util.concurrent.CountDownLatch;
@@ -42,10 +42,9 @@ public class BlockDeletingServiceTestImpl
   private Thread testingThread;
   private AtomicInteger numOfProcessed = new AtomicInteger(0);
 
-  public BlockDeletingServiceTestImpl(ContainerManager containerManager,
+  public BlockDeletingServiceTestImpl(ContainerSet containerSet,
       int serviceInterval, Configuration conf) {
-    super(containerManager, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
-        TimeUnit.MILLISECONDS, conf);
+    super(containerSet, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, conf);
   }
 
   @VisibleForTesting
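
A minimal sketch of constructing the test service against the new ContainerSet-based signature; the no-arg ContainerSet constructor appears elsewhere in this patch, and the 1000 ms interval is an illustrative value.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.testutils
        .BlockDeletingServiceTestImpl;

    public final class BlockDeletingTestWiring {

      public static BlockDeletingServiceTestImpl newService() {
        ContainerSet containerSet = new ContainerSet();
        Configuration conf = new Configuration();
        // 1000 ms service interval; illustrative only.
        return new BlockDeletingServiceTestImpl(containerSet, 1000, conf);
      }
    }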

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/resources/additionalfields.container
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
index 9027538..38c104a 100644
--- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container
+++ b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
@@ -1,7 +1,7 @@
 !<KeyValueContainerData>
 containerDBType: RocksDB
 chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
-containerId: 9223372036854775807
+containerID: 9223372036854775807
 containerType: KeyValueContainer
 metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
 layOutVersion: 1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-hdds/container-service/src/test/resources/incorrect.container
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container
index 6848484..abbb6aa 100644
--- a/hadoop-hdds/container-service/src/test/resources/incorrect.container
+++ b/hadoop-hdds/container-service/src/test/resources/incorrect.container
@@ -1,7 +1,7 @@
 !<KeyValueContainerData>
 containerDBType: RocksDB
 chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
-containerId: 9223372036854775807
+containerID: 9223372036854775807
 containerType: KeyValueContainer
 metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
 layOutVersion: 1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2351e8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index b720549..d25b73e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -58,6 +58,8 @@ public final class ContainerTestHelper {
       ContainerTestHelper.class);
   private static Random r = new Random();
 
+  public static final int CONTAINER_MAX_SIZE_GB = 1;
+
   /**
    * Never constructed.
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org