Posted to common-commits@hadoop.apache.org by su...@apache.org on 2018/10/05 12:56:31 UTC

[36/51] [partial] hadoop git commit: HADOOP-15791. Remove Ozone related sources from the 3.2 branch. Contributed by Elek, Marton.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
deleted file mode 100644
index c63eb73..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.MapIterator;
-import org.apache.commons.collections.map.LRUMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * ContainerCache is an LRUMap that maintains the DB handles.
- */
-public final class ContainerCache extends LRUMap {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerCache.class);
-  private final Lock lock = new ReentrantLock();
-  private static ContainerCache cache;
-  private static final float LOAD_FACTOR = 0.75f;
-  /**
-   * Constructs a cache that holds DBHandle references.
-   */
-  private ContainerCache(int maxSize, float loadFactor, boolean
-      scanUntilRemovable) {
-    super(maxSize, loadFactor, scanUntilRemovable);
-  }
-
-  /**
-   * Returns a singleton instance of {@link ContainerCache}
-   * that holds the DB handles.
-   *
-   * @param conf - Configuration.
-   * @return An instance of {@link ContainerCache}.
-   */
-  public static synchronized ContainerCache getInstance(Configuration conf) {
-    if (cache == null) {
-      int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE,
-          OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT);
-      cache = new ContainerCache(cacheSize, LOAD_FACTOR, true);
-    }
-    return cache;
-  }
-
-  /**
-   * Closes a db instance.
-   *
-   * @param containerID - ID of the container to be closed.
-   * @param db - db instance to close.
-   */
-  private void closeDB(long containerID, MetadataStore db) {
-    if (db != null) {
-      try {
-        db.close();
-      } catch (IOException e) {
-        LOG.error("Error closing DB. Container: " + containerID, e);
-      }
-    }
-  }
-
-  /**
-   * Closes all the db instances and resets the cache.
-   */
-  public void shutdownCache() {
-    lock.lock();
-    try {
-      // iterate the cache and close each db
-      MapIterator iterator = cache.mapIterator();
-      while (iterator.hasNext()) {
-        iterator.next();
-        MetadataStore db = (MetadataStore) iterator.getValue();
-        closeDB(((Number)iterator.getKey()).longValue(), db);
-      }
-      // reset the cache
-      cache.clear();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected boolean removeLRU(LinkEntry entry) {
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore) entry.getValue();
-      closeDB(((Number)entry.getKey()).longValue(), db);
-    } finally {
-      lock.unlock();
-    }
-    return true;
-  }
-
-  /**
-   * Returns a DB handle if available; creates the handle otherwise.
-   *
-   * @param containerID - ID of the container.
-   * @param containerDBType - DB type of the container.
-   * @param containerDBPath - DB path of the container.
-   * @return MetadataStore.
-   */
-  public MetadataStore getDB(long containerID, String containerDBType, String
-                             containerDBPath)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative.");
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore) this.get(containerID);
-
-      if (db == null) {
-        db = MetadataStoreBuilder.newBuilder()
-            .setDbFile(new File(containerDBPath))
-            .setCreateIfMissing(false)
-            .setDBType(containerDBType)
-            .build();
-        this.put(containerID, db);
-      }
-      return db;
-    } catch (Exception e) {
-      LOG.error("Error opening DB. Container:{} ContainerPath:{}",
-          containerID, containerDBPath, e);
-      throw e;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Removes a DB handle from the cache.
-   *
-   * @param containerID - ID of the container.
-   */
-  public void removeDB(long containerID) {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative.");
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore)this.get(containerID);
-      closeDB(containerID, db);
-      this.remove(containerID);
-    } finally {
-      lock.unlock();
-    }
-  }
-}

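For reference, a minimal usage sketch of the removed ContainerCache. The container ID, DB type string, and DB path below are hypothetical placeholders; in the datanode they come from container metadata.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
import org.apache.hadoop.utils.MetadataStore;

import java.io.IOException;

public class ContainerCacheSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    ContainerCache cache = ContainerCache.getInstance(conf);

    long containerID = 42L;                                 // hypothetical ID
    String dbPath = "/data/disk1/hdds/db/42-container.db";  // hypothetical path

    // Opens the store on a cache miss; subsequent calls for the same
    // container reuse the cached handle, and LRU eviction closes it.
    MetadataStore db = cache.getDB(containerID, "RocksDB", dbPath);

    // ... read and write container metadata through db ...

    cache.removeDB(containerID); // close and evict this handle
    cache.shutdownCache();       // close all remaining handles
  }
}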
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
deleted file mode 100644
index bc0bd05..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * A util class for {@link HddsVolume}.
- */
-public final class HddsVolumeUtil {
-
-  // Private constructor for Utility class. Unused.
-  private HddsVolumeUtil() {
-  }
-
-  private static final String VERSION_FILE   = "VERSION";
-  private static final String STORAGE_ID_PREFIX = "DS-";
-
-  public static File getVersionFile(File rootDir) {
-    return new File(rootDir, VERSION_FILE);
-  }
-
-  public static String generateUuid() {
-    return STORAGE_ID_PREFIX + UUID.randomUUID();
-  }
-
-  /**
-   * Get hddsRoot from volume root. If volumeRoot points to hddsRoot, it is
-   * returned as is.
-   * For a volumeRoot /data/disk1, the hddsRoot is /data/disk1/hdds.
-   * @param volumeRoot root of the volume.
-   * @return hddsRoot of the volume.
-   */
-  public static String getHddsRoot(String volumeRoot) {
-    if (volumeRoot.endsWith(HddsVolume.HDDS_VOLUME_DIR)) {
-      return volumeRoot;
-    } else {
-      File hddsRoot = new File(volumeRoot, HddsVolume.HDDS_VOLUME_DIR);
-      return hddsRoot.getPath();
-    }
-  }
-
-  /**
-   * Returns storageID if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getStorageID(Properties props, File versionFile)
-      throws InconsistentStorageStateException {
-    return getProperty(props, OzoneConsts.STORAGE_ID, versionFile);
-  }
-
-  /**
-   * Returns clusterID if it is valid. It should match the clusterID from the
-   * Datanode. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getClusterID(Properties props, File versionFile,
-      String clusterID) throws InconsistentStorageStateException {
-    String cid = getProperty(props, OzoneConsts.CLUSTER_ID, versionFile);
-
-    if (clusterID == null) {
-      return cid;
-    }
-    if (!clusterID.equals(cid)) {
-      throw new InconsistentStorageStateException("Mismatched " +
-          "ClusterIDs. Version File : " + versionFile + " has clusterID: " +
-          cid + " and Datanode has clusterID: " + clusterID);
-    }
-    return cid;
-  }
-
-  /**
-   * Returns datanodeUuid if it is valid. It should match the UUID of the
-   * Datanode. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static String getDatanodeUUID(Properties props, File versionFile,
-      String datanodeUuid)
-      throws InconsistentStorageStateException {
-    String datanodeID = getProperty(props, OzoneConsts.DATANODE_UUID,
-        versionFile);
-
-    if (datanodeUuid != null && !datanodeUuid.equals(datanodeID)) {
-      throw new InconsistentStorageStateException("Mismatched " +
-          "DatanodeUUIDs. Version File : " + versionFile + " has datanodeUuid: "
-          + datanodeID + " and Datanode has datanodeUuid: " + datanodeUuid);
-    }
-    return datanodeID;
-  }
-
-  /**
-   * Returns creationTime if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static long getCreationTime(Properties props, File versionFile)
-      throws InconsistentStorageStateException {
-    String cTimeStr = getProperty(props, OzoneConsts.CTIME, versionFile);
-
-    long cTime = Long.parseLong(cTimeStr);
-    long currentTime = Time.now();
-    if (cTime > currentTime || cTime < 0) {
-      throw new InconsistentStorageStateException("Invalid Creation time in " +
-          "Version File : " + versionFile + " - " + cTime + ". Current system" +
-          " time is " + currentTime);
-    }
-    return cTime;
-  }
-
-  /**
-   * Returns layOutVersion if it is valid. Throws an exception otherwise.
-   */
-  @VisibleForTesting
-  public static int getLayOutVersion(Properties props, File versionFile) throws
-      InconsistentStorageStateException {
-    String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);
-
-    int lv = Integer.parseInt(lvStr);
-    if (DataNodeLayoutVersion.getLatestVersion().getVersion() != lv) {
-      throw new InconsistentStorageStateException("Invalid layOutVersion. " +
-          "Version file has layOutVersion as " + lv + " and latest Datanode " +
-          "layOutVersion is " +
-          DataNodeLayoutVersion.getLatestVersion().getVersion());
-    }
-    return lv;
-  }
-
-  private static String getProperty(Properties props, String propName, File
-      versionFile)
-      throws InconsistentStorageStateException {
-    String value = props.getProperty(propName);
-    if (StringUtils.isBlank(value)) {
-      throw new InconsistentStorageStateException("Invalid " + propName +
-          ". Version File : " + versionFile + " has null or empty " + propName);
-    }
-    return value;
-  }
-
-  /**
-   * Checks whether the volume is in a consistent state, formatting it if
-   * it has not been formatted yet.
-   * @param hddsVolume - volume to check.
-   * @param scmId - ID of the SCM the datanode is registered with.
-   * @param clusterId - ID of the cluster.
-   * @param logger - logger for reporting errors.
-   * @return true - if volume is in consistent state, otherwise false.
-   */
-  public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String
-      clusterId, Logger logger) {
-    File hddsRoot = hddsVolume.getHddsRootDir();
-    String volumeRoot = hddsRoot.getPath();
-    File scmDir = new File(hddsRoot, scmId);
-
-    try {
-      hddsVolume.format(clusterId);
-    } catch (IOException ex) {
-      logger.error("Error during formatting volume {}, exception is {}",
-          volumeRoot, ex);
-      return false;
-    }
-
-    File[] hddsFiles = hddsRoot.listFiles();
-
-    if (hddsFiles == null) {
-      // This is the case for IOException, where listFiles returns null.
-      // So, we fail the volume.
-      return false;
-    } else if (hddsFiles.length == 1) {
-      // DN started for first time or this is a newly added volume.
-      // So we create scm directory.
-      if (!scmDir.mkdir()) {
-        logger.error("Unable to create scmDir {}", scmDir);
-        return false;
-      }
-      return true;
-    } else if (hddsFiles.length == 2) {
-      // The files should be Version and SCM directory
-      if (scmDir.exists()) {
-        return true;
-      } else {
-        logger.error("Volume {} is in Inconsistent state, expected scm " +
-                "directory {} does not exist", volumeRoot, scmDir
-            .getAbsolutePath());
-        return false;
-      }
-    } else {
-      // The hdds root dir should always have 2 files. One is Version file
-      // and other is SCM directory.
-      return false;
-    }
-
-  }
-}

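A short sketch of the path helpers above, assuming a hypothetical volume root of /data/disk1:

import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;

import java.io.File;

public class HddsVolumeUtilSketch {
  public static void main(String[] args) {
    // Maps a configured volume root to its hdds directory; returns the
    // path unchanged if it already ends with "hdds".
    String hddsRoot = HddsVolumeUtil.getHddsRoot("/data/disk1");
    System.out.println(hddsRoot); // /data/disk1/hdds

    // Location of the VERSION file under the hdds root, plus a fresh
    // "DS-"-prefixed storage ID.
    File versionFile = HddsVolumeUtil.getVersionFile(new File(hddsRoot));
    String storageID = HddsVolumeUtil.generateUuid();
    System.out.println(versionFile + " " + storageID);
  }
}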
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
deleted file mode 100644
index 08264f0..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.utils;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
deleted file mode 100644
index 6b90146..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ /dev/null
@@ -1,408 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * HddsVolume represents a volume in a datanode. {@link VolumeSet} maintains
- * a list of HddsVolumes, one for each volume in the Datanode.
- * A {@link VolumeInfo} is encompassed by this class.
- *
- * The disk layout per volume is as follows:
- * ../hdds/VERSION
- * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>>/metadata
- * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>>/<<dataDir>>
- *
- * Each hdds volume has its own VERSION file. The hdds volume will have one
- * scmUuid directory for each SCM it is a part of (currently only one SCM is
- * supported).
- *
- * During DN startup, if the VERSION file exists, we verify that the
- * clusterID in the version file matches the clusterID from SCM.
- */
-public final class HddsVolume {
-
-  private static final Logger LOG = LoggerFactory.getLogger(HddsVolume.class);
-
-  public static final String HDDS_VOLUME_DIR = "hdds";
-
-  private final File hddsRootDir;
-  private final VolumeInfo volumeInfo;
-  private VolumeState state;
-  private final VolumeIOStats volumeIOStats;
-
-  // VERSION file properties
-  private String storageID;       // id of the file system
-  private String clusterID;       // id of the cluster
-  private String datanodeUuid;    // id of the DataNode
-  private long cTime;             // creation time of the file system state
-  private int layoutVersion;      // layout version of the storage data
-
-  /**
-   * Builder for HddsVolume.
-   */
-  public static class Builder {
-    private final String volumeRootStr;
-    private Configuration conf;
-    private StorageType storageType;
-    private long configuredCapacity;
-
-    private String datanodeUuid;
-    private String clusterID;
-    private boolean failedVolume = false;
-
-    public Builder(String rootDirStr) {
-      this.volumeRootStr = rootDirStr;
-    }
-
-    public Builder conf(Configuration config) {
-      this.conf = config;
-      return this;
-    }
-
-    public Builder storageType(StorageType st) {
-      this.storageType = st;
-      return this;
-    }
-
-    public Builder configuredCapacity(long capacity) {
-      this.configuredCapacity = capacity;
-      return this;
-    }
-
-    public Builder datanodeUuid(String datanodeUUID) {
-      this.datanodeUuid = datanodeUUID;
-      return this;
-    }
-
-    public Builder clusterID(String cid) {
-      this.clusterID = cid;
-      return this;
-    }
-
-    // This is used to create failed-volume objects, i.e. failed
-    // HddsVolume objects for the case where an exception occurs
-    // while constructing an HddsVolume.
-    public Builder failedVolume(boolean failed) {
-      this.failedVolume = failed;
-      return this;
-    }
-
-    public HddsVolume build() throws IOException {
-      return new HddsVolume(this);
-    }
-  }
-
-  private HddsVolume(Builder b) throws IOException {
-    if (!b.failedVolume) {
-      StorageLocation location = StorageLocation.parse(b.volumeRootStr);
-      hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
-      this.state = VolumeState.NOT_INITIALIZED;
-      this.clusterID = b.clusterID;
-      this.datanodeUuid = b.datanodeUuid;
-      this.volumeIOStats = new VolumeIOStats();
-
-      VolumeInfo.Builder volumeBuilder =
-          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-              .storageType(b.storageType)
-              .configuredCapacity(b.configuredCapacity);
-      this.volumeInfo = volumeBuilder.build();
-
-      LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
-          b.storageType + " and capacity : " + volumeInfo.getCapacity());
-
-      initialize();
-    } else {
-      // Builder was called with failedVolume set, so create a failed
-      // HddsVolume object.
-      hddsRootDir = new File(b.volumeRootStr);
-      volumeIOStats = null;
-      volumeInfo = null;
-      storageID = UUID.randomUUID().toString();
-      state = VolumeState.FAILED;
-    }
-  }
-
-  public VolumeInfo getVolumeInfo() {
-    return volumeInfo;
-  }
-
-  /**
-   * Initializes the volume.
-   * Creates the VERSION file if not present; otherwise reads and
-   * verifies the existing one.
-   * @throws IOException if the volume is in an inconsistent state.
-   */
-  private void initialize() throws IOException {
-    VolumeState initialVolumeState = analyzeVolumeState();
-    switch (initialVolumeState) {
-    case NON_EXISTENT:
-      // Root directory does not exist. Create it.
-      if (!hddsRootDir.mkdir()) {
-        throw new IOException("Cannot create directory " + hddsRootDir);
-      }
-      setState(VolumeState.NOT_FORMATTED);
-      createVersionFile();
-      break;
-    case NOT_FORMATTED:
-      // Version File does not exist. Create it.
-      createVersionFile();
-      break;
-    case NOT_INITIALIZED:
-      // Version File exists. Verify its correctness and update property fields.
-      readVersionFile();
-      setState(VolumeState.NORMAL);
-      break;
-    case INCONSISTENT:
-      // Volume Root is in an inconsistent state. Skip loading this volume.
-      throw new IOException("Volume is in an " + VolumeState.INCONSISTENT +
-          " state. Skipped loading volume: " + hddsRootDir.getPath());
-    default:
-      throw new IOException("Unrecognized initial state : " +
-          intialVolumeState + "of volume : " + hddsRootDir);
-    }
-  }
-
-  private VolumeState analyzeVolumeState() {
-    if (!hddsRootDir.exists()) {
-      // Volume Root does not exist.
-      return VolumeState.NON_EXISTENT;
-    }
-    if (!hddsRootDir.isDirectory()) {
-      // Volume Root exists but is not a directory.
-      return VolumeState.INCONSISTENT;
-    }
-    File[] files = hddsRootDir.listFiles();
-    if (files == null || files.length == 0) {
-      // Volume Root exists and is empty.
-      return VolumeState.NOT_FORMATTED;
-    }
-    if (!getVersionFile().exists()) {
-      // Volume Root is non empty but VERSION file does not exist.
-      return VolumeState.INCONSISTENT;
-    }
-    // Volume Root and VERSION file exist.
-    return VolumeState.NOT_INITIALIZED;
-  }
-
-  public void format(String cid) throws IOException {
-    Preconditions.checkNotNull(cid, "clusterID cannot be null while " +
-        "formatting Volume");
-    this.clusterID = cid;
-    initialize();
-  }
-
-  /**
-   * Creates the VERSION file and writes the property fields into it.
-   * @throws IOException
-   */
-  private void createVersionFile() throws IOException {
-    this.storageID = HddsVolumeUtil.generateUuid();
-    this.cTime = Time.now();
-    this.layoutVersion = ChunkLayOutVersion.getLatestVersion().getVersion();
-
-    if (this.clusterID == null || datanodeUuid == null) {
-      // HddsDatanodeService does not have the cluster information yet. Wait
-      // for registration with SCM.
-      LOG.debug("ClusterID not available. Cannot format the volume {}",
-          this.hddsRootDir.getPath());
-      setState(VolumeState.NOT_FORMATTED);
-    } else {
-      // Write the version file to disk.
-      writeVersionFile();
-      setState(VolumeState.NORMAL);
-    }
-  }
-
-  private void writeVersionFile() throws IOException {
-    Preconditions.checkNotNull(this.storageID,
-        "StorageID cannot be null in Version File");
-    Preconditions.checkNotNull(this.clusterID,
-        "ClusterID cannot be null in Version File");
-    Preconditions.checkNotNull(this.datanodeUuid,
-        "DatanodeUUID cannot be null in Version File");
-    Preconditions.checkArgument(this.cTime > 0,
-        "Creation Time should be positive");
-    Preconditions.checkArgument(this.layoutVersion ==
-            DataNodeLayoutVersion.getLatestVersion().getVersion(),
-        "Version File should have the latest LayOutVersion");
-
-    File versionFile = getVersionFile();
-    LOG.debug("Writing Version file to disk, {}", versionFile);
-
-    DatanodeVersionFile dnVersionFile = new DatanodeVersionFile(this.storageID,
-        this.clusterID, this.datanodeUuid, this.cTime, this.layoutVersion);
-    dnVersionFile.createVersionFile(versionFile);
-  }
-
-  /**
-   * Reads the VERSION file and updates the property fields.
-   * Gets the common storage fields.
-   * Extend this if additional fields need to be read.
-   *
-   * @throws IOException on error
-   */
-  private void readVersionFile() throws IOException {
-    File versionFile = getVersionFile();
-    Properties props = DatanodeVersionFile.readFrom(versionFile);
-    if (props.isEmpty()) {
-      throw new InconsistentStorageStateException(
-          "Version file " + versionFile + " is missing");
-    }
-
-    LOG.debug("Reading Version file from disk, {}", versionFile);
-    this.storageID = HddsVolumeUtil.getStorageID(props, versionFile);
-    this.clusterID = HddsVolumeUtil.getClusterID(props, versionFile,
-        this.clusterID);
-    this.datanodeUuid = HddsVolumeUtil.getDatanodeUUID(props, versionFile,
-        this.datanodeUuid);
-    this.cTime = HddsVolumeUtil.getCreationTime(props, versionFile);
-    this.layoutVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile);
-  }
-
-  private File getVersionFile() {
-    return HddsVolumeUtil.getVersionFile(hddsRootDir);
-  }
-
-  public File getHddsRootDir() {
-    return hddsRootDir;
-  }
-
-  public StorageType getStorageType() {
-    if (volumeInfo != null) {
-      return volumeInfo.getStorageType();
-    }
-    return StorageType.DEFAULT;
-  }
-
-  public String getStorageID() {
-    return storageID;
-  }
-
-  public String getClusterID() {
-    return clusterID;
-  }
-
-  public String getDatanodeUuid() {
-    return datanodeUuid;
-  }
-
-  public long getCTime() {
-    return cTime;
-  }
-
-  public int getLayoutVersion() {
-    return layoutVersion;
-  }
-
-  public VolumeState getStorageState() {
-    return state;
-  }
-
-  public long getCapacity() throws IOException {
-    if (volumeInfo != null) {
-      return volumeInfo.getCapacity();
-    }
-    return 0;
-  }
-
-  public long getAvailable() throws IOException {
-    if (volumeInfo != null) {
-      return volumeInfo.getAvailable();
-    }
-    return 0;
-  }
-
-  public void setState(VolumeState state) {
-    this.state = state;
-  }
-
-  public boolean isFailed() {
-    return (state == VolumeState.FAILED);
-  }
-
-  public VolumeIOStats getVolumeIOStats() {
-    return volumeIOStats;
-  }
-
-  public void failVolume() {
-    setState(VolumeState.FAILED);
-    if (volumeInfo != null) {
-      volumeInfo.shutdownUsageThread();
-    }
-  }
-
-  public void shutdown() {
-    this.state = VolumeState.NON_EXISTENT;
-    if (volumeInfo != null) {
-      volumeInfo.shutdownUsageThread();
-    }
-  }
-
-  /**
-   * VolumeState represents the different states a HddsVolume can be in.
-   * NORMAL          => Volume can be used for storage
-   * FAILED          => Volume has failed and can no longer be used for
-   *                    storing containers.
-   * NON_EXISTENT    => Volume Root dir does not exist
-   * INCONSISTENT    => Volume Root dir is not empty but VERSION file is
-   *                    missing or Volume Root dir is not a directory
-   * NOT_FORMATTED   => Volume Root exists but not formatted (no VERSION file)
-   * NOT_INITIALIZED => VERSION file exists but has not been verified for
-   *                    correctness.
-   */
-  public enum VolumeState {
-    NORMAL,
-    FAILED,
-    NON_EXISTENT,
-    INCONSISTENT,
-    NOT_FORMATTED,
-    NOT_INITIALIZED
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    if (volumeInfo != null) {
-      volumeInfo.setScmUsageForTesting(scmUsageForTest);
-    }
-  }
-}

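A minimal sketch of building an HddsVolume via its Builder. The root path and cluster ID are hypothetical, and the datanode UUID is generated locally here; in the real datanode both IDs come from configuration and SCM registration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

import java.io.IOException;
import java.util.UUID;

public class HddsVolumeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    HddsVolume volume = new HddsVolume.Builder("/data/disk1") // hypothetical
        .conf(conf)
        .datanodeUuid(UUID.randomUUID().toString())
        .clusterID("CID-example")                  // hypothetical
        .storageType(StorageType.DISK)
        .build();

    // build() runs initialize(): for a fresh directory this creates the
    // hdds root and writes the VERSION file, leaving the state NORMAL.
    System.out.println(volume.getStorageState());
  }
}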
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
deleted file mode 100644
index 75c92ec..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Choose volumes in round-robin order.
- * The caller should synchronize access to the list of volumes.
- */
-public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy {
-
-  public static final Log LOG = LogFactory.getLog(
-      RoundRobinVolumeChoosingPolicy.class);
-
-  // Stores the index of the next volume to be returned.
-  private AtomicInteger nextVolumeIndex = new AtomicInteger(0);
-
-  @Override
-  public HddsVolume chooseVolume(List<HddsVolume> volumes,
-      long maxContainerSize) throws IOException {
-
-    // No volumes available to choose from
-    if (volumes.size() < 1) {
-      throw new DiskOutOfSpaceException("No more available volumes");
-    }
-
-    // Since volumes could have been removed because of failures,
-    // make sure we are not out of bounds.
-    int nextIndex = nextVolumeIndex.get();
-    int currentVolumeIndex = nextIndex < volumes.size() ? nextIndex : 0;
-
-    int startVolumeIndex = currentVolumeIndex;
-    long maxAvailable = 0;
-
-    while (true) {
-      final HddsVolume volume = volumes.get(currentVolumeIndex);
-      long availableVolumeSize = volume.getAvailable();
-
-      currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size();
-
-      if (availableVolumeSize > maxContainerSize) {
-        nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex);
-        return volume;
-      }
-
-      if (availableVolumeSize > maxAvailable) {
-        maxAvailable = availableVolumeSize;
-      }
-
-      if (currentVolumeIndex == startVolumeIndex) {
-        throw new DiskOutOfSpaceException("Out of space: "
-            + "The volume with the most available space (=" + maxAvailable
-            + " B) is less than the container size (=" + maxContainerSize
-            + " B).");
-      }
-
-    }
-  }
-}

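Choosing a volume is a single call; a sketch, assuming the list comes from VolumeSet#getVolumesList() and using a hypothetical 5 GB maximum container size:

import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;

import java.io.IOException;
import java.util.List;

public class ChooseVolumeSketch {
  static HddsVolume pick(List<HddsVolume> volumes) throws IOException {
    RoundRobinVolumeChoosingPolicy policy = new RoundRobinVolumeChoosingPolicy();
    long maxContainerSize = 5L * 1024 * 1024 * 1024; // hypothetical 5 GB

    // Rotates through the volumes and returns the first with more than
    // maxContainerSize bytes available; throws DiskOutOfSpaceException
    // after a full pass with no match.
    return policy.chooseVolume(volumes, maxContainerSize);
  }
}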
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
deleted file mode 100644
index 9e2eb22..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class is used to track Volume IO stats for each HDDS Volume.
- */
-public class VolumeIOStats {
-
-  private final AtomicLong readBytes;
-  private final AtomicLong readOpCount;
-  private final AtomicLong writeBytes;
-  private final AtomicLong writeOpCount;
-  private final AtomicLong readTime;
-  private final AtomicLong writeTime;
-
-  public VolumeIOStats() {
-    readBytes = new AtomicLong(0);
-    readOpCount = new AtomicLong(0);
-    writeBytes = new AtomicLong(0);
-    writeOpCount = new AtomicLong(0);
-    readTime = new AtomicLong(0);
-    writeTime = new AtomicLong(0);
-  }
-
-  /**
-   * Increments the number of bytes read from the volume.
-   * @param bytesRead - number of bytes read.
-   */
-  public void incReadBytes(long bytesRead) {
-    readBytes.addAndGet(bytesRead);
-  }
-
-  /**
-   * Increment the read operations performed on the volume.
-   */
-  public void incReadOpCount() {
-    readOpCount.incrementAndGet();
-  }
-
-  /**
-   * Increments the number of bytes written to the volume.
-   * @param bytesWritten - number of bytes written.
-   */
-  public void incWriteBytes(long bytesWritten) {
-    writeBytes.addAndGet(bytesWritten);
-  }
-
-  /**
-   * Increment the write operations performed on the volume.
-   */
-  public void incWriteOpCount() {
-    writeOpCount.incrementAndGet();
-  }
-
-  /**
-   * Increments the time taken by read operations on the volume.
-   * @param time - time taken by the read operation.
-   */
-  public void incReadTime(long time) {
-    readTime.addAndGet(time);
-  }
-
-  /**
-   * Increments the time taken by write operations on the volume.
-   * @param time - time taken by the write operation.
-   */
-  public void incWriteTime(long time) {
-    writeTime.addAndGet(time);
-  }
-
-  /**
-   * Returns total number of bytes read from the volume.
-   * @return long
-   */
-  public long getReadBytes() {
-    return readBytes.get();
-  }
-
-  /**
-   * Returns total number of bytes written to the volume.
-   * @return long
-   */
-  public long getWriteBytes() {
-    return writeBytes.get();
-  }
-
-  /**
-   * Returns total number of read operations performed on the volume.
-   * @return long
-   */
-  public long getReadOpCount() {
-    return readOpCount.get();
-  }
-
-  /**
-   * Returns total number of write operations performed on the volume.
-   * @return long
-   */
-  public long getWriteOpCount() {
-    return writeOpCount.get();
-  }
-
-  /**
-   * Returns total read operations time on the volume.
-   * @return long
-   */
-  public long getReadTime() {
-    return readTime.get();
-  }
-
-  /**
-   * Returns total write operations time on the volume.
-   * @return long
-   */
-  public long getWriteTime() {
-    return writeTime.get();
-  }
-
-
-}

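A sketch of how a read path might feed these counters; the method name and the millisecond start time are assumptions for illustration:

import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
import org.apache.hadoop.util.Time;

public class VolumeIOStatsSketch {
  // Records one completed read: bytes, op count, and elapsed time.
  static void recordRead(VolumeIOStats stats, int bytesRead, long startMs) {
    stats.incReadBytes(bytesRead);
    stats.incReadOpCount();
    stats.incReadTime(Time.now() - startMs);
  }
}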
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
deleted file mode 100644
index 62fca63..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Stores information about a disk/volume.
- */
-public class VolumeInfo {
-
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class);
-
-  private final String rootDir;
-  private final StorageType storageType;
-
-  // Space usage calculator
-  private VolumeUsage usage;
-  // Capacity configured. This is useful when we want to
-  // limit the visible capacity for tests. If negative, then we just
-  // query from the filesystem.
-  private long configuredCapacity;
-
-  /**
-   * Builder for VolumeInfo.
-   */
-  public static class Builder {
-    private final Configuration conf;
-    private final String rootDir;
-    private StorageType storageType;
-    private long configuredCapacity;
-
-    public Builder(String root, Configuration config) {
-      this.rootDir = root;
-      this.conf = config;
-    }
-
-    public Builder storageType(StorageType st) {
-      this.storageType = st;
-      return this;
-    }
-
-    public Builder configuredCapacity(long capacity) {
-      this.configuredCapacity = capacity;
-      return this;
-    }
-
-    public VolumeInfo build() throws IOException {
-      return new VolumeInfo(this);
-    }
-  }
-
-  private VolumeInfo(Builder b) throws IOException {
-
-    this.rootDir = b.rootDir;
-    File root = new File(this.rootDir);
-
-    boolean succeeded = root.isDirectory() || root.mkdirs();
-
-    if (!succeeded) {
-      LOG.error("Unable to create the volume root dir at : {}", root);
-      throw new IOException("Unable to create the volume root dir at " + root);
-    }
-
-    this.storageType = (b.storageType != null ?
-        b.storageType : StorageType.DEFAULT);
-
-    this.configuredCapacity = (b.configuredCapacity != 0 ?
-        b.configuredCapacity : -1);
-
-    this.usage = new VolumeUsage(root, b.conf);
-  }
-
-  public long getCapacity() {
-    return configuredCapacity < 0 ? usage.getCapacity() : configuredCapacity;
-  }
-
-  public long getAvailable() throws IOException {
-    return usage.getAvailable();
-  }
-
-  public long getScmUsed() throws IOException {
-    return usage.getScmUsed();
-  }
-
-  protected void shutdownUsageThread() {
-    if (usage != null) {
-      usage.shutdown();
-    }
-    usage = null;
-  }
-
-  public String getRootDir() {
-    return this.rootDir;
-  }
-
-  public StorageType getStorageType() {
-    return this.storageType;
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    usage.setScmUsageForTesting(scmUsageForTest);
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public VolumeUsage getUsageForTesting() {
-    return usage;
-  }
-}

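A sketch of building a VolumeInfo directly, with a hypothetical root path. Leaving configuredCapacity unset keeps it at 0, which the constructor normalizes to -1 so capacity is queried from the filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.container.common.volume.VolumeInfo;

import java.io.IOException;

public class VolumeInfoSketch {
  public static void main(String[] args) throws IOException {
    VolumeInfo info = new VolumeInfo.Builder("/data/disk1", new Configuration())
        .storageType(StorageType.DISK)
        .build();

    // With no configured capacity, both values are delegated to the
    // VolumeUsage space-usage calculator.
    System.out.println(info.getCapacity() + " bytes capacity, "
        + info.getAvailable() + " bytes available");
  }
}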
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
deleted file mode 100644
index 06f48fc..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
-
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.InstrumentedLock;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * VolumeSet to manage volumes in a DataNode.
- */
-public class VolumeSet {
-
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeSet.class);
-
-  private Configuration conf;
-
-  /**
-   * {@link VolumeSet#volumeMap} maintains a map of all active volumes in the
-   * DataNode. Each volume has one-to-one mapping with a volumeInfo object.
-   */
-  private Map<String, HddsVolume> volumeMap;
-  /**
-   * {@link VolumeSet#failedVolumeMap} maintains a map of volumes which have
-   * failed. The keys in this map and {@link VolumeSet#volumeMap} are
-   * mutually exclusive.
-   */
-  private Map<String, HddsVolume> failedVolumeMap;
-
-  /**
-   * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per
-   * StorageType.
-   */
-  private EnumMap<StorageType, List<HddsVolume>> volumeStateMap;
-
-  /**
-   * Lock to synchronize changes to the VolumeSet. Any update to
-   * {@link VolumeSet#volumeMap}, {@link VolumeSet#failedVolumeMap}, or
-   * {@link VolumeSet#volumeStateMap} should be done after acquiring this lock.
-   */
-  private final AutoCloseableLock volumeSetLock;
-
-  private final String datanodeUuid;
-  private String clusterID;
-
-  private Runnable shutdownHook;
-
-  public VolumeSet(String dnUuid, Configuration conf)
-      throws IOException {
-    this(dnUuid, null, conf);
-  }
-
-  public VolumeSet(String dnUuid, String clusterID, Configuration conf)
-      throws IOException {
-    this.datanodeUuid = dnUuid;
-    this.clusterID = clusterID;
-    this.conf = conf;
-    this.volumeSetLock = new AutoCloseableLock(
-        new InstrumentedLock(getClass().getName(), LOG,
-            new ReentrantLock(true),
-            conf.getTimeDuration(
-                OzoneConfigKeys.HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
-                OzoneConfigKeys.HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT,
-                TimeUnit.MILLISECONDS),
-            conf.getTimeDuration(
-                OzoneConfigKeys.HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_KEY,
-                OzoneConfigKeys.HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT,
-                TimeUnit.MILLISECONDS)));
-
-    initializeVolumeSet();
-  }
-
-  // Add DN volumes configured through ConfigKeys to volumeMap.
-  private void initializeVolumeSet() throws IOException {
-    volumeMap = new ConcurrentHashMap<>();
-    failedVolumeMap = new ConcurrentHashMap<>();
-    volumeStateMap = new EnumMap<>(StorageType.class);
-
-    Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        HDDS_DATANODE_DIR_KEY);
-    if (rawLocations.isEmpty()) {
-      rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
-    }
-    if (rawLocations.isEmpty()) {
-      throw new IllegalArgumentException("No location configured in either "
-          + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY);
-    }
-
-    for (StorageType storageType : StorageType.values()) {
-      volumeStateMap.put(storageType, new ArrayList<HddsVolume>());
-    }
-
-    for (String locationString : rawLocations) {
-      try {
-        StorageLocation location = StorageLocation.parse(locationString);
-
-        HddsVolume hddsVolume = createVolume(location.getUri().getPath(),
-            location.getStorageType());
-
-        checkAndSetClusterID(hddsVolume.getClusterID());
-
-        volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
-        volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
-        LOG.info("Added Volume : {} to VolumeSet",
-            hddsVolume.getHddsRootDir().getPath());
-      } catch (IOException e) {
-        HddsVolume volume = new HddsVolume.Builder(locationString)
-            .failedVolume(true).build();
-        failedVolumeMap.put(locationString, volume);
-        LOG.error("Failed to parse the storage location: " + locationString, e);
-      }
-    }
-
-    if (volumeMap.isEmpty()) {
-      throw new DiskOutOfSpaceException("No storage location configured");
-    }
-
-    // Ensure volume threads are stopped and scm df is saved during shutdown.
-    shutdownHook = () -> {
-      saveVolumeSetUsed();
-    };
-    ShutdownHookManager.get().addShutdownHook(shutdownHook,
-        SHUTDOWN_HOOK_PRIORITY);
-  }
-
-  /**
-   * If the Version file exists and {@link VolumeSet#clusterID} is not set
-   * yet, assign it the value from the Version file. Otherwise, check that
-   * the given id matches the id from the Version file.
-   * @param idFromVersionFile value of the property from Version file
-   * @throws InconsistentStorageStateException
-   */
-  private void checkAndSetClusterID(String idFromVersionFile)
-      throws InconsistentStorageStateException {
-    // If the clusterID is null (not set), assign it the value
-    // from version file.
-    if (this.clusterID == null) {
-      this.clusterID = idFromVersionFile;
-      return;
-    }
-
-    // If the clusterID is already set, it should match with the value from the
-    // version file.
-    if (!idFromVersionFile.equals(this.clusterID)) {
-      throw new InconsistentStorageStateException(
-          "Mismatched ClusterIDs. VolumeSet has: " + this.clusterID +
-              ", and version file has: " + idFromVersionFile);
-    }
-  }
-
-  public void acquireLock() {
-    volumeSetLock.acquire();
-  }
-
-  public void releaseLock() {
-    volumeSetLock.release();
-  }
-
-  private HddsVolume createVolume(String locationString,
-      StorageType storageType) throws IOException {
-    HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(locationString)
-        .conf(conf)
-        .datanodeUuid(datanodeUuid)
-        .clusterID(clusterID)
-        .storageType(storageType);
-    return volumeBuilder.build();
-  }
-
-
-  // Add a volume to VolumeSet
-  public boolean addVolume(String dataDir) {
-    return addVolume(dataDir, StorageType.DEFAULT);
-  }
-
-  // Add a volume to VolumeSet
-  public boolean addVolume(String volumeRoot, StorageType storageType) {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot);
-    boolean success;
-
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
-      if (volumeMap.containsKey(hddsRoot)) {
-        LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot);
-        success = false;
-      } else {
-        if (failedVolumeMap.containsKey(hddsRoot)) {
-          failedVolumeMap.remove(hddsRoot);
-        }
-
-        HddsVolume hddsVolume = createVolume(volumeRoot, storageType);
-        volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
-        volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
-
-        LOG.info("Added Volume : {} to VolumeSet",
-            hddsVolume.getHddsRootDir().getPath());
-        success = true;
-      }
-    } catch (IOException ex) {
-      LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex);
-      success = false;
-    }
-    return success;
-  }
-
-  // Mark a volume as failed
-  public void failVolume(String dataDir) {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
-
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
-      if (volumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = volumeMap.get(hddsRoot);
-        hddsVolume.failVolume();
-
-        volumeMap.remove(hddsRoot);
-        volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume);
-        failedVolumeMap.put(hddsRoot, hddsVolume);
-
-        LOG.info("Moving Volume : {} to failed Volumes", hddsRoot);
-      } else if (failedVolumeMap.containsKey(hddsRoot)) {
-        LOG.info("Volume : {} is not active", hddsRoot);
-      } else {
-        LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
-      }
-    }
-  }
-
-  // Remove a volume from the VolumeSet completely.
-  public void removeVolume(String dataDir) throws IOException {
-    String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
-
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
-      if (volumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = volumeMap.get(hddsRoot);
-        hddsVolume.shutdown();
-
-        volumeMap.remove(hddsRoot);
-        volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume);
-
-        LOG.info("Removed Volume : {} from VolumeSet", hddsRoot);
-      } else if (failedVolumeMap.containsKey(hddsRoot)) {
-        HddsVolume hddsVolume = failedVolumeMap.get(hddsRoot);
-        hddsVolume.setState(VolumeState.NON_EXISTENT);
-
-        failedVolumeMap.remove(hddsRoot);
-        LOG.info("Removed Volume : {} from failed VolumeSet", hddsRoot);
-      } else {
-        LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
-      }
-    }
-  }
-
-  public HddsVolume chooseVolume(long containerSize,
-      VolumeChoosingPolicy choosingPolicy) throws IOException {
-    return choosingPolicy.chooseVolume(getVolumesList(), containerSize);
-  }
-
-  /**
-   * Calls shutdown on each volume, stopping the volume usage thread and
-   * persisting scmUsed for each volume.
-   */
-  private void saveVolumeSetUsed() {
-    for (HddsVolume hddsVolume : volumeMap.values()) {
-      try {
-        hddsVolume.shutdown();
-      } catch (Exception ex) {
-        LOG.error("Failed to shutdown volume : " + hddsVolume.getHddsRootDir(),
-            ex);
-      }
-    }
-  }
-
-  /**
-   * Shuts down the VolumeSet: calls {@link VolumeSet#saveVolumeSetUsed}
-   * and removes the registered shutdown hook.
-   */
-  public void shutdown() {
-    saveVolumeSetUsed();
-    if (shutdownHook != null) {
-      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
-    }
-  }
-
-  @VisibleForTesting
-  public List<HddsVolume> getVolumesList() {
-    return ImmutableList.copyOf(volumeMap.values());
-  }
-
-  @VisibleForTesting
-  public List<HddsVolume> getFailedVolumesList() {
-    return ImmutableList.copyOf(failedVolumeMap.values());
-  }
-
-  @VisibleForTesting
-  public Map<String, HddsVolume> getVolumeMap() {
-    return ImmutableMap.copyOf(volumeMap);
-  }
-
-  @VisibleForTesting
-  public Map<StorageType, List<HddsVolume>> getVolumeStateMap() {
-    return ImmutableMap.copyOf(volumeStateMap);
-  }
-
-  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
-      throws IOException {
-    boolean failed;
-    StorageLocationReport[] reports = new StorageLocationReport[volumeMap
-        .size() + failedVolumeMap.size()];
-    int counter = 0;
-    HddsVolume hddsVolume;
-    for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-      hddsVolume = entry.getValue();
-      VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
-      long scmUsed = 0;
-      long remaining = 0;
-      failed = false;
-      try {
-        scmUsed = volumeInfo.getScmUsed();
-        remaining = volumeInfo.getAvailable();
-      } catch (IOException ex) {
-        LOG.warn("Failed to get scmUsed and remaining for container " +
-            "storage location {}", volumeInfo.getRootDir());
-        // reset scmUsed and remaining if df/du failed.
-        scmUsed = 0;
-        remaining = 0;
-        failed = true;
-      }
-
-      StorageLocationReport.Builder builder =
-          StorageLocationReport.newBuilder();
-      builder.setStorageLocation(volumeInfo.getRootDir())
-          .setId(hddsVolume.getStorageID())
-          .setFailed(failed)
-          .setCapacity(hddsVolume.getCapacity())
-          .setRemaining(remaining)
-          .setScmUsed(scmUsed)
-          .setStorageType(hddsVolume.getStorageType());
-      StorageLocationReport r = builder.build();
-      reports[counter++] = r;
-    }
-    for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
-      hddsVolume = entry.getValue();
-      StorageLocationReport.Builder builder = StorageLocationReport
-          .newBuilder();
-      builder.setStorageLocation(hddsVolume.getHddsRootDir()
-          .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
-          .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
-              hddsVolume.getStorageType());
-      StorageLocationReport r = builder.build();
-      reports[counter++] = r;
-    }
-    NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
-    for (int i = 0; i < reports.length; i++) {
-      nrb.addStorageReport(reports[i].getProtoBufMessage());
-    }
-    return nrb.build();
-  }
-}
\ No newline at end of file
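
The chooseVolume method above delegates placement to a pluggable
VolumeChoosingPolicy, so the selection strategy can change without touching
VolumeSet itself. Below is a minimal round-robin sketch of such a policy;
the VolumeSketch interface and class name are hypothetical stand-ins for
illustration, not the Ozone interfaces:

    import java.io.IOException;
    import java.util.List;

    /** Sketch: round-robin volume selection with a capacity check. */
    class RoundRobinPolicySketch {

      /** Hypothetical stand-in for a volume that reports free space. */
      interface VolumeSketch {
        long available() throws IOException;
      }

      private int next = 0; // index of the volume to try first

      /** Picks the next volume with at least containerSize bytes free. */
      synchronized VolumeSketch chooseVolume(List<VolumeSketch> volumes,
          long containerSize) throws IOException {
        for (int i = 0; i < volumes.size(); i++) {
          VolumeSketch v = volumes.get((next + i) % volumes.size());
          if (v.available() >= containerSize) {
            next = (next + i + 1) % volumes.size();
            return v;
          }
        }
        throw new IOException(
            "No volume found with " + containerSize + " bytes available");
      }
    }

Marking chooseVolume synchronized keeps the round-robin cursor consistent
when several dispatcher threads place containers at once.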

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
deleted file mode 100644
index 2c7563e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CachingGetSpaceUsed;
-import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.nio.charset.StandardCharsets;
-import java.util.Scanner;
-
-/**
- * Tracks the disk space usage (df/du) of a datanode volume used by SCM
- * containers.
- */
-public class VolumeUsage {
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class);
-
-  private final File rootDir;
-  private final DF df;
-  private final File scmUsedFile;
-  private GetSpaceUsed scmUsage;
-
-  private static final String DU_CACHE_FILE = "scmUsed";
-  private volatile boolean scmUsedSaved = false;
-
-  VolumeUsage(File dataLoc, Configuration conf)
-      throws IOException {
-    this.rootDir = dataLoc;
-
-    // SCM used cache file
-    scmUsedFile = new File(rootDir, DU_CACHE_FILE);
-    // get overall disk df
-    this.df = new DF(rootDir, conf);
-
-    startScmUsageThread(conf);
-  }
-
-  void startScmUsageThread(Configuration conf) throws IOException {
-    // get SCM specific df
-    this.scmUsage = new CachingGetSpaceUsed.Builder().setPath(rootDir)
-        .setConf(conf)
-        .setInitialUsed(loadScmUsed())
-        .build();
-  }
-
-  long getCapacity() {
-    long capacity = df.getCapacity();
-    return (capacity > 0) ? capacity : 0;
-  }
-
-  /**
-   * Calculate the available space in the volume.
-   */
-  long getAvailable() throws IOException {
-    long remaining = getCapacity() - getScmUsed();
-    long available = df.getAvailable();
-    if (remaining > available) {
-      remaining = available;
-    }
-    return (remaining > 0) ? remaining : 0;
-  }
-
-  long getScmUsed() throws IOException {
-    return scmUsage.getUsed();
-  }
-
-  public void shutdown() {
-    saveScmUsed();
-
-    if (scmUsage instanceof CachingGetSpaceUsed) {
-      IOUtils.cleanupWithLogger(null, ((CachingGetSpaceUsed) scmUsage));
-    }
-  }
-
-  /**
-   * Read in the cached DU value and return it if it is less than 600 seconds
-   * old (DU update interval). Slight imprecision of scmUsed is not critical
-   * and skipping DU can significantly shorten the startup time.
-   * If the cached value is not available or too old, -1 is returned.
-   */
-  long loadScmUsed() {
-    long cachedScmUsed;
-    long mtime;
-    Scanner sc;
-
-    try {
-      sc = new Scanner(scmUsedFile, "UTF-8");
-    } catch (FileNotFoundException fnfe) {
-      return -1;
-    }
-
-    try {
-      // Get the recorded scmUsed from the file.
-      if (sc.hasNextLong()) {
-        cachedScmUsed = sc.nextLong();
-      } else {
-        return -1;
-      }
-      // Get the recorded mtime from the file.
-      if (sc.hasNextLong()) {
-        mtime = sc.nextLong();
-      } else {
-        return -1;
-      }
-
-      // Return the cached value if mtime is okay.
-      if (mtime > 0 && (Time.now() - mtime < 600000L)) {
-        LOG.info("Cached ScmUsed found for {} : {} ", rootDir,
-            cachedScmUsed);
-        return cachedScmUsed;
-      }
-      return -1;
-    } finally {
-      sc.close();
-    }
-  }
-
-  /**
-   * Write the current scmUsed to the cache file.
-   */
-  void saveScmUsed() {
-    if (scmUsedFile.exists() && !scmUsedFile.delete()) {
-      LOG.warn("Failed to delete old scmUsed file in {}.", rootDir);
-    }
-    OutputStreamWriter out = null;
-    try {
-      long used = getScmUsed();
-      if (used > 0) {
-        out = new OutputStreamWriter(new FileOutputStream(scmUsedFile),
-            StandardCharsets.UTF_8);
-        // mtime is written last, so that truncated writes won't be valid.
-        out.write(Long.toString(used) + " " + Long.toString(Time.now()));
-        out.flush();
-        out.close();
-        out = null;
-      }
-    } catch (IOException ioe) {
-      // If write failed, the volume might be bad. Since the cache file is
-      // not critical, log the error and continue.
-      LOG.warn("Failed to write scmUsed to " + scmUsedFile, ioe);
-    } finally {
-      IOUtils.cleanupWithLogger(null, out);
-    }
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    this.scmUsage = scmUsageForTest;
-  }
-}
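
The scmUsed cache file handled above holds two longs, the cached usage
followed by its write timestamp, with the timestamp written last so a
truncated write fails validation on reload. A self-contained sketch of that
load-and-validate step, assuming the same "<used> <mtime>" layout (the class
and constant names here are hypothetical):

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.util.Scanner;

    /** Sketch: load a "<used> <mtime>" cache, honoring a staleness window. */
    class UsageCacheSketch {
      static final long MAX_AGE_MS = 600_000L; // 600s, the DU update interval

      /** Returns the cached value, or -1 if missing, malformed, or stale. */
      static long load(File cacheFile) {
        try (Scanner sc = new Scanner(cacheFile, "UTF-8")) {
          if (!sc.hasNextLong()) {
            return -1; // empty or malformed "used" field
          }
          long cachedUsed = sc.nextLong();
          if (!sc.hasNextLong()) {
            return -1; // truncated write: mtime missing
          }
          long mtime = sc.nextLong();
          boolean fresh = mtime > 0
              && System.currentTimeMillis() - mtime < MAX_AGE_MS;
          return fresh ? cachedUsed : -1;
        } catch (FileNotFoundException e) {
          return -1; // no cache yet: caller falls back to a fresh du
        }
      }
    }

Returning -1 signals "no usable estimate", which lets the CachingGetSpaceUsed
built in startScmUsageThread fall back to a fresh scan on startup.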

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
deleted file mode 100644
index 86093c6..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-/**
- * This package contains volume/disk related classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
deleted file mode 100644
index 535af29..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.utils.MetaStoreIterator;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.NoSuchElementException;
-
-
-/**
- * Block iterator for a KeyValue container. The iterator returns only blocks
- * that match the supplied {@link MetadataKeyFilters.KeyPrefixFilter}. If no
- * filter is specified, the default filter
- * {@link MetadataKeyFilters#getNormalKeyFilter()} is used.
- */
-@InterfaceAudience.Public
-public class KeyValueBlockIterator implements BlockIterator<BlockData> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      KeyValueBlockIterator.class);
-
-  private MetaStoreIterator<KeyValue> blockIterator;
-  private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
-      .getNormalKeyFilter();
-  private KeyPrefixFilter blockFilter;
-  private BlockData nextBlock;
-  private long containerId;
-
-  /**
-   * KeyValueBlockIterator to iterate blocks in a container.
-   * @param id - container id
-   * @param path - container base path
-   * @throws IOException
-   */
-
-  public KeyValueBlockIterator(long id, File path)
-      throws IOException {
-    this(id, path, defaultBlockFilter);
-  }
-
-  /**
-   * KeyValueBlockIterator to iterate blocks in a container.
-   * @param id - container id
-   * @param path - container base path
-   * @param filter - Block filter, filter to be applied for blocks
-   * @throws IOException
-   */
-  public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter)
-      throws IOException {
-    containerId = id;
-    File metadataPath = new File(path, OzoneConsts.METADATA);
-    File containerFile = ContainerUtils.getContainerFile(metadataPath
-        .getParentFile());
-    ContainerData containerData = ContainerDataYaml.readContainerFile(
-        containerFile);
-    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
-        containerData;
-    keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
-        .getContainerDBFile(metadataPath, containerId));
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new
-        OzoneConfiguration());
-    blockIterator = metadataStore.iterator();
-    blockFilter = filter;
-  }
-
-  /**
-   * Returns the next block matching the filter.
-   * @return the next matching block
-   * @throws NoSuchElementException if the iterator is exhausted
-   */
-  @Override
-  public BlockData nextBlock() throws IOException, NoSuchElementException {
-    if (nextBlock != null) {
-      BlockData currentBlock = nextBlock;
-      nextBlock = null;
-      return currentBlock;
-    }
-    if (hasNext()) {
-      return nextBlock();
-    }
-    throw new NoSuchElementException("Block Iterator reached end for " +
-        "ContainerID " + containerId);
-  }
-
-  @Override
-  public boolean hasNext() throws IOException {
-    if (nextBlock != null) {
-      return true;
-    }
-    if (blockIterator.hasNext()) {
-      KeyValue block = blockIterator.next();
-      if (blockFilter.filterKey(null, block.getKey(), null)) {
-        nextBlock = BlockUtils.getBlockData(block.getValue());
-        LOG.trace("Block matching with filter found: blockID is : {} for " +
-            "containerID {}", nextBlock.getLocalID(), containerId);
-        return true;
-      }
-      return hasNext();
-    }
-    return false;
-  }
-
-  @Override
-  public void seekToFirst() {
-    nextBlock = null;
-    blockIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    nextBlock = null;
-    blockIterator.seekToLast();
-  }
-}
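
hasNext() above implements a one-element look-ahead: it scans forward until a
key passes the filter, caches the decoded block in nextBlock, and nextBlock()
then returns and clears that cached element. The same pattern in a generic,
self-contained form (plain java.util types rather than the Ozone classes),
written with a loop so a long run of filtered-out keys cannot deepen the call
stack:

    import java.util.Iterator;
    import java.util.NoSuchElementException;
    import java.util.function.Predicate;

    /** Sketch: filtering iterator with a one-element look-ahead. */
    class FilteringIteratorSketch<T> implements Iterator<T> {
      private final Iterator<T> source;
      private final Predicate<T> filter;
      private T next; // cached look-ahead element, null when not computed

      FilteringIteratorSketch(Iterator<T> source, Predicate<T> filter) {
        this.source = source;
        this.filter = filter;
      }

      @Override
      public boolean hasNext() {
        while (next == null && source.hasNext()) {
          T candidate = source.next();
          if (filter.test(candidate)) {
            next = candidate; // cache for the following next() call
          }
        }
        return next != null;
      }

      @Override
      public T next() {
        if (!hasNext()) {
          throw new NoSuchElementException("iterator exhausted");
        }
        T result = next;
        next = null; // consume the cached element
        return result;
      }
    }

Usage is the familiar Iterator idiom, while (it.hasNext()) process(it.next());
calling hasNext() repeatedly without next() is safe because the look-ahead is
computed at most once per element.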

