Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2017/12/15 18:16:28 UTC

[2/2] hadoop git commit: HDFS-12712. [9806] Code style cleanup

HDFS-12712. [9806] Code style cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf772b81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf772b81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf772b81

Branch: refs/heads/HDFS-9806
Commit: cf772b81bde5ab96767b1ad7dfe20d481b842e07
Parents: 17464a6
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Fri Dec 15 10:15:15 2017 -0800
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Fri Dec 15 10:15:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   1 -
 .../hadoop/hdfs/protocol/LocatedBlock.java      |  59 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |   2 +-
 .../server/blockmanagement/BlockManager.java    |   5 +-
 .../server/blockmanagement/DatanodeManager.java |   2 +-
 .../blockmanagement/ProvidedStorageMap.java     |   4 +-
 .../hadoop/hdfs/server/common/Storage.java      |   6 +-
 .../impl/TextFileRegionAliasMap.java            |   2 +-
 .../server/datanode/BlockPoolSliceStorage.java  |   3 +-
 .../hdfs/server/datanode/DataStorage.java       |   4 +-
 .../hdfs/server/datanode/ProvidedReplica.java   |   1 -
 .../hdfs/server/datanode/StorageLocation.java   |  12 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   6 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java      |  21 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     |   8 +-
 .../blockmanagement/TestDatanodeManager.java    |   5 +-
 .../blockmanagement/TestProvidedStorageMap.java |  12 +-
 .../datanode/TestProvidedReplicaImpl.java       |  13 +-
 .../fsdataset/impl/TestProvidedImpl.java        |  64 +-
 hadoop-tools/hadoop-fs2img/pom.xml              |   4 +-
 .../hdfs/server/namenode/FileSystemImage.java   |   3 +-
 .../hdfs/server/namenode/ImageWriter.java       |   7 +-
 .../hdfs/server/namenode/SingleUGIResolver.java |   4 +-
 .../hadoop/hdfs/server/namenode/TreePath.java   |   3 +-
 .../namenode/ITestProvidedImplementation.java   | 927 ++++++++++++++++++
 .../hdfs/server/namenode/RandomTreeWalk.java    |   4 +-
 .../TestNameNodeProvidedImplementation.java     | 934 -------------------
 27 files changed, 1040 insertions(+), 1076 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index e9e6103..fd7f9e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -47,7 +47,6 @@ public final class HdfsConstants {
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
   public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
-  // branch HDFS-9806 XXX temporary until HDFS-7076
   public static final byte PROVIDED_STORAGE_POLICY_ID = 1;
   public static final String PROVIDED_STORAGE_POLICY_NAME = "PROVIDED";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 5ad0bca..29f1b6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
@@ -40,6 +41,32 @@ import com.google.common.collect.Lists;
 @InterfaceStability.Evolving
 public class LocatedBlock {
 
+  /**
+   * Comparator that ensures that a PROVIDED storage type is greater than any
+   * other storage type. Any other storage types are considered equal.
+   */
+  private static class ProvidedLastComparator
+      implements Comparator<DatanodeInfoWithStorage>, Serializable {
+
+    private static final long serialVersionUID = 6441720011443190984L;
+
+    @Override
+    public int compare(DatanodeInfoWithStorage dns1,
+        DatanodeInfoWithStorage dns2) {
+      if (StorageType.PROVIDED.equals(dns1.getStorageType())
+          && !StorageType.PROVIDED.equals(dns2.getStorageType())) {
+        return 1;
+      }
+      if (!StorageType.PROVIDED.equals(dns1.getStorageType())
+          && StorageType.PROVIDED.equals(dns2.getStorageType())) {
+        return -1;
+      }
+      // Storage types of dns1 and dns2 are now both provided or not provided;
+      // thus, are essentially equal for the purpose of this comparator.
+      return 0;
+    }
+  }
+
   private final ExtendedBlock b;
   private long offset;  // offset of the first byte of the block in the file
   private final DatanodeInfoWithStorage[] locs;
@@ -52,6 +79,10 @@ public class LocatedBlock {
   // their locations are not part of this object
   private boolean corrupt;
   private Token<BlockTokenIdentifier> blockToken = new Token<>();
+
+  // use one instance of the Provided comparator as it uses no state.
+  private static ProvidedLastComparator providedLastComparator =
+      new ProvidedLastComparator();
   /**
    * List of cached datanode locations
    */
@@ -157,29 +188,6 @@ public class LocatedBlock {
   }
 
   /**
-   * Comparator that ensures that a PROVIDED storage type is greater than
-   * any other storage type. Any other storage types are considered equal.
-   */
-  private class ProvidedLastComparator
-    implements Comparator<DatanodeInfoWithStorage> {
-    @Override
-    public int compare(DatanodeInfoWithStorage dns1,
-        DatanodeInfoWithStorage dns2) {
-      if (StorageType.PROVIDED.equals(dns1.getStorageType())
-          && !StorageType.PROVIDED.equals(dns2.getStorageType())) {
-        return 1;
-      }
-      if (!StorageType.PROVIDED.equals(dns1.getStorageType())
-          && StorageType.PROVIDED.equals(dns2.getStorageType())) {
-        return -1;
-      }
-      // Storage types of dns1 and dns2 are now both provided or not provided;
-      // thus, are essentially equal for the purpose of this comparator.
-      return 0;
-    }
-  }
-
-  /**
    * Moves all locations that have {@link StorageType}
    * {@code PROVIDED} to the end of the locations array without
    * changing the relative ordering of the remaining locations
@@ -196,9 +204,8 @@ public class LocatedBlock {
     }
     // as this is a stable sort, for elements that are equal,
     // the current order of the elements is maintained
-    Arrays.sort(locs, 0,
-        (activeLen < locs.length) ? activeLen : locs.length,
-        new ProvidedLastComparator());
+    Arrays.sort(locs, 0, (activeLen < locs.length) ? activeLen : locs.length,
+        providedLastComparator);
   }
 
   public long getStartOffset() {
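
[Editor's note] The LocatedBlock.java hunk above turns ProvidedLastComparator into a static, Serializable nested class and reuses a single shared instance, since the comparator holds no state. The following is a minimal, self-contained sketch of that pattern; the StorageKind enum and class names are illustrative stand-ins, not Hadoop types.

import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;

public class ProvidedLastSketch {
  enum StorageKind { DISK, SSD, PROVIDED }

  // Stateless, so one shared instance is enough; Serializable (with a
  // serialVersionUID) so objects holding a reference can be serialized.
  static class ProvidedLastComparator
      implements Comparator<StorageKind>, Serializable {
    private static final long serialVersionUID = 1L;
    @Override
    public int compare(StorageKind a, StorageKind b) {
      // PROVIDED sorts after everything else; all other kinds compare
      // equal, so a stable sort keeps their existing relative order.
      return Boolean.compare(a == StorageKind.PROVIDED,
                             b == StorageKind.PROVIDED);
    }
  }

  private static final ProvidedLastComparator PROVIDED_LAST =
      new ProvidedLastComparator();

  public static void main(String[] args) {
    StorageKind[] locs =
        { StorageKind.PROVIDED, StorageKind.DISK, StorageKind.SSD };
    Arrays.sort(locs, PROVIDED_LAST);
    System.out.println(Arrays.toString(locs)); // [DISK, SSD, PROVIDED]
  }
}

Because Arrays.sort on object arrays is stable, the non-PROVIDED locations keep the order they already had (for example, sorted by network distance) while the PROVIDED entries move to the end, which is exactly what moveProvidedToEnd relies on.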

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 8f59df6..111ade1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -192,7 +192,7 @@ public abstract class BlockInfo extends Block
       DatanodeStorageInfo cur = getStorageInfo(idx);
       if(cur != null) {
         if (cur.getStorageType() == StorageType.PROVIDED) {
-          //if block resides on provided storage, only match the storage ids
+          // if block resides on provided storage, only match the storage ids
           if (dn.getStorageInfo(cur.getStorageID()) != null) {
             // do not return here as we have to check the other
             // DatanodeStorageInfos for this block which could be local

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ffdf1fc..a057092 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1229,7 +1229,6 @@ public class BlockManager implements BlockStatsMXBean {
         final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
         final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
             blk);
-        //TODO use locatedBlocks builder??
         return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos,
             false);
       } else {
@@ -2487,8 +2486,8 @@ public class BlockManager implements BlockStatsMXBean {
 
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
-      // !#! Register DN with provided storage, not with storage owned by DN
-      // !#! DN should still have a ref to the DNStorageInfo
+      // Register DN with provided storage, not with storage owned by DN
+      // DN should still have a ref to the DNStorageInfo.
       DatanodeStorageInfo storageInfo =
           providedStorageMap.getStorage(node, storage);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a7e31a2..e6cd513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -532,7 +532,7 @@ public class DatanodeManager {
     } else {
       networktopology.sortByDistance(client, lb.getLocations(), activeLen);
     }
-    //move PROVIDED storage to the end to prefer local replicas.
+    // move PROVIDED storage to the end to prefer local replicas.
     lb.moveProvidedToEnd(activeLen);
     // must update cache since we modified locations array
     lb.updateCachedStorageInfo();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 08d1434..6303775 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -294,6 +294,7 @@ public class ProvidedStorageMap {
 
     @Override
     LocatedBlocks build(DatanodeDescriptor client) {
+      // TODO choose provided locations close to the client.
       return new LocatedBlocks(
           flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
     }
@@ -333,7 +334,6 @@ public class ProvidedStorageMap {
         DatanodeDescriptor dn, DatanodeStorage s) {
       dns.put(dn.getDatanodeUuid(), dn);
       dnR.add(dn);
-      // TODO: maintain separate RPC ident per dn
       return storageMap.get(s.getStorageID());
     }
 
@@ -522,7 +522,7 @@ public class ProvidedStorageMap {
 
     @Override
     public int getNumberOfBlocks() {
-      // VERIFY: only printed for debugging
+      // is ignored for ProvidedBlockList.
       return -1;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 9ad61d7..5409427 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -422,7 +422,7 @@ public abstract class Storage extends StorageInfo {
     public void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
       if (curDir == null) {
-        //if the directory is null, there is nothing to do.
+        // if the directory is null, there is nothing to do.
         return;
       }
       if (curDir.exists()) {
@@ -638,7 +638,7 @@ public abstract class Storage extends StorageInfo {
 
       if (location != null &&
           location.getStorageType() == StorageType.PROVIDED) {
-        //currently we assume that PROVIDED storages are always NORMAL
+        // currently we assume that PROVIDED storages are always NORMAL
         return StorageState.NORMAL;
       }
 
@@ -764,7 +764,7 @@ public abstract class Storage extends StorageInfo {
     public void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
       if (curDir == null || root == null) {
-        //at this point, we do not support recovery on PROVIDED storages
+        // at this point, we do not support recovery on PROVIDED storages
         return;
       }
       String rootPath = root.getCanonicalPath();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
index 150371d..abe92e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
@@ -471,7 +471,7 @@ public class TextFileRegionAliasMap
 
   @Override
   public void close() throws IOException {
-    //nothing to do;
+    // nothing to do;
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 012d1f5..ac5c3ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -443,7 +443,7 @@ public class BlockPoolSliceStorage extends Storage {
         LayoutVersion.Feature.FEDERATION, layoutVersion)) {
       return;
     }
-    //no upgrades for storage directories that are PROVIDED
+    // no upgrades for storage directories that are PROVIDED
     if (bpSd.getRoot() == null) {
       return;
     }
@@ -640,7 +640,6 @@ public class BlockPoolSliceStorage extends Storage {
    * that holds the snapshot.
    */
   void doFinalize(File dnCurDir) throws IOException {
-    LOG.info("doFinalize: " + dnCurDir);
     if (dnCurDir == null) {
       return; //we do nothing if the directory is null
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index a1bde31..fc1dad1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -149,8 +149,8 @@ public class DataStorage extends Storage {
     final String oldStorageID = sd.getStorageUuid();
     if (sd.getStorageLocation() != null &&
         sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
-      // We only support one provided storage per datanode for now.
-      // TODO support multiple provided storage ids per datanode.
+      // Only one provided storage id is supported.
+      // TODO support multiple provided storage ids
       sd.setStorageUuid(conf.get(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
           DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT));
       return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index 5e02d4f..bd23021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -310,7 +310,6 @@ public abstract class ProvidedReplica extends ReplicaInfo {
 
   @Override
   public int compareWith(ScanInfo info) {
-    //local scanning cannot find any provided blocks.
     if (info.getFileRegion().equals(
         new FileRegion(this.getBlockId(), new Path(getRemoteURI()),
             fileOffset, this.getNumBytes(), this.getGenerationStamp()))) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index d72448d..8ad51de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -108,10 +108,10 @@ public class StorageLocation
     }
     if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED ||
         storageType == StorageType.PROVIDED) {
-      //only one of these is PROVIDED; so it cannot be a match!
+      // only one PROVIDED storage directory can exist; so this cannot match!
       return false;
     }
-    //both storage directories are local
+    // both storage directories are local
     return this.getBpURI(bpid, Storage.STORAGE_DIR_CURRENT).normalize()
         .equals(sd.getRoot().toURI().normalize());
   }
@@ -212,7 +212,9 @@ public class StorageLocation
       conf = new HdfsConfiguration();
     }
     if (storageType == StorageType.PROVIDED) {
-      //skip creation if the storage type is PROVIDED
+      // skip creation if the storage type is PROVIDED
+      Storage.LOG.info("Skipping creating directory for block pool "
+          + blockPoolID + " for PROVIDED storage location " + this);
       return;
     }
 
@@ -231,8 +233,8 @@ public class StorageLocation
 
   @Override  // Checkable
   public VolumeCheckResult check(CheckContext context) throws IOException {
-    //we assume provided storage locations are always healthy,
-    //and check only for local storages.
+    // assume provided storage locations are always healthy,
+    // and check only for local storages.
     if (storageType != StorageType.PROVIDED) {
       DiskChecker.checkDir(
           context.localFileSystem,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 82394f5..2242a26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1758,7 +1758,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
       Set<String> missingVolumesReported = new HashSet<>();
       for (ReplicaInfo b : volumeMap.replicas(bpid)) {
-        //skip blocks in PROVIDED storage
+        // skip PROVIDED replicas.
         if (b.getVolume().getStorageType() == StorageType.PROVIDED) {
           continue;
         }
@@ -2279,7 +2279,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
       if (vol.getStorageType() == StorageType.PROVIDED) {
         if (memBlockInfo == null) {
-          //replica exists on provided store but not in memory
+          // replica exists on provided store but not in memory
           ReplicaInfo diskBlockInfo =
               new ReplicaBuilder(ReplicaState.FINALIZED)
               .setFileRegion(scanInfo.getFileRegion())
@@ -2290,7 +2290,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           volumeMap.add(bpid, diskBlockInfo);
           LOG.warn("Added missing block to memory " + diskBlockInfo);
         } else {
-          //replica exists in memory but not in the provided store
+          // replica exists in memory but not in the provided store
           volumeMap.remove(bpid, blockId);
           LOG.warn("Deleting missing provided block " + memBlockInfo);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index 59ec100..ec1a8fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -224,7 +224,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
     }
 
     public void shutdown(BlockListAsLongs blocksListsAsLongs) {
-      //nothing to do!
+      // nothing to do!
     }
 
     public void compileReport(LinkedList<ScanInfo> report,
@@ -264,7 +264,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       new ConcurrentHashMap<String, ProvidedBlockPoolSlice>();
 
   private ProvidedVolumeDF df;
-  //the remote FileSystem to which this ProvidedVolume points to.
+  // the remote FileSystem to which this ProvidedVolume points to.
   private FileSystem remoteFS;
 
   ProvidedVolumeImpl(FsDatasetImpl dataset, String storageID,
@@ -395,9 +395,9 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
     @JsonProperty
     private boolean atEnd;
 
-    //The id of the last block read when the state of the iterator is saved.
-    //This implementation assumes that provided blocks are returned
-    //in sorted order of the block ids.
+    // The id of the last block read when the state of the iterator is saved.
+    // This implementation assumes that provided blocks are returned
+    // in sorted order of the block ids.
     @JsonProperty
     private long lastBlockId;
   }
@@ -421,7 +421,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
 
     @Override
     public void close() throws IOException {
-      //No action needed
+      blockAliasMap.close();
     }
 
     @Override
@@ -467,14 +467,14 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
 
     @Override
     public void save() throws IOException {
-      //We do not persist the state of this iterator anywhere, locally.
-      //We just re-scan provided volumes as necessary.
+      // We do not persist the state of this iterator locally.
+      // We just re-scan provided volumes as necessary.
       state.lastSavedMs = Time.now();
     }
 
     @Override
     public void setMaxStalenessMs(long maxStalenessMs) {
-      //do not use max staleness
+      // do not use max staleness
     }
 
     @Override
@@ -493,7 +493,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
     }
 
     public void load() throws IOException {
-      //on load, we just rewind the iterator for provided volumes.
+      // on load, we just rewind the iterator for provided volumes.
       rewind();
       LOG.trace("load({}, {}): loaded iterator {}: {}", getStorageID(),
           bpid, name, WRITER.writeValueAsString(state));
@@ -615,7 +615,6 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
       throws InterruptedException, IOException {
     LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
-    //get the report from the appropriate block pool.
     if(bpSlices.containsKey(bpid)) {
       bpSlices.get(bpid).compileReport(report, reportCompiler);
     }
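
[Editor's note] One ProvidedVolumeImpl.java hunk above is behavioral rather than cosmetic: the block iterator's close() now delegates to blockAliasMap.close() instead of being a no-op. A rough, self-contained sketch of that delegation, assuming callers close the iterator (for instance via try-with-resources); all names below are invented for illustration.

import java.io.Closeable;
import java.io.IOException;

class ProvidedBlockIteratorSketch implements Closeable {

  // Stand-in for the alias-map reader the real iterator consults.
  private final Closeable aliasMapReader = () ->
      System.out.println("alias map reader closed");

  // Previously a no-op; now close() releases the underlying reader so the
  // alias-map resources are not leaked once iteration is done.
  @Override
  public void close() throws IOException {
    aliasMapReader.close();
  }

  public static void main(String[] args) throws IOException {
    try (ProvidedBlockIteratorSketch it = new ProvidedBlockIteratorSketch()) {
      // iterate over provided blocks here
    } // close() propagates to the alias-map reader
  }
}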

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 55a7b3e..76eb824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -144,9 +144,11 @@ public class TestBlockStoragePolicy {
     expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
         ", storageTypes=[SSD], creationFallbacks=[DISK], " +
         "replicationFallbacks=[DISK]}");
-    expectedPolicyStrings.put(PROVIDED, "BlockStoragePolicy{PROVIDED:" + PROVIDED +
-        ", storageTypes=[PROVIDED, DISK], creationFallbacks=[PROVIDED, DISK], " +
-        "replicationFallbacks=[PROVIDED, DISK]}");
+    expectedPolicyStrings.put(PROVIDED,
+        "BlockStoragePolicy{PROVIDED:" + PROVIDED
+            + ", storageTypes=[PROVIDED, DISK], "
+            + "creationFallbacks=[PROVIDED, DISK], "
+            + "replicationFallbacks=[PROVIDED, DISK]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i); 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 81405eb..96841cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -325,11 +325,12 @@ public class TestDatanodeManager {
    */
   @Test
   public void testBadScript() throws IOException, URISyntaxException {
-    HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"), 0);
+    HelperFunction("/" + Shell.appendScriptExtension("topology-broken-script"),
+        0);
   }
 
   /**
-   * Test with different sorting functions but include datanodes
+   * Test with different sorting functions but include datanodes.
    * with provided storage
    * @throws IOException
    * @throws URISyntaxException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
index b419c38..c7f8379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -73,13 +73,13 @@ public class TestProvidedStorageMap {
         nameSystemLock, bm, conf);
     DatanodeStorageInfo providedMapStorage =
         providedMap.getProvidedStorageInfo();
-    //the provided storage cannot be null
+    // the provided storage cannot be null
     assertNotNull(providedMapStorage);
 
-    //create a datanode
+    // create a datanode
     DatanodeDescriptor dn1 = createDatanodeDescriptor(5000);
 
-    //associate two storages to the datanode
+    // associate two storages to the datanode
     DatanodeStorage dn1ProvidedStorage = new DatanodeStorage(
         providedStorageID,
         DatanodeStorage.State.NORMAL,
@@ -96,15 +96,15 @@ public class TestProvidedStorageMap {
         dns1Provided == providedMapStorage);
     assertTrue("Disk storage has not yet been registered with block manager",
         dns1Disk == null);
-    //add the disk storage to the datanode.
+    // add the disk storage to the datanode.
     DatanodeStorageInfo dnsDisk = new DatanodeStorageInfo(dn1, dn1DiskStorage);
     dn1.injectStorage(dnsDisk);
     assertTrue("Disk storage must match the injected storage info",
         dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage));
 
-    //create a 2nd datanode
+    // create a 2nd datanode
     DatanodeDescriptor dn2 = createDatanodeDescriptor(5010);
-    //associate a provided storage with the datanode
+    // associate a provided storage with the datanode
     DatanodeStorage dn2ProvidedStorage = new DatanodeStorage(
         providedStorageID,
         DatanodeStorage.State.NORMAL,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
index 210be6e..a7e8b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
@@ -50,9 +50,9 @@ public class TestProvidedReplicaImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final String FILE_NAME = "provided-test";
-  //length of the file that is associated with the provided blocks.
+  // length of the file that is associated with the provided blocks.
   private static final long FILE_LEN = 128 * 1024 * 10L + 64 * 1024;
-  //length of each provided block.
+  // length of each provided block.
   private static final long BLK_LEN = 128 * 1024L;
 
   private static List<ProvidedReplica> replicas;
@@ -63,7 +63,6 @@ public class TestProvidedReplicaImpl {
     if(!newFile.exists()) {
       newFile.createNewFile();
       OutputStream writer = new FileOutputStream(newFile.getAbsolutePath());
-      //FILE_LEN is length in bytes.
       byte[] bytes = new byte[1];
       bytes[0] = (byte) 0;
       for(int i=0; i< FILE_LEN; i++) {
@@ -106,7 +105,7 @@ public class TestProvidedReplicaImpl {
    * @param dataLength length
    * @throws IOException
    */
-  private void verifyReplicaContents(File file,
+  public static void verifyReplicaContents(File file,
       InputStream ins, long fileOffset, long dataLength)
           throws IOException {
 
@@ -142,9 +141,9 @@ public class TestProvidedReplicaImpl {
   public void testProvidedReplicaRead() throws IOException {
 
     File providedFile = new File(BASE_DIR, FILE_NAME);
-    for(int i=0; i < replicas.size(); i++) {
+    for (int i = 0; i < replicas.size(); i++) {
       ProvidedReplica replica = replicas.get(i);
-      //block data should exist!
+      // block data should exist!
       assertTrue(replica.blockDataExists());
       assertEquals(providedFile.toURI(), replica.getBlockURI());
       verifyReplicaContents(providedFile, replica.getDataInputStream(0),
@@ -153,7 +152,7 @@ public class TestProvidedReplicaImpl {
     LOG.info("All replica contents verified");
 
     providedFile.delete();
-    //the block data should no longer be found!
+    // the block data should no longer be found!
     for(int i=0; i < replicas.size(); i++) {
       ProvidedReplica replica = replicas.get(i);
       assertTrue(!replica.blockDataExists());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index e057022..422acc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -26,8 +26,6 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -35,9 +33,6 @@ import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.ReadableByteChannel;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -76,6 +71,7 @@ import org.apache.hadoop.hdfs.server.datanode.ProvidedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.TestProvidedReplicaImpl;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
@@ -97,7 +93,7 @@ public class TestProvidedImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_LOCAL_INIT_VOLUMES = 1;
-  //only support one provided volume for now.
+  // only support one provided volume for now.
   private static final int NUM_PROVIDED_INIT_VOLUMES = 1;
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
   private static final int NUM_PROVIDED_BLKS = 10;
@@ -168,7 +164,7 @@ public class TestProvidedImpl {
 
     @Override
     public void remove() {
-      //do nothing.
+      // do nothing.
     }
 
     public void resetMinBlockId(int minId) {
@@ -314,33 +310,6 @@ public class TestProvidedImpl {
     }
   }
 
-  private void compareBlkFile(InputStream ins, String filepath)
-      throws FileNotFoundException, IOException {
-    try (ReadableByteChannel i = Channels.newChannel(
-        new FileInputStream(new File(filepath)))) {
-      try (ReadableByteChannel j = Channels.newChannel(ins)) {
-        ByteBuffer ib = ByteBuffer.allocate(4096);
-        ByteBuffer jb = ByteBuffer.allocate(4096);
-        while (true) {
-          int il = i.read(ib);
-          int jl = j.read(jb);
-          if (il < 0 || jl < 0) {
-            assertEquals(il, jl);
-            break;
-          }
-          ib.flip();
-          jb.flip();
-          int cmp = Math.min(ib.remaining(), jb.remaining());
-          for (int k = 0; k < cmp; ++k) {
-            assertEquals(ib.get(), jb.get());
-          }
-          ib.compact();
-          jb.compact();
-        }
-      }
-    }
-  }
-
   @Before
   public void setUp() throws IOException {
     datanode = mock(DataNode.class);
@@ -392,7 +361,7 @@ public class TestProvidedImpl {
     assertEquals(0, dataset.getNumFailedVolumes());
 
     for (int i = 0; i < providedVolumes.size(); i++) {
-      //check basic information about provided volume
+      // check basic information about provided volume
       assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT,
           providedVolumes.get(i).getStorageID());
       assertEquals(StorageType.PROVIDED,
@@ -400,7 +369,7 @@ public class TestProvidedImpl {
 
       long space = providedVolumes.get(i).getBlockPoolUsed(
               BLOCK_POOL_IDS[CHOSEN_BP_ID]);
-      //check the df stats of the volume
+      // check the df stats of the volume
       assertEquals(spaceUsed, space);
       assertEquals(NUM_PROVIDED_BLKS, providedVolumes.get(i).getNumBlocks());
 
@@ -409,7 +378,7 @@ public class TestProvidedImpl {
       try {
         assertEquals(0, providedVolumes.get(i)
             .getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));
-        //should not be triggered
+        // should not be triggered
         assertTrue(false);
       } catch (IOException e) {
         LOG.info("Expected exception: " + e);
@@ -428,7 +397,7 @@ public class TestProvidedImpl {
       assertEquals(vol.getBlockPoolList().length, BLOCK_POOL_IDS.length);
       for (int j = 0; j < BLOCK_POOL_IDS.length; j++) {
         if (j != CHOSEN_BP_ID) {
-          //this block pool should not have any blocks
+          // this block pool should not have any blocks
           assertEquals(null, volumeMap.replicas(BLOCK_POOL_IDS[j]));
         }
       }
@@ -445,7 +414,8 @@ public class TestProvidedImpl {
           HdfsConstants.GRANDFATHER_GENERATION_STAMP);
       InputStream ins = dataset.getBlockInputStream(eb, 0);
       String filepath = blkToPathMap.get((long) id);
-      compareBlkFile(ins, filepath);
+      TestProvidedReplicaImpl.verifyReplicaContents(new File(filepath), ins, 0,
+          BLK_LEN);
     }
   }
 
@@ -462,7 +432,7 @@ public class TestProvidedImpl {
         ExtendedBlock eb = iter.nextBlock();
         long blkId = eb.getBlockId();
         assertTrue(blkId >= MIN_BLK_ID && blkId < NUM_PROVIDED_BLKS);
-        //all block ids must be unique!
+        // all block ids must be unique!
         assertTrue(!blockIdsUsed.contains(blkId));
         blockIdsUsed.add(blkId);
       }
@@ -473,14 +443,14 @@ public class TestProvidedImpl {
       while(!iter.atEnd()) {
         ExtendedBlock eb = iter.nextBlock();
         long blkId = eb.getBlockId();
-        //the block should have already appeared in the first scan.
+        // the block should have already appeared in the first scan.
         assertTrue(blockIdsUsed.contains(blkId));
         blockIdsUsed.remove(blkId);
       }
-      //none of the blocks should remain in blockIdsUsed
+      // none of the blocks should remain in blockIdsUsed
       assertEquals(0, blockIdsUsed.size());
 
-      //the other block pool should not contain any blocks!
+      // the other block pool should not contain any blocks!
       BlockIterator nonProvidedBpIter =
           vol.newBlockIterator(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], "temp");
       assertEquals(null, nonProvidedBpIter.nextBlock());
@@ -513,8 +483,8 @@ public class TestProvidedImpl {
   public void testProvidedVolumeContents() throws IOException {
     int expectedBlocks = 5;
     int minId = 0;
-    //use a path which has the same prefix as providedBasePath
-    //all these blocks can belong to the provided volume
+    // use a path which has the same prefix as providedBasePath
+    // all these blocks can belong to the provided volume
     int blocksFound = getBlocksInProvidedVolumes(providedBasePath + "/test1/",
         expectedBlocks, minId);
     assertEquals(
@@ -525,8 +495,8 @@ public class TestProvidedImpl {
     assertEquals(
         "Number of blocks in provided volumes should be " + expectedBlocks,
         expectedBlocks, blocksFound);
-    //use a path that is entirely different from the providedBasePath
-    //none of these blocks can belong to the volume
+    // use a path that is entirely different from the providedBasePath
+    // none of these blocks can belong to the volume
     blocksFound =
         getBlocksInProvidedVolumes("randomtest1/", expectedBlocks, minId);
     assertEquals("Number of blocks in provided volumes should be 0", 0,
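
[Editor's note] The private compareBlkFile helper above is removed in favour of the now-public TestProvidedReplicaImpl.verifyReplicaContents. For readers outside the test tree, here is a rough, self-contained sketch of the check such a helper performs (match a stream byte-for-byte against a slice of a file); the names are invented and this is not the Hadoop test code itself.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

final class ReplicaContentCheck {
  static void assertStreamMatchesFile(File file, InputStream ins,
      long fileOffset, long dataLength) throws IOException {
    try (FileInputStream fis = new FileInputStream(file)) {
      // position the file stream at the replica's offset
      if (fis.skip(fileOffset) != fileOffset) {
        throw new IOException("could not seek to offset " + fileOffset);
      }
      // compare dataLength bytes one at a time (a buffered compare would
      // be faster; this keeps the sketch short)
      for (long i = 0; i < dataLength; i++) {
        int expected = fis.read();
        int actual = ins.read();
        if (expected != actual) {
          throw new AssertionError("mismatch at byte " + (fileOffset + i)
              + ": expected " + expected + ", got " + actual);
        }
      }
    }
  }
}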

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml
index 8661c82..2e3e66a 100644
--- a/hadoop-tools/hadoop-fs2img/pom.xml
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -23,8 +23,8 @@
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-fs2img</artifactId>
   <version>3.1.0-SNAPSHOT</version>
-  <description>fs2img</description>
-  <name>fs2img</name>
+  <description>Apache Hadoop Image Generation Tool</description>
+  <name>Apache Hadoop Image Generation Tool</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
index 80bbaf9..afe10ff 100644
--- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
@@ -125,7 +125,8 @@ public class FileSystemImage implements Tool {
         opts.blockPoolID(o.getValue());
         break;
       default:
-        throw new UnsupportedOperationException("Internal error");
+        throw new UnsupportedOperationException(
+            "Unknown option: " + o.getOpt());
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index c21c282..1be5190 100644
--- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -236,7 +236,7 @@ public class ImageWriter implements Closeable {
       if (null == e) {
         return super.put(p, b);
       }
-      //merge
+      // merge
       e.addAllChildren(b.getChildrenList());
       // not strictly conforming
       return e;
@@ -265,7 +265,6 @@ public class ImageWriter implements Closeable {
     e.writeDelimitedTo(dirs);
   }
 
-  // from FSImageFormatProtobuf... why not just read position from the stream?
   private static int getOndiskSize(com.google.protobuf.GeneratedMessage s) {
     return CodedOutputStream.computeRawVarint32Size(s.getSerializedSize())
         + s.getSerializedSize();
@@ -283,7 +282,7 @@ public class ImageWriter implements Closeable {
     dircache.clear();
 
     // close side files
-    IOUtils.cleanup(null, dirs, inodes, blocks);
+    IOUtils.cleanupWithLogger(null, dirs, inodes, blocks);
     if (null == dirs || null == inodes) {
       // init failed
       if (raw != null) {
@@ -317,7 +316,6 @@ public class ImageWriter implements Closeable {
    */
   void writeMD5(String imagename) throws IOException {
     if (null == outdir) {
-      //LOG.warn("Not writing MD5");
       return;
     }
     MD5Hash md5 = new MD5Hash(digest.digest());
@@ -382,7 +380,6 @@ public class ImageWriter implements Closeable {
 
   void writeDirSection() throws IOException {
     // No header, so dirs can be written/compressed independently
-    //INodeDirectorySection.Builder b = INodeDirectorySection.newBuilder();
     OutputStream sec = raw;
     // copy dirs
     try (FileInputStream in = new FileInputStream(dirsTmp)) {
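
[Editor's note] The ImageWriter.java hunk above swaps IOUtils.cleanup for IOUtils.cleanupWithLogger, its slf4j-based counterpart. A small sketch of the call pattern, assuming Hadoop's org.apache.hadoop.io.IOUtils is on the classpath; the class and method names below are invented.

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class CleanupSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupSketch.class);

  static void closeSideFiles(Closeable dirs, Closeable inodes,
      Closeable blocks) {
    // Each stream is closed in turn; an IOException from close() is logged
    // rather than propagated (and ignored entirely when the logger argument
    // is null, as in the hunk above).
    IOUtils.cleanupWithLogger(LOG, dirs, inodes, blocks);
  }
}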

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
index d60806f..9c42c11 100644
--- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java
@@ -84,11 +84,11 @@ public class SingleUGIResolver extends UGIResolver implements Configurable {
 
   @Override
   public void addUser(String name) {
-    //do nothing
+    // do nothing
   }
 
   @Override
   public void addGroup(String name) {
-    //do nothing
+    // do nothing
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
index dde351f..fd4dbff 100644
--- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
+++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java
@@ -121,7 +121,6 @@ public class TreePath {
   INode toFile(UGIResolver ugi, BlockResolver blk,
       BlockAliasMap.Writer<FileRegion> out) throws IOException {
     final FileStatus s = getFileStatus();
-    // TODO should this store resolver's user/group?
     ugi.addUser(s.getOwner());
     ugi.addGroup(s.getGroup());
     INodeFile.Builder b = INodeFile.newBuilder()
@@ -142,7 +141,7 @@ public class TreePath {
             "Exact path handle not supported by filesystem " + fs.toString());
       }
     }
-    //TODO: storage policy should be configurable per path; use BlockResolver
+    // TODO: storage policy should be configurable per path; use BlockResolver
     long off = 0L;
     for (BlockProto block : blk.resolve(s)) {
       b.addBlocks(block);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
new file mode 100644
index 0000000..49c9bcf
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
@@ -0,0 +1,927 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.file.Files;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.InMemoryLevelDBAliasMapClient;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.net.NodeBase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
+import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
+import static org.junit.Assert.*;
+
+/**
+ * Integration tests for the Provided implementation.
+ */
+public class ITestProvidedImplementation {
+
+  @Rule public TestName name = new TestName();
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ITestProvidedImplementation.class);
+
+  private final Random r = new Random();
+  private final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
+  private final Path pBASE = new Path(fBASE.toURI().toString());
+  private final Path providedPath = new Path(pBASE, "providedDir");
+  private final Path nnDirPath = new Path(pBASE, "nnDir");
+  private final String singleUser = "usr1";
+  private final String singleGroup = "grp1";
+  private final int numFiles = 10;
+  private final String filePrefix = "file";
+  private final String fileSuffix = ".dat";
+  private final int baseFileLen = 1024;
+  private long providedDataSize = 0;
+  private final String bpid = "BP-1234-10.1.1.1-1224";
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+
+  @Before
+  public void setSeed() throws Exception {
+    if (fBASE.exists() && !FileUtil.fullyDelete(fBASE)) {
+      throw new IOException("Could not fully delete " + fBASE);
+    }
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println(name.getMethodName() + " seed: " + seed);
+    conf = new HdfsConfiguration();
+    conf.set(SingleUGIResolver.USER, singleUser);
+    conf.set(SingleUGIResolver.GROUP, singleGroup);
+
+    conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
+        DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+
+    conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+        TextFileRegionAliasMap.class, BlockAliasMap.class);
+    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR,
+        nnDirPath.toString());
+    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE,
+        new Path(nnDirPath, fileNameFromBlockPoolID(bpid)).toString());
+    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, ",");
+
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
+        new File(providedPath.toUri()).toString());
+    File imageDir = new File(providedPath.toUri());
+    if (!imageDir.exists()) {
+      LOG.info("Creating directory: " + imageDir);
+      imageDir.mkdirs();
+    }
+
+    File nnDir = new File(nnDirPath.toUri());
+    if (!nnDir.exists()) {
+      nnDir.mkdirs();
+    }
+
+    // create numFiles files under providedPath;
+    // file i has length i * baseFileLen bytes
+    for (int i = 0; i < numFiles; i++) {
+      File newFile = new File(
+          new Path(providedPath, filePrefix + i + fileSuffix).toUri());
+      if (!newFile.exists()) {
+        LOG.info("Creating " + newFile.toString());
+        newFile.createNewFile();
+        // any IOException here will fail the test setup
+        try (Writer writer = new OutputStreamWriter(
+            new FileOutputStream(newFile.getAbsolutePath()), "utf-8")) {
+          for (int j = 0; j < baseFileLen * i; j++) {
+            writer.write("0");
+          }
+          writer.flush();
+        }
+        providedDataSize += newFile.length();
+      }
+    }
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    try {
+      if (cluster != null) {
+        cluster.shutdown(true, true);
+      }
+    } finally {
+      cluster = null;
+    }
+  }
+
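+  /**
+   * Create an FSImage for the given tree, using an empty cluster ID and a
+   * {@link TextFileRegionAliasMap}.
+   */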
+  void createImage(TreeWalk t, Path out,
+      Class<? extends BlockResolver> blockIdsClass) throws Exception {
+    createImage(t, out, blockIdsClass, "", TextFileRegionAliasMap.class);
+  }
+
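+  /**
+   * Create an FSImage for the given tree using the specified block resolver
+   * and block alias map.
+   * @param t the tree to write to the image.
+   * @param out the output directory for the image.
+   * @param blockIdsClass the {@link BlockResolver} to use.
+   * @param clusterID the cluster ID to set for the image.
+   * @param aliasMapClass the {@link BlockAliasMap} implementation to use.
+   */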
+  void createImage(TreeWalk t, Path out,
+      Class<? extends BlockResolver> blockIdsClass, String clusterID,
+      Class<? extends BlockAliasMap> aliasMapClass) throws Exception {
+    ImageWriter.Options opts = ImageWriter.defaults();
+    opts.setConf(conf);
+    opts.output(out.toString())
+        .blocks(aliasMapClass)
+        .blockIds(blockIdsClass)
+        .clusterID(clusterID)
+        .blockPoolID(bpid);
+    try (ImageWriter w = new ImageWriter(opts)) {
+      for (TreePath e : t) {
+        w.accept(e);
+      }
+    }
+  }
+
+  void startCluster(Path nspath, int numDatanodes,
+      StorageType[] storageTypes,
+      StorageType[][] storageTypesPerDatanode,
+      boolean doFormat) throws IOException {
+    startCluster(nspath, numDatanodes, storageTypes, storageTypesPerDatanode,
+        doFormat, null);
+  }
+
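+  /**
+   * Start a MiniDFSCluster with the given namespace directory and datanodes.
+   * @param nspath path to use as the NameNode name directory.
+   * @param numDatanodes number of datanodes to start.
+   * @param storageTypes storage types used by every datanode; ignored if
+   *          storageTypesPerDatanode is set.
+   * @param storageTypesPerDatanode per-datanode storage types, if any.
+   * @param doFormat whether to format the NameNode and manage its name dirs.
+   * @param racks rack assignments for the datanodes, or null.
+   */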
+  void startCluster(Path nspath, int numDatanodes,
+      StorageType[] storageTypes,
+      StorageType[][] storageTypesPerDatanode,
+      boolean doFormat, String[] racks) throws IOException {
+    conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
+
+    if (storageTypesPerDatanode != null) {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
+          .numDataNodes(numDatanodes)
+          .storageTypes(storageTypesPerDatanode)
+          .racks(racks)
+          .build();
+    } else if (storageTypes != null) {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
+          .numDataNodes(numDatanodes)
+          .storagesPerDatanode(storageTypes.length)
+          .storageTypes(storageTypes)
+          .racks(racks)
+          .build();
+    } else {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
+          .numDataNodes(numDatanodes)
+          .racks(racks)
+          .build();
+    }
+    cluster.waitActive();
+  }
+
+  @Test(timeout=20000)
+  public void testLoadImage() throws Exception {
+    final long seed = r.nextLong();
+    LOG.info("providedPath: " + providedPath);
+    createImage(new RandomTreeWalk(seed), nnDirPath, FixedBlockResolver.class);
+    startCluster(nnDirPath, 0,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
+        false);
+
+    FileSystem fs = cluster.getFileSystem();
+    for (TreePath e : new RandomTreeWalk(seed)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = new Path(rs.getPath().toUri().getPath());
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+      assertEquals(rs.getPath().toUri().getPath(),
+                   hs.getPath().toUri().getPath());
+      assertEquals(rs.getPermission(), hs.getPermission());
+      assertEquals(rs.getLen(), hs.getLen());
+      assertEquals(singleUser, hs.getOwner());
+      assertEquals(singleGroup, hs.getGroup());
+      assertEquals(rs.getAccessTime(), hs.getAccessTime());
+      assertEquals(rs.getModificationTime(), hs.getModificationTime());
+    }
+  }
+
+  @Test(timeout=30000)
+  public void testProvidedReporting() throws Exception {
+    conf.setClass(ImageWriter.Options.UGI_CLASS,
+        SingleUGIResolver.class, UGIResolver.class);
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    int numDatanodes = 10;
+    startCluster(nnDirPath, numDatanodes,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
+        false);
+    long diskCapacity = 1000;
+    // set the DISK capacity for testing
+    for (DataNode dn: cluster.getDataNodes()) {
+      for (FsVolumeSpi ref : dn.getFSDataset().getFsVolumeReferences()) {
+        if (ref.getStorageType() == StorageType.DISK) {
+          ((FsVolumeImpl) ref).setCapacityForTesting(diskCapacity);
+        }
+      }
+    }
+    // trigger heartbeats to update the capacities
+    cluster.triggerHeartbeats();
+    Thread.sleep(10000);
+    // verify namenode stats
+    FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
+    DatanodeStatistics dnStats = namesystem.getBlockManager()
+        .getDatanodeManager().getDatanodeStatistics();
+
+    // total capacity reported includes only the local volumes and
+    // not the provided capacity
+    assertEquals(diskCapacity * numDatanodes, namesystem.getTotal());
+
+    // the PROVIDED storage should be reported as fully used,
+    // with no remaining capacity
+    assertEquals(providedDataSize, dnStats.getProvidedCapacity());
+    assertEquals(providedDataSize, namesystem.getProvidedCapacityTotal());
+    assertEquals(providedDataSize, dnStats.getStorageTypeStats()
+        .get(StorageType.PROVIDED).getCapacityTotal());
+    assertEquals(providedDataSize, dnStats.getStorageTypeStats()
+        .get(StorageType.PROVIDED).getCapacityUsed());
+
+    // verify datanode stats
+    for (DataNode dn: cluster.getDataNodes()) {
+      for (StorageReport report : dn.getFSDataset()
+          .getStorageReports(namesystem.getBlockPoolId())) {
+        if (report.getStorage().getStorageType() == StorageType.PROVIDED) {
+          assertEquals(providedDataSize, report.getCapacity());
+          assertEquals(providedDataSize, report.getDfsUsed());
+          assertEquals(providedDataSize, report.getBlockPoolUsed());
+          assertEquals(0, report.getNonDfsUsed());
+          assertEquals(0, report.getRemaining());
+        }
+      }
+    }
+
+    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+            cluster.getNameNodePort()), cluster.getConfiguration(0));
+    BlockManager bm = namesystem.getBlockManager();
+    for (int fileId = 0; fileId < numFiles; fileId++) {
+      String filename = "/" + filePrefix + fileId + fileSuffix;
+      LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+          filename, 0, baseFileLen);
+      for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+        BlockInfo blockInfo =
+            bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock());
+        Iterator<DatanodeStorageInfo> storagesItr = blockInfo.getStorageInfos();
+
+        DatanodeStorageInfo info = storagesItr.next();
+        assertEquals(StorageType.PROVIDED, info.getStorageType());
+        DatanodeDescriptor dnDesc = info.getDatanodeDescriptor();
+        // check that the locations returned by FSCK have the right name
+        assertEquals(ProvidedStorageMap.ProvidedDescriptor.NETWORK_LOCATION
+            + PATH_SEPARATOR_STR + ProvidedStorageMap.ProvidedDescriptor.NAME,
+            NodeBase.getPath(dnDesc));
+        // no DatanodeStorageInfos should remain
+        assertFalse(storagesItr.hasNext());
+      }
+    }
+  }
+
+  @Test(timeout=500000)
+  public void testDefaultReplication() throws Exception {
+    int targetReplication = 2;
+    conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, targetReplication);
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockMultiReplicaResolver.class);
+    // configure the last datanode with only DISK storage
+    startCluster(nnDirPath, 3, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.DISK}},
+        false);
+    // wait for the replication to finish
+    Thread.sleep(50000);
+
+    FileSystem fs = cluster.getFileSystem();
+    int count = 0;
+    for (TreePath e : new FSTreeWalk(providedPath, conf)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = removePrefix(providedPath, rs.getPath());
+      LOG.info("path: " + hp.toUri().getPath());
+      e.accept(count++);
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+
+      if (rs.isFile()) {
+        BlockLocation[] bl = fs.getFileBlockLocations(
+            hs.getPath(), 0, hs.getLen());
+        for (BlockLocation loc : bl) {
+          int currentRep = loc.getHosts().length;
+          assertEquals(targetReplication, currentRep);
+        }
+      }
+    }
+  }
+
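+  /**
+   * Strip {@code base} from {@code walk} and return the remaining path,
+   * rooted at "/"; e.g., removePrefix(/a/b, /a/b/c/d) returns /c/d.
+   * @throws IllegalArgumentException if base is not a prefix of walk.
+   */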
+  static Path removePrefix(Path base, Path walk) {
+    Path wpath = new Path(walk.toUri().getPath());
+    Path bpath = new Path(base.toUri().getPath());
+    Path ret = new Path("/");
+    while (!(bpath.equals(wpath) || "".equals(wpath.getName()))) {
+      ret = "".equals(ret.getName())
+        ? new Path("/", wpath.getName())
+        : new Path(new Path("/", wpath.getName()),
+                   new Path(ret.toString().substring(1)));
+      wpath = wpath.getParent();
+    }
+    if (!bpath.equals(wpath)) {
+      throw new IllegalArgumentException(base + " not a prefix of " + walk);
+    }
+    return ret;
+  }
+
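+  /**
+   * Walk the provided directory and verify that each path is exposed by the
+   * NameNode with matching permissions, ownership and, for files, contents.
+   */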
+  private void verifyFileSystemContents() throws Exception {
+    FileSystem fs = cluster.getFileSystem();
+    int count = 0;
+    // read NN metadata, verify contents match
+    for (TreePath e : new FSTreeWalk(providedPath, conf)) {
+      FileStatus rs = e.getFileStatus();
+      Path hp = removePrefix(providedPath, rs.getPath());
+      LOG.info("path: " + hp.toUri().getPath());
+      e.accept(count++);
+      assertTrue(fs.exists(hp));
+      FileStatus hs = fs.getFileStatus(hp);
+      assertEquals(hp.toUri().getPath(), hs.getPath().toUri().getPath());
+      assertEquals(rs.getPermission(), hs.getPermission());
+      assertEquals(rs.getOwner(), hs.getOwner());
+      assertEquals(rs.getGroup(), hs.getGroup());
+
+      if (rs.isFile()) {
+        assertEquals(rs.getLen(), hs.getLen());
+        try (ReadableByteChannel i = Channels.newChannel(
+              new FileInputStream(new File(rs.getPath().toUri())))) {
+          try (ReadableByteChannel j = Channels.newChannel(
+                fs.open(hs.getPath()))) {
+            ByteBuffer ib = ByteBuffer.allocate(4096);
+            ByteBuffer jb = ByteBuffer.allocate(4096);
+            while (true) {
+              int il = i.read(ib);
+              int jl = j.read(jb);
+              if (il < 0 || jl < 0) {
+                assertEquals(il, jl);
+                break;
+              }
+              ib.flip();
+              jb.flip();
+              int cmp = Math.min(ib.remaining(), jb.remaining());
+              for (int k = 0; k < cmp; ++k) {
+                assertEquals(ib.get(), jb.get());
+              }
+              ib.compact();
+              jb.compact();
+            }
+
+          }
+        }
+      }
+    }
+  }
+
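+  /**
+   * Create a file on local (non-PROVIDED) storage with the given
+   * replication, length and block length, and return its block locations.
+   */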
+  private BlockLocation[] createFile(Path path, short replication,
+      long fileLen, long blockLen) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    // create a file that is not provided
+    DFSTestUtil.createFile(fs, path, false, (int) blockLen,
+        fileLen, blockLen, replication, 0, true);
+    return fs.getFileBlockLocations(path, 0, fileLen);
+  }
+
+  @Test(timeout=30000)
+  public void testClusterWithEmptyImage() throws IOException {
+    // start a cluster with 2 datanodes without any provided storage
+    startCluster(nnDirPath, 2, null,
+        new StorageType[][] {
+            {StorageType.DISK},
+            {StorageType.DISK}},
+        true);
+    assertTrue(cluster.isClusterUp());
+    assertTrue(cluster.isDataNodeUp());
+
+    BlockLocation[] locations = createFile(new Path("/testFile1.dat"),
+        (short) 2, 1024*1024, 1024*1024);
+    assertEquals(1, locations.length);
+    assertEquals(2, locations[0].getHosts().length);
+  }
+
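+  /**
+   * Get the located blocks of the given file, and verify the number of
+   * blocks and the number of unique locations of the first block.
+   */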
+  private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
+      String filename, long fileLen, long expectedBlocks, int expectedLocations)
+      throws IOException {
+    LocatedBlocks locatedBlocks = client.getLocatedBlocks(filename, 0, fileLen);
+    // verify that the expected number of blocks is returned
+    // for the given offset and length
+    assertEquals(expectedBlocks, locatedBlocks.getLocatedBlocks().size());
+    DatanodeInfo[] locations =
+        locatedBlocks.getLocatedBlocks().get(0).getLocations();
+    assertEquals(expectedLocations, locations.length);
+    checkUniqueness(locations);
+    return locations;
+  }
+
+  /**
+   * Verify that the given locations are all unique.
+   * @param locations the locations to check for duplicates.
+   */
+  private void checkUniqueness(DatanodeInfo[] locations) {
+    Set<String> set = new HashSet<>();
+    for (DatanodeInfo info: locations) {
+      assertFalse("All locations should be unique",
+          set.contains(info.getDatanodeUuid()));
+      set.add(info.getDatanodeUuid());
+    }
+  }
+
+  /**
+   * Tests setting replication of provided files.
+   * @throws Exception
+   */
+  @Test(timeout=50000)
+  public void testSetReplicationForProvidedFiles() throws Exception {
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    // 10 Datanodes with both DISK and PROVIDED storage
+    startCluster(nnDirPath, 10,
+        new StorageType[]{
+            StorageType.PROVIDED, StorageType.DISK},
+        null,
+        false);
+    setAndUnsetReplication("/" + filePrefix + (numFiles - 1) + fileSuffix);
+  }
+
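+  /**
+   * Raise the replication of the given file to 4, verify the reported
+   * locations, then lower it back and verify that the default replication
+   * is reported.
+   */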
+  private void setAndUnsetReplication(String filename) throws Exception {
+    Path file = new Path(filename);
+    FileSystem fs = cluster.getFileSystem();
+    // set the replication to 4, and test that the file has
+    // the required replication.
+    short newReplication = 4;
+    LOG.info("Setting replication of file {} to {}", filename, newReplication);
+    fs.setReplication(file, newReplication);
+    DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+        file, newReplication, 10000);
+    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), cluster.getConfiguration(0));
+    getAndCheckBlockLocations(client, filename, baseFileLen, 1, newReplication);
+
+    // set the replication back to 1
+    newReplication = 1;
+    LOG.info("Setting replication of file {} back to {}",
+        filename, newReplication);
+    fs.setReplication(file, newReplication);
+    // defaultReplication number of replicas should be returned
+    int defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+    DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+        file, (short) defaultReplication, 10000);
+    getAndCheckBlockLocations(client, filename, baseFileLen, 1,
+        defaultReplication);
+  }
+
+  @Test(timeout=30000)
+  public void testProvidedDatanodeFailures() throws Exception {
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+            FixedBlockResolver.class);
+    startCluster(nnDirPath, 3, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.DISK}},
+        false);
+
+    DataNode providedDatanode1 = cluster.getDataNodes().get(0);
+    DataNode providedDatanode2 = cluster.getDataNodes().get(1);
+
+    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), cluster.getConfiguration(0));
+
+    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
+
+    if (numFiles >= 1) {
+      String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
+      // 2 locations returned as there are 2 PROVIDED datanodes
+      DatanodeInfo[] dnInfos =
+          getAndCheckBlockLocations(client, filename, baseFileLen, 1, 2);
+      // the location should be one of the provided DNs available
+      assertTrue(
+          dnInfos[0].getDatanodeUuid().equals(
+              providedDatanode1.getDatanodeUuid())
+          || dnInfos[0].getDatanodeUuid().equals(
+              providedDatanode2.getDatanodeUuid()));
+
+      // stop the 1st provided datanode
+      MiniDFSCluster.DataNodeProperties providedDNProperties1 =
+          cluster.stopDataNode(0);
+
+      // make NameNode detect that datanode is down
+      BlockManagerTestUtil.noticeDeadDatanode(
+          cluster.getNameNode(),
+          providedDatanode1.getDatanodeId().getXferAddr());
+
+      // should find the block on the 2nd provided datanode
+      dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
+      assertEquals(providedDatanode2.getDatanodeUuid(),
+          dnInfos[0].getDatanodeUuid());
+
+      // stop the 2nd provided datanode
+      MiniDFSCluster.DataNodeProperties providedDNProperties2 =
+          cluster.stopDataNode(0);
+      // make NameNode detect that datanode is down
+      BlockManagerTestUtil.noticeDeadDatanode(
+          cluster.getNameNode(),
+          providedDatanode2.getDatanodeId().getXferAddr());
+      getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);
+
+      // the block report count of the ProvidedDatanodeStorageInfo should
+      // reset to 0 when all DNs with PROVIDED storage fail.
+      assertEquals(0, providedDNInfo.getBlockReportCount());
+      // restart the provided datanode
+      cluster.restartDataNode(providedDNProperties1, true);
+      cluster.waitActive();
+
+      assertEquals(1, providedDNInfo.getBlockReportCount());
+
+      // should find the block on the 1st provided datanode now
+      dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
+      // not comparing UUIDs as the datanode can now have a different one.
+      assertEquals(providedDatanode1.getDatanodeId().getXferAddr(),
+          dnInfos[0].getXferAddr());
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTransientDeadDatanodes() throws Exception {
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+            FixedBlockResolver.class);
+    // 3 datanodes: 2 with PROVIDED and DISK, 1 with DISK only
+    startCluster(nnDirPath, 3, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.DISK}},
+        false);
+
+    DataNode providedDatanode = cluster.getDataNodes().get(0);
+    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
+    int initialBRCount = providedDNInfo.getBlockReportCount();
+    for (int i = 0; i < numFiles; i++) {
+      // expect to have 2 locations as we have 2 provided Datanodes.
+      verifyFileLocation(i, 2);
+      // NameNode thinks the datanode is down
+      BlockManagerTestUtil.noticeDeadDatanode(
+          cluster.getNameNode(),
+          providedDatanode.getDatanodeId().getXferAddr());
+      cluster.waitActive();
+      cluster.triggerHeartbeats();
+      Thread.sleep(1000);
+      // the report count should just continue to increase.
+      assertEquals(initialBRCount + i + 1,
+          providedDNInfo.getBlockReportCount());
+      verifyFileLocation(i, 2);
+    }
+  }
+
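+  // returns the DatanodeStorageInfo maintained by the NameNode for
+  // PROVIDED storage.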
+  private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
+    ProvidedStorageMap providedStorageMap =
+        cluster.getNamesystem().getBlockManager().getProvidedStorageMap();
+    return providedStorageMap.getProvidedStorageInfo();
+  }
+
+  @Test(timeout=30000)
+  public void testNamenodeRestart() throws Exception {
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    // 3 datanodes: 2 with PROVIDED and DISK, 1 with DISK only
+    startCluster(nnDirPath, 3, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.DISK}},
+        false);
+
+    verifyFileLocation(numFiles - 1, 2);
+    cluster.restartNameNodes();
+    cluster.waitActive();
+    verifyFileLocation(numFiles - 1, 2);
+  }
+
+  /**
+   * Verify that the specified file has the expected number of provided
+   * locations.
+   * @param fileIndex the index of the file to verify.
+   * @param replication the expected number of locations.
+   * @throws Exception
+   */
+  private void verifyFileLocation(int fileIndex, int replication)
+      throws Exception {
+    DFSClient client = new DFSClient(
+        new InetSocketAddress("localhost", cluster.getNameNodePort()),
+        cluster.getConfiguration(0));
+    if (fileIndex < numFiles && fileIndex >= 0) {
+      String filename = filePrefix + fileIndex + fileSuffix;
+      File file = new File(new Path(providedPath, filename).toUri());
+      long fileLen = file.length();
+      long blockSize = conf.getLong(FixedBlockResolver.BLOCKSIZE,
+          FixedBlockResolver.BLOCKSIZE_DEFAULT);
+      long numLocatedBlocks =
+          fileLen == 0 ? 1 : (long) Math.ceil(fileLen * 1.0 / blockSize);
+      getAndCheckBlockLocations(client, "/" + filename, fileLen,
+          numLocatedBlocks, replication);
+    }
+  }
+
+  @Test(timeout=30000)
+  public void testSetClusterID() throws Exception {
+    String clusterID = "PROVIDED-CLUSTER";
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class, clusterID, TextFileRegionAliasMap.class);
+    // 2 datanodes: 1 with PROVIDED and DISK, 1 with DISK only
+    startCluster(nnDirPath, 2, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.DISK}},
+        false);
+    NameNode nn = cluster.getNameNode();
+    assertEquals(clusterID, nn.getNamesystem().getClusterId());
+  }
+
+  @Test(timeout=30000)
+  public void testNumberOfProvidedLocations() throws Exception {
+    // set default replication to 4
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    // start with 4 datanodes, each with a PROVIDED and a DISK volume
+    startCluster(nnDirPath, 4,
+        new StorageType[]{
+            StorageType.PROVIDED, StorageType.DISK},
+        null,
+        false);
+    int expectedLocations = 4;
+    for (int i = 0; i < numFiles; i++) {
+      verifyFileLocation(i, expectedLocations);
+    }
+    // stop 2 datanodes, one after the other and verify number of locations.
+    for (int i = 1; i <= 2; i++) {
+      DataNode dn = cluster.getDataNodes().get(0);
+      cluster.stopDataNode(0);
+      // make NameNode detect that datanode is down
+      BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
+          dn.getDatanodeId().getXferAddr());
+
+      expectedLocations = 4 - i;
+      for (int j = 0; j < numFiles; j++) {
+        verifyFileLocation(j, expectedLocations);
+      }
+    }
+  }
+
+  @Test(timeout=30000)
+  public void testNumberOfProvidedLocationsManyBlocks() throws Exception {
+    // lower the block size so each non-empty file has at least 10 blocks
+    conf.setLong(FixedBlockResolver.BLOCKSIZE, baseFileLen / 10);
+    // set default replication to 4
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    // start with 4 datanodes, each with a PROVIDED and a DISK volume
+    startCluster(nnDirPath, 4,
+        new StorageType[]{
+            StorageType.PROVIDED, StorageType.DISK},
+        null,
+        false);
+    int expectedLocations = 4;
+    for (int i = 0; i < numFiles; i++) {
+      verifyFileLocation(i, expectedLocations);
+    }
+  }
+
+  @Test
+  public void testInMemoryAliasMap() throws Exception {
+    conf.setClass(ImageWriter.Options.UGI_CLASS,
+        FsUGIResolver.class, UGIResolver.class);
+    conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+        InMemoryLevelDBAliasMapClient.class, BlockAliasMap.class);
+    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
+        "localhost:32445");
+    File tempDirectory =
+        Files.createTempDirectory("in-memory-alias-map").toFile();
+    File levelDBPath = new File(tempDirectory, bpid);
+    levelDBPath.mkdirs();
+    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
+        tempDirectory.getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
+    conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
+    InMemoryLevelDBAliasMapServer levelDBAliasMapServer =
+        new InMemoryLevelDBAliasMapServer(InMemoryAliasMap::init, bpid);
+    levelDBAliasMapServer.setConf(conf);
+    levelDBAliasMapServer.start();
+
+    createImage(new FSTreeWalk(providedPath, conf),
+        nnDirPath,
+        FixedBlockResolver.class, "",
+        InMemoryLevelDBAliasMapClient.class);
+    levelDBAliasMapServer.close();
+
+    // start cluster with two datanodes,
+    // each with 1 PROVIDED volume and 1 DISK volume
+    startCluster(nnDirPath, 2,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
+        null, false);
+    verifyFileSystemContents();
+    FileUtils.deleteDirectory(tempDirectory);
+  }
+
+  private DatanodeDescriptor getDatanodeDescriptor(DatanodeManager dnm,
+      int dnIndex) throws Exception {
+    return dnm.getDatanode(cluster.getDataNodes().get(dnIndex).getDatanodeId());
+  }
+
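+  // the helpers below change a datanode's admin state while holding the
+  // namesystem write lock.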
+  private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
+      int dnIndex) throws Exception {
+    namesystem.writeLock();
+    DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
+    dnm.getDatanodeAdminManager().startDecommission(dnDesc);
+    namesystem.writeUnlock();
+  }
+
+  private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
+      int dnIndex) throws Exception {
+    namesystem.writeLock();
+    DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
+    dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
+    namesystem.writeUnlock();
+  }
+
+  private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
+      int dnIndex) throws Exception {
+    namesystem.writeLock();
+    DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
+    dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
+    namesystem.writeUnlock();
+  }
+
+  @Test
+  public void testDatanodeLifeCycle() throws Exception {
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    startCluster(nnDirPath, 3,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
+        null, false);
+
+    int fileIndex = numFiles - 1;
+
+    final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
+    final DatanodeManager dnm = blockManager.getDatanodeManager();
+
+    // to start, all 3 DNs are live in ProvidedDatanodeDescriptor.
+    verifyFileLocation(fileIndex, 3);
+
+    // decommission the first DN; still expect 3 replicas.
+    startDecommission(cluster.getNamesystem(), dnm, 0);
+    verifyFileLocation(fileIndex, 3);
+
+    // remains the same even after heartbeats.
+    cluster.triggerHeartbeats();
+    verifyFileLocation(fileIndex, 3);
+
+    // start maintenance for 2nd DN; still get 3 replicas.
+    startMaintenance(cluster.getNamesystem(), dnm, 1);
+    verifyFileLocation(fileIndex, 3);
+
+    DataNode dn1 = cluster.getDataNodes().get(0);
+    DataNode dn2 = cluster.getDataNodes().get(1);
+
+    // stop the 1st DN while it is being decommissioned.
+    MiniDFSCluster.DataNodeProperties dn1Properties = cluster.stopDataNode(0);
+    BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
+        dn1.getDatanodeId().getXferAddr());
+
+    // get 2 locations
+    verifyFileLocation(fileIndex, 2);
+
+    // stop dn2 while in maintenance.
+    MiniDFSCluster.DataNodeProperties dn2Properties = cluster.stopDataNode(1);
+    BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
+        dn2.getDatanodeId().getXferAddr());
+
+    // 2 valid locations will be found as blocks on nodes that die during
+    // maintenance are not marked for removal.
+    verifyFileLocation(fileIndex, 2);
+
+    // stop the maintenance; only 1 replica should remain
+    stopMaintenance(cluster.getNamesystem(), dnm, 0);
+    verifyFileLocation(fileIndex, 1);
+
+    // restart the stopped DN.
+    cluster.restartDataNode(dn1Properties, true);
+    cluster.waitActive();
+
+    // with the first DN restarted, 2 locations are reported
+    verifyFileLocation(fileIndex, 2);
+
+    cluster.restartDataNode(dn2Properties, true);
+    cluster.waitActive();
+
+    // reports all 3 replicas
+    verifyFileLocation(fileIndex, 3);
+  }
+
+  @Test
+  public void testProvidedWithHierarchicalTopology() throws Exception {
+    conf.setClass(ImageWriter.Options.UGI_CLASS, FsUGIResolver.class,
+        UGIResolver.class);
+    String packageName = "org.apache.hadoop.hdfs.server.blockmanagement";
+    String[] policies = new String[] {
+        "BlockPlacementPolicyDefault",
+        "BlockPlacementPolicyRackFaultTolerant",
+        "BlockPlacementPolicyWithNodeGroup",
+        "BlockPlacementPolicyWithUpgradeDomain"};
+    createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
+        FixedBlockResolver.class);
+    String[] racks =
+        {"/pod0/rack0", "/pod0/rack0", "/pod0/rack1", "/pod0/rack1",
+            "/pod1/rack0", "/pod1/rack0", "/pod1/rack1", "/pod1/rack1" };
+    for (String policy: policies) {
+      LOG.info("Using policy: " + packageName + "." + policy);
+      conf.set(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, packageName + "." + policy);
+      startCluster(nnDirPath, racks.length,
+          new StorageType[]{StorageType.PROVIDED, StorageType.DISK},
+          null, false, racks);
+      verifyFileSystemContents();
+      setAndUnsetReplication("/" + filePrefix + (numFiles - 1) + fileSuffix);
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf772b81/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
index c9d109a..6e5b166 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -43,7 +42,6 @@ public class RandomTreeWalk extends TreeWalk {
   private final float depth;
   private final int children;
   private final Map<Long, Long> mSeed;
-  //private final AtomicLong blockIds = new AtomicLong(1L << 30);
 
   RandomTreeWalk(long seed) {
     this(seed, 10);
@@ -54,7 +52,7 @@ public class RandomTreeWalk extends TreeWalk {
   }
 
   RandomTreeWalk(long seed, int children, float depth) {
-    this(randomRoot(seed), seed, children, 0.15f);
+    this(randomRoot(seed), seed, children, depth);
   }
 
   RandomTreeWalk(Path root, long seed, int children, float depth) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org