Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/07/09 23:22:59 UTC

hadoop git commit: HDFS-8679. Move DatasetSpi to new package. (Arpit Agarwal)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 83f39a32c -> 6fbad4255


HDFS-8679. Move DatasetSpi to new package. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fbad425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fbad425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fbad425

Branch: refs/heads/HDFS-7240
Commit: 6fbad42552699775140f17eb290234279b63f28b
Parents: 83f39a3
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Jul 9 14:22:30 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jul 9 14:22:30 2015 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/BPOfferService.java    |   2 +-
 .../hdfs/server/datanode/BPServiceActor.java    |   4 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  13 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |   2 +-
 .../server/datanode/dataset/DatasetSpi.java     | 249 +++++++++++++++++++
 .../hdfs/server/datanode/dataset/VolumeSpi.java |  72 ++++++
 .../server/datanode/fsdataset/DatasetSpi.java   | 242 ------------------
 .../server/datanode/fsdataset/FsDatasetSpi.java |   1 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |   1 +
 .../server/datanode/fsdataset/VolumeSpi.java    |  72 ------
 .../fsdataset/impl/FsDatasetFactory.java        |  20 +-
 .../org/apache/hadoop/hdfs/DataNodeCluster.java |   9 +-
 .../hdfs/TestWriteBlockGetsBlockLengthHint.java |   3 +-
 .../server/datanode/SimulatedFSDataset.java     |   4 +-
 .../datanode/TestDataNodeInitStorage.java       |   2 +-
 .../server/datanode/TestSimulatedFSDataset.java |   7 +-
 16 files changed, 353 insertions(+), 350 deletions(-)
----------------------------------------------------------------------
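
In summary: the storage-agnostic DatasetSpi and VolumeSpi interfaces move from
org.apache.hadoop.hdfs.server.datanode.fsdataset to a new
org.apache.hadoop.hdfs.server.datanode.dataset package. DataNode also stops
caching a datasetFactory field and instead resolves the factory on demand via
DatasetSpi.Factory.getFactory(conf, nodeType), and Factory.newInstance() drops
its NodeType parameter. A minimal sketch of the new lookup, assuming only the
signatures visible in the diff below (the class name is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;

  import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;

  public class DatasetFactoryLookup {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // getFactory() is now keyed by node type as well as configuration;
      // only NAME_NODE is handled in this commit, and any other NodeType
      // results in an IllegalArgumentException.
      DatasetSpi.Factory factory = DatasetSpi.Factory.getFactory(conf, NAME_NODE);
      System.out.println("simulated dataset: " + factory.isSimulated());
    }
  }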


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 0392a2f..766aa59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 8db0ed5..11869b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.VolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5581d71..869ab05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -155,10 +155,10 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage.VolumeBuilder;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.VolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -296,8 +296,6 @@ public class DataNode extends ReconfigurableBase
   private boolean shutdownInProgress = false;
   private BlockPoolManager blockPoolManager;
 
-  private final DatasetSpi.Factory datasetFactory;
-
   // This is an onto (many-one) mapping. Multiple block pool IDs may share
   // the same dataset.
   private volatile Map<String,
@@ -406,7 +404,6 @@ public class DataNode extends ReconfigurableBase
     this.getHdfsBlockLocationsEnabled = false;
     this.blockScanner = new BlockScanner(this, conf);
     this.pipelineSupportECN = false;
-    this.datasetFactory = null;
   }
 
   /**
@@ -421,7 +418,6 @@ public class DataNode extends ReconfigurableBase
     this.lastDiskErrorCheck = 0;
     this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
         DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
-    datasetFactory = FsDatasetSpi.Factory.getFactory(conf);
     this.usersWithLocalPathAccess = Arrays.asList(
         conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
     this.connectToDnViaHostname = conf.getBoolean(
@@ -1494,7 +1490,7 @@ public class DataNode extends ReconfigurableBase
    */
   private DatasetSpi<?> initStorage(
       final String blockPoolId, final NamespaceInfo nsInfo) throws IOException {
-    if (!datasetFactory.isSimulated()) {
+    if (!DatasetSpi.Factory.getFactory(conf, nsInfo.getNodeType()).isSimulated()) {
       final StartupOption startOpt = getStartupOption(conf);
       if (startOpt == null) {
         throw new IOException("Startup option not set.");
@@ -2642,7 +2638,8 @@ public class DataNode extends ReconfigurableBase
       return dataset;
     }
 
-    dataset = datasetFactory.newInstance(this, storage, conf, serviceType);
+    dataset = DatasetSpi.Factory.getFactory(conf, serviceType)
+                                .newInstance(this, storage, conf);
     datasets.put(serviceType, dataset);
     datasetsMap.put(bpid, dataset);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index cb0a6ae..ff537b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.Daemon;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/DatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/DatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/DatasetSpi.java
new file mode 100644
index 0000000..a210155
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/DatasetSpi.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.dataset;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
+import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * This is a service provider interface for the underlying storage that
+ * stores replicas for a data node.
+ * The default implementation stores replicas on local drives.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface DatasetSpi<V extends VolumeSpi> {
+  /**
+   * A factory for creating {@link FsDatasetSpi} objects.
+   */
+  abstract class Factory {
+    /**
+     * @return the configured factory.
+     */
+    public static Factory getFactory(Configuration conf, NodeType nodeType) {
+      switch(nodeType) {
+      case NAME_NODE:
+        @SuppressWarnings("rawtypes")
+        final Class<? extends Factory> clazz = conf.getClass(
+            DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
+            FsDatasetFactory.class,
+            Factory.class);
+        return ReflectionUtils.newInstance(clazz, conf);
+      default:
+        throw new IllegalArgumentException("Unsupported NODE_TYPE " + nodeType);
+      }
+    }
+
+    /**
+     * Create a new dataset object for a specific service type.
+     * The caller must perform synchronization, if required.
+     */
+    public abstract DatasetSpi<? extends VolumeSpi> newInstance(
+        DataNode datanode, DataStorage storage, Configuration conf)
+        throws IOException;
+
+    /** Does the factory create simulated objects? */
+    public boolean isSimulated() {
+      return false;
+    }
+  }
+
+  /**
+   * @return the volume that contains a replica of the block.
+   */
+  V getVolume(ExtendedBlock b);
+
+  /**
+   * Does the dataset contain the block?
+   */
+  boolean contains(ExtendedBlock block);
+
+
+  /**
+   * Add a new volume to the Dataset.<p/>
+   *
+   * If the Dataset supports block scanning, this function registers
+   * the new volume with the block scanner.
+   *
+   * @param location      The storage location for the new volume.
+   * @param nsInfos       Namespace information for the new volume.
+   */
+  void addVolume(
+      final StorageLocation location,
+      final Storage.StorageDirectory sd,
+      final List<NamespaceInfo> nsInfos) throws IOException;
+
+  /**
+   * Record a failure to bring up a volume. Primarily for reporting
+   * purposes.
+   *
+   * @param location StorageLocation corresponding to the failed volume.
+   */
+  void recordFailedVolume(final StorageLocation location);
+
+  /**
+   * Removes a collection of volumes from FsDataset.
+   *
+   * If the FSDataset supports block scanning, this function removes
+   * the volumes from the block scanner.
+   *
+   * @param volumes  The paths of the volumes to be removed.
+   * @param clearFailure set true to clear the failure information about the
+   *                     volumes.
+   */
+  void removeVolumes(Set<File> volumes, boolean clearFailure);
+
+  /** @return a storage with the given storage ID */
+  DatanodeStorage getStorage(final String storageUuid);
+
+  /** @return one or more storage reports for attached volumes. */
+  StorageReport[] getStorageReports(String bpid)
+      throws IOException;
+
+  /**
+   * Returns one block report per volume.
+   * @param bpid Block Pool Id
+   * @return - a map of DatanodeStorage to block report for the volume.
+   */
+  Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
+
+  /**
+   * Invalidates the specified blocks
+   * @param bpid Block pool Id
+   * @param invalidBlks - the blocks to be invalidated
+   * @throws IOException
+   */
+  void invalidate(String bpid, Block[] invalidBlks) throws IOException;
+
+  /**
+   * Returns info about volume failures.
+   *
+   * @return info about volume failures, possibly null
+   */
+  VolumeFailureSummary getVolumeFailureSummary();
+
+  /**
+   * Check if all the data directories are healthy
+   * @return A set of unhealthy data directories.
+   */
+  Set<File> checkDataDir();
+
+  /**
+   * Shutdown the Dataset
+   */
+  void shutdown();
+
+  /**
+   * add new block pool ID
+   * @param bpid Block pool Id
+   * @param conf Configuration
+   */
+  void addBlockPool(String bpid, Configuration conf) throws IOException;
+
+  /**
+   * Shutdown and remove the block pool from underlying storage.
+   * @param bpid Block pool Id to be removed
+   */
+  void shutdownBlockPool(String bpid);
+
+  /**
+   * Checks how many valid storage volumes there are in the DataNode.
+   * @return true if more than the minimum number of valid volumes are left
+   * in the FSDataSet.
+   */
+  boolean hasEnoughResource();
+
+  /**
+   * Does the dataset support caching blocks?
+   *
+   * @return
+   */
+  boolean isCachingSupported();
+
+  /**
+   * Caches the specified blocks
+   * @param bpid Block pool id
+   * @param blockIds - block ids to cache
+   */
+  void cache(String bpid, long[] blockIds);
+
+  /**
+   * Uncaches the specified blocks
+   * @param bpid Block pool id
+   * @param blockIds - blocks ids to uncache
+   */
+  void uncache(String bpid, long[] blockIds);
+
+
+  /**
+   * Returns the cache report - the full list of cached block IDs of a
+   * block pool.
+   * @param   bpid Block Pool Id
+   * @return  the cache report - the full list of cached block IDs.
+   */
+  List<Long> getCacheReport(String bpid);
+
+  /**
+   * Enable 'trash' for the given dataset. When trash is enabled, files are
+   * moved to a separate trash directory instead of being deleted immediately.
+   * This can be useful for example during rolling upgrades.
+   */
+  void enableTrash(String bpid);
+
+  /**
+   * Restore trash
+   */
+  void clearTrash(String bpid);
+
+  /**
+   * @return true when trash is enabled
+   */
+  boolean trashEnabled(String bpid);
+
+  /**
+   * Create a marker file indicating that a rolling upgrade is in progress.
+   */
+  void setRollingUpgradeMarker(String bpid) throws IOException;
+
+  /**
+   * Delete the rolling upgrade marker file if it exists.
+   * @param bpid
+   */
+  void clearRollingUpgradeMarker(String bpid) throws IOException;
+}
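
Downstream dataset implementations now override the simplified newInstance()
shown above. A hedged sketch of a subclass against the new abstract signature
(the class is hypothetical and deliberately inert; a real factory would
construct and return its dataset):

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.datanode.DataNode;
  import org.apache.hadoop.hdfs.server.datanode.DataStorage;
  import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
  import org.apache.hadoop.hdfs.server.datanode.dataset.VolumeSpi;

  public class NoopDatasetFactory extends DatasetSpi.Factory {
    @Override
    public DatasetSpi<? extends VolumeSpi> newInstance(
        DataNode datanode, DataStorage storage, Configuration conf)
        throws IOException {
      // The NodeType parameter is gone; node-type dispatch now happens
      // earlier, inside DatasetSpi.Factory.getFactory(conf, nodeType).
      throw new UnsupportedOperationException("sketch only");
    }
  }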

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/VolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/VolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/VolumeSpi.java
new file mode 100644
index 0000000..4b1cc41
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/dataset/VolumeSpi.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.dataset;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+
+/**
+ * This is an interface for the underlying volume.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface VolumeSpi {
+  /**
+   * @return the available storage space in bytes.
+   */
+  long getAvailable() throws IOException;
+
+  /**
+   * @return the base path to the volume
+   */
+  String getBasePath();
+
+  /**
+   * @return the StorageUuid of the volume
+   */
+  String getStorageID();
+
+  /**
+   * Returns true if the volume is NOT backed by persistent storage.
+   */
+  boolean isTransientStorage();
+
+  /**
+   * @return a list of block pools.
+   */
+  String[] getBlockPoolList();
+
+  /**
+   * @return the path to the volume
+   */
+  String getPath(String bpid) throws IOException;
+
+  /**
+   * Return the StorageType i.e. media type of this volume.
+   * @return
+   */
+  StorageType getStorageType();
+
+  /**
+   * Get the DatasetSpi which this volume is a part of.
+   */
+  DatasetSpi<? extends VolumeSpi> getDataset();
+}
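
VolumeSpi is the companion per-volume interface and carries no fsdataset
dependencies. A hedged consumer sketch using only the methods declared above
(the class name is illustrative):

  import java.io.IOException;

  import org.apache.hadoop.hdfs.server.datanode.dataset.VolumeSpi;

  public class VolumeDescriber {
    // Print basic facts about a volume and its block pools; 'volume' may be
    // any VolumeSpi implementation, e.g. an FsVolumeSpi after this change.
    public static void describe(VolumeSpi volume) throws IOException {
      System.out.println("base path: " + volume.getBasePath());
      System.out.println("storage:   " + volume.getStorageID()
          + " (" + volume.getStorageType() + ")");
      System.out.println("available: " + volume.getAvailable() + " bytes");
      for (String bpid : volume.getBlockPoolList()) {
        System.out.println("bpid " + bpid + " -> " + volume.getPath(bpid));
      }
    }
  }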

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
deleted file mode 100644
index f9c61cb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DatasetSpi.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
-import org.apache.hadoop.hdfs.server.protocol.*;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * This is a service provider interface for the underlying storage that
- * stores replicas for a data node.
- * The default implementation stores replicas on local drives.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface DatasetSpi<V extends VolumeSpi> {
-  /**
-   * A factory for creating {@link FsDatasetSpi} objects.
-   */
-  abstract class Factory {
-    /**
-     * @return the configured factory.
-     */
-    public static Factory getFactory(Configuration conf) {
-      @SuppressWarnings("rawtypes")
-      final Class<? extends Factory> clazz = conf.getClass(
-          DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
-          FsDatasetFactory.class,
-          Factory.class);
-      return ReflectionUtils.newInstance(clazz, conf);
-    }
-
-    /**
-     * Create a new dataset object for a specific service type
-     */
-    public abstract DatasetSpi<? extends VolumeSpi> newInstance(
-        DataNode datanode, DataStorage storage, Configuration conf,
-        HdfsServerConstants.NodeType serviceType) throws IOException;
-
-    /** Does the factory create simulated objects? */
-    public boolean isSimulated() {
-      return false;
-    }
-  }
-
-  /**
-   * @return the volume that contains a replica of the block.
-   */
-  V getVolume(ExtendedBlock b);
-
-  /**
-   * Does the dataset contain the block?
-   */
-  boolean contains(ExtendedBlock block);
-
-
-  /**
-   * Add a new volume to the Dataset.<p/>
-   *
-   * If the Dataset supports block scanning, this function registers
-   * the new volume with the block scanner.
-   *
-   * @param location      The storage location for the new volume.
-   * @param nsInfos       Namespace information for the new volume.
-   */
-  void addVolume(
-      final StorageLocation location,
-      final Storage.StorageDirectory sd,
-      final List<NamespaceInfo> nsInfos) throws IOException;
-
-  /**
-   * Record a failure to bring up a volume. Primarily for reporting
-   * purposes.
-   *
-   * @param location StorageLocation corresponding to the failed volume.
-   */
-  void recordFailedVolume(final StorageLocation location);
-
-  /**
-   * Removes a collection of volumes from FsDataset.
-   *
-   * If the FSDataset supports block scanning, this function removes
-   * the volumes from the block scanner.
-   *
-   * @param volumes  The paths of the volumes to be removed.
-   * @param clearFailure set true to clear the failure information about the
-   *                     volumes.
-   */
-  void removeVolumes(Set<File> volumes, boolean clearFailure);
-
-  /** @return a storage with the given storage ID */
-  DatanodeStorage getStorage(final String storageUuid);
-
-  /** @return one or more storage reports for attached volumes. */
-  StorageReport[] getStorageReports(String bpid)
-      throws IOException;
-
-  /**
-   * Returns one block report per volume.
-   * @param bpid Block Pool Id
-   * @return - a map of DatanodeStorage to block report for the volume.
-   */
-  Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
-
-  /**
-   * Invalidates the specified blocks
-   * @param bpid Block pool Id
-   * @param invalidBlks - the blocks to be invalidated
-   * @throws IOException
-   */
-  void invalidate(String bpid, Block[] invalidBlks) throws IOException;
-
-  /**
-   * Returns info about volume failures.
-   *
-   * @return info about volume failures, possibly null
-   */
-  VolumeFailureSummary getVolumeFailureSummary();
-
-  /**
-   * Check if all the data directories are healthy
-   * @return A set of unhealthy data directories.
-   */
-  Set<File> checkDataDir();
-
-  /**
-   * Shutdown the Dataset
-   */
-  void shutdown();
-
-  /**
-   * add new block pool ID
-   * @param bpid Block pool Id
-   * @param conf Configuration
-   */
-  void addBlockPool(String bpid, Configuration conf) throws IOException;
-
-  /**
-   * Shutdown and remove the block pool from underlying storage.
-   * @param bpid Block pool Id to be removed
-   */
-  void shutdownBlockPool(String bpid);
-
-  /**
-   * Checks how many valid storage volumes there are in the DataNode.
-   * @return true if more than the minimum number of valid volumes are left
-   * in the FSDataSet.
-   */
-  boolean hasEnoughResource();
-
-  /**
-   * Does the dataset support caching blocks?
-   *
-   * @return
-   */
-  boolean isCachingSupported();
-
-  /**
-   * Caches the specified blocks
-   * @param bpid Block pool id
-   * @param blockIds - block ids to cache
-   */
-  void cache(String bpid, long[] blockIds);
-
-  /**
-   * Uncaches the specified blocks
-   * @param bpid Block pool id
-   * @param blockIds - blocks ids to uncache
-   */
-  void uncache(String bpid, long[] blockIds);
-
-
-  /**
-   * Returns the cache report - the full list of cached block IDs of a
-   * block pool.
-   * @param   bpid Block Pool Id
-   * @return  the cache report - the full list of cached block IDs.
-   */
-  List<Long> getCacheReport(String bpid);
-
-  /**
-   * Enable 'trash' for the given dataset. When trash is enabled, files are
-   * moved to a separate trash directory instead of being deleted immediately.
-   * This can be useful for example during rolling upgrades.
-   */
-  void enableTrash(String bpid);
-
-  /**
-   * Restore trash
-   */
-  void clearTrash(String bpid);
-
-  /**
-   * @return true when trash is enabled
-   */
-  boolean trashEnabled(String bpid);
-
-  /**
-   * Create a marker file indicating that a rolling upgrade is in progress.
-   */
-  void setRollingUpgradeMarker(String bpid) throws IOException;
-
-  /**
-   * Delete the rolling upgrade marker file if it exists.
-   * @param bpid
-   */
-  void clearRollingUpgradeMarker(String bpid) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 7052f54..3c39ace 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index c083a95..64ad701 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -25,6 +25,7 @@ import java.nio.channels.ClosedChannelException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.dataset.VolumeSpi;
 
 /**
  * This is an interface for the underlying volume used by DFS.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeSpi.java
deleted file mode 100644
index e7fd741..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeSpi.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.StorageType;
-
-/**
- * This is an interface for the underlying volume.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface VolumeSpi {
-  /**
-   * @return the available storage space in bytes.
-   */
-  long getAvailable() throws IOException;
-
-  /**
-   * @return the base path to the volume
-   */
-  String getBasePath();
-
-  /**
-   * @return the StorageUuid of the volume
-   */
-  String getStorageID();
-
-  /**
-   * Returns true if the volume is NOT backed by persistent storage.
-   */
-  boolean isTransientStorage();
-
-  /**
-   * @return a list of block pools.
-   */
-  String[] getBlockPoolList();
-
-  /**
-   * @return the path to the volume
-   */
-  String getPath(String bpid) throws IOException;
-
-  /**
-   * Return the StorageType i.e. media type of this volume.
-   * @return
-   */
-  StorageType getStorageType();
-
-  /**
-   * Get the DatasetSpi which this volume is a part of.
-   */
-  DatasetSpi getDataset();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java
index 4efc0b1..5c014dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java
@@ -20,28 +20,18 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 
 /**
  * A factory for creating {@link FsDatasetImpl} objects.
  */
 public class FsDatasetFactory extends DatasetSpi.Factory {
-
-
   @Override
-  public synchronized DatasetSpi<? extends VolumeSpi> newInstance(
-      DataNode datanode, DataStorage storage, Configuration conf,
-      NodeType serviceType) throws IOException {
-    switch (serviceType) {
-    case NAME_NODE:
-      return new FsDatasetImpl(datanode, storage, conf);
-    default:
-      throw new IllegalArgumentException(
-          "Unsupported node type " + serviceType);
-    }
+  public FsDatasetImpl newInstance(DataNode datanode,
+      DataStorage storage, Configuration conf) throws IOException {
+    return new FsDatasetImpl(datanode, storage, conf);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
index 01d2c85..15814c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
@@ -24,13 +24,17 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Time;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+
 
 /**
  * 
@@ -137,7 +141,7 @@ public class DataNodeCluster {
         }
         bpid = args[i];
       } else if (args[i].equals("-inject")) {
-        if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
+        if (!DatasetSpi.Factory.getFactory(conf, NAME_NODE).isSimulated()) {
           System.out.print("-inject is valid only for simulated");
           printUsageExit(); 
         }
@@ -172,7 +176,8 @@ public class DataNodeCluster {
       System.out.println("No name node address and port in config");
       System.exit(-1);
     }
-    boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
+    boolean simulated =
+        DatasetSpi.Factory.getFactory(conf, NAME_NODE).isSimulated();
     System.out.println("Starting " + numDataNodes + 
           (simulated ? " Simulated " : " ") +
           " Data Nodes that will connect to Name Node at " + nameNodeAdr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
index 12e6741..2b35824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
@@ -74,8 +74,7 @@ public class TestWriteBlockGetsBlockLengthHint {
     static class Factory extends FsDatasetSpi.Factory {
       @Override
       public SimulatedFSDataset newInstance(DataNode datanode,
-          DataStorage storage, Configuration conf,
-          HdfsServerConstants.NodeType serviceType) throws IOException {
+          DataStorage storage, Configuration conf) throws IOException {
         return new FsDatasetChecker(storage, conf);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 9def302..b05be39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
@@ -86,8 +87,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   static class Factory extends DatasetSpi.Factory {
     @Override
     public SimulatedFSDataset newInstance(DataNode datanode,
-        DataStorage storage, Configuration conf,
-        HdfsServerConstants.NodeType serviceType) throws IOException {
+        DataStorage storage, Configuration conf) throws IOException {
       return new SimulatedFSDataset(datanode, storage, conf);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
index 017223c..42e4ce7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
@@ -46,7 +46,7 @@ public class TestDataNodeInitStorage {
       @Override
       public SimulatedFsDatasetVerifier newInstance(
           DataNode datanode, DataStorage storage,
-          Configuration conf, HdfsServerConstants.NodeType serviceType) throws IOException {
+          Configuration conf) throws IOException {
         return new SimulatedFsDatasetVerifier(storage, conf);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fbad425/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 461d5ad..1218840 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -34,6 +35,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.SequentialBlockIdGenerator;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.dataset.DatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
@@ -114,12 +117,12 @@ public class TestSimulatedFSDataset {
   @Test
   public void testFSDatasetFactory() {
     final Configuration conf = new Configuration();
-    FsDatasetSpi.Factory f = FsDatasetSpi.Factory.getFactory(conf);
+    FsDatasetSpi.Factory f = DatasetSpi.Factory.getFactory(conf, NAME_NODE);
     assertEquals(FsDatasetFactory.class, f.getClass());
     assertFalse(f.isSimulated());
 
     SimulatedFSDataset.setFactory(conf);
-    FsDatasetSpi.Factory s = FsDatasetSpi.Factory.getFactory(conf);
+    FsDatasetSpi.Factory s = DatasetSpi.Factory.getFactory(conf, NAME_NODE);
     assertEquals(SimulatedFSDataset.Factory.class, s.getClass());
     assertTrue(s.isSimulated());
   }