Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2014/08/21 02:42:27 UTC

svn commit: r1619271 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/datanode/

Author: arp
Date: Thu Aug 21 00:42:27 2014
New Revision: 1619271

URL: http://svn.apache.org/r1619271
Log:
HDFS-6878. Change MiniDFSCluster to support StorageType configuration for individual directories. (Arpit Agarwal)
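
A minimal usage sketch of the new uniform overload (not part of the commit; the
node count and storage types are illustrative, and the StorageType import path
reflects where the enum lived at this revision). The builder expands the
per-directory array across datanodes when storageTypes(StorageType[]) is
called, so numDataNodes must be set first, and the array length must match
DIRS_PER_DATANODE:

  // Sketch only: exercises the uniform Builder overload added by HDFS-6878.
  // Assumes StorageType is still org.apache.hadoop.hdfs.StorageType at r1619271.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.StorageType;

  public class StorageTypesBuilderSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new HdfsConfiguration();
      // numDataNodes must precede storageTypes(StorageType[]) because the builder
      // copies the per-directory array across numDataNodes rows at call time.
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(3)
          // Every datanode gets the same two-directory layout; the array length
          // must equal DIRS_PER_DATANODE.
          .storageTypes(new StorageType[] { StorageType.SSD, StorageType.DISK })
          .build();
      try {
        cluster.waitActive();
        // ... test body ...
      } finally {
        cluster.shutdown();
      }
    }
  }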

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619271&r1=1619270&r2=1619271&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Aug 21 00:42:27 2014
@@ -499,6 +499,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6858. Allow dfs.data.transfer.saslproperties.resolver.class default to
     hadoop.security.saslproperties.resolver.class. (Benoy Antony via cnauroth)
 
+    HDFS-6878. Change MiniDFSCluster to support StorageType configuration
+    for individual directories (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1619271&r1=1619270&r2=1619271&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Aug 21 00:42:27 2014
@@ -144,7 +144,7 @@ public class MiniDFSCluster {
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
-    private StorageType storageType = StorageType.DEFAULT;
+    private StorageType[][] storageTypes = null;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
@@ -193,10 +193,26 @@ public class MiniDFSCluster {
     }
 
     /**
-     * Default: StorageType.DEFAULT
+     * Set the same storage type configuration for each datanode.
+     * If storageTypes is uninitialized or passed null then
+     * StorageType.DEFAULT is used.
      */
-    public Builder storageType(StorageType type) {
-      this.storageType = type;
+    public Builder storageTypes(StorageType[] types) {
+      assert types.length == DIRS_PER_DATANODE;
+      this.storageTypes = new StorageType[numDataNodes][types.length];
+      for (int i = 0; i < numDataNodes; ++i) {
+        this.storageTypes[i] = types;
+      }
+      return this;
+    }
+
+    /**
+     * Set custom storage type configuration for each datanode.
+     * If storageTypes is uninitialized or passed null then
+     * StorageType.DEFAULT is used.
+     */
+    public Builder storageTypes(StorageType[][] types) {
+      this.storageTypes = types;
       return this;
     }
 
@@ -369,7 +385,8 @@ public class MiniDFSCluster {
       builder.nnTopology = MiniDFSNNTopology.simpleSingleNN(
           builder.nameNodePort, builder.nameNodeHttpPort);
     }
-    
+    assert builder.storageTypes == null ||
+           builder.storageTypes.length == builder.numDataNodes;
     final int numNameNodes = builder.nnTopology.countNameNodes();
     LOG.info("starting cluster: numNameNodes=" + numNameNodes
         + ", numDataNodes=" + builder.numDataNodes);
@@ -377,7 +394,7 @@ public class MiniDFSCluster {
       
     initMiniDFSCluster(builder.conf,
                        builder.numDataNodes,
-                       builder.storageType,
+                       builder.storageTypes,
                        builder.format,
                        builder.manageNameDfsDirs,
                        builder.manageNameDfsSharedDirs,
@@ -477,8 +494,8 @@ public class MiniDFSCluster {
    * Servers will be started on free ports.
    * <p>
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   * {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf.
+   * and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
    * 
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
@@ -554,8 +571,8 @@ public class MiniDFSCluster {
    * @param format if true, format the NameNode and DataNodes before starting 
    *          up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -586,8 +603,8 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -620,11 +637,11 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   *          be created and {@link #DFS_DATANODE_DATA_DIR_KEY} 
+   *          be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
    *          set to same in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -643,7 +660,7 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
-    initMiniDFSCluster(conf, numDataNodes, StorageType.DEFAULT, format,
+    initMiniDFSCluster(conf, numDataNodes, null, format,
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, 
         operation, null, racks, hosts,
         simulatedCapacities, null, true, false,
@@ -652,7 +669,7 @@ public class MiniDFSCluster {
 
   private void initMiniDFSCluster(
       Configuration conf,
-      int numDataNodes, StorageType storageType, boolean format, boolean manageNameDfsDirs,
+      int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
       boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
       boolean manageDataDfsDirs, StartupOption startOpt,
       StartupOption dnStartOpt, String[] racks,
@@ -725,7 +742,7 @@ public class MiniDFSCluster {
       }
 
       // Start the DataNodes
-      startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
+      startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
           dnStartOpt != null ? dnStartOpt : startOpt,
           racks, hosts, simulatedCapacities, setupHostsFile,
           checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
@@ -1100,15 +1117,18 @@ public class MiniDFSCluster {
     }
   }
 
-  String makeDataNodeDirs(int dnIndex, StorageType storageType) throws IOException {
+  String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
     StringBuilder sb = new StringBuilder();
+    assert storageTypes == null || storageTypes.length == DIRS_PER_DATANODE;
     for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
         throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
       }
-      sb.append((j > 0 ? "," : "") + "[" + storageType + "]" + fileAsURI(dir));
+      sb.append((j > 0 ? "," : "") + "[" +
+          (storageTypes == null ? StorageType.DEFAULT : storageTypes[j]) +
+          "]" + fileAsURI(dir));
     }
     return sb.toString();
   }
@@ -1127,7 +1147,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
    *          in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1159,7 +1179,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
    *          set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1175,21 +1195,17 @@ public class MiniDFSCluster {
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, false, false, null);
   }
 
-  /**
-   * @see MiniDFSCluster#startDataNodes(Configuration, int, boolean, StartupOption,
-   * String[], String[], long[], boolean, boolean, boolean)
-   */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       boolean manageDfsDirs, StartupOption operation, 
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
   }
 
@@ -1207,7 +1223,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
    *          set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1222,13 +1238,15 @@ public class MiniDFSCluster {
    * @throws IllegalStateException if NameNode has been shutdown
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
+    assert storageTypes == null || storageTypes.length == numDataNodes;
+
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -1289,7 +1307,7 @@ public class MiniDFSCluster {
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        String dirs = makeDataNodeDirs(i, storageType);
+        String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
         dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
@@ -2173,7 +2191,7 @@ public class MiniDFSCluster {
   }
 
   /**
-   * Multiple-NameNode version of {@link #injectBlocks(Iterable[])}.
+   * Multiple-NameNode version of injectBlocks.
    */
   public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
       Iterable<Block> blocksToInject) throws IOException {

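For heterogeneous layouts, the StorageType[][] overload is indexed first by
datanode and then by directory: the outer length must equal numDataNodes and
each inner array must have DIRS_PER_DATANODE entries, matching the asserts
added above. A sketch reusing the setup from the previous example (types are
illustrative):

  // Sketch only: per-datanode storage layouts via the StorageType[][] overload.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.DISK },  // datanode 0
          { StorageType.SSD,  StorageType.DISK }   // datanode 1
      })
      .build();

With managed data directories, makeDataNodeDirs now prefixes each directory URI
with its storage type, so dfs.datanode.data.dir takes the bracketed form
[SSD]file:/...,[DISK]file:/...; a null storageTypes argument falls back to
StorageType.DEFAULT for every directory.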
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1619271&r1=1619270&r2=1619271&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java Thu Aug 21 00:42:27 2014
@@ -50,12 +50,14 @@ public class MiniDFSClusterWithNodeGroup
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig) throws IOException {
+    assert storageTypes == null || storageTypes.length == numDataNodes;
+
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -112,7 +114,7 @@ public class MiniDFSClusterWithNodeGroup
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        String dirs = makeDataNodeDirs(i, storageType);
+        String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
         dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
@@ -190,7 +192,7 @@ public class MiniDFSClusterWithNodeGroup
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, nodeGroups,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
         hosts, simulatedCapacities, setupHostsFile, false, false);
   }
 
@@ -205,14 +207,14 @@ public class MiniDFSClusterWithNodeGroup
   // This is for initialize from parent class.
   @Override
   public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
-    startDataNodes(conf, numDataNodes, storageType, manageDfsDirs, operation, racks,
+    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
         NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile, 
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java?rev=1619271&r1=1619270&r2=1619271&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java Thu Aug 21 00:42:27 2014
@@ -58,7 +58,7 @@ public class TestStorageReport {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(REPL_FACTOR)
-        .storageType(storageType)
+        .storageTypes(new StorageType[] { storageType, storageType } )
         .build();
     fs = cluster.getFileSystem();
     bpid = cluster.getNamesystem().getBlockPoolId();
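
The TestStorageReport change above simply passes the test's storageType for
both directories of every datanode. A sketch of how a test might read those
types back through the storage reports; the FsDatasetSpi#getStorageReports(String)
and DatanodeStorage#getStorageType() calls are assumptions about the test-side
API at this revision, not something this commit touches:

  // Sketch only: verify each volume reports the expected storage type.
  // Assumes the FsDatasetSpi/StorageReport accessors of this era.
  String bpid = cluster.getNamesystem().getBlockPoolId();
  for (DataNode dn : cluster.getDataNodes()) {
    for (StorageReport report : dn.getFSDataset().getStorageReports(bpid)) {
      assertEquals(storageType, report.getStorage().getStorageType());
    }
  }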