Posted to hdfs-commits@hadoop.apache.org by jg...@apache.org on 2010/11/17 06:10:58 UTC

svn commit: r1035920 - in /hadoop/hdfs/trunk: ./ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/

Author: jghoman
Date: Wed Nov 17 05:10:58 2010
New Revision: 1035920

URL: http://svn.apache.org/viewvc?rev=1035920&view=rev
Log:
HDFS-718. Configuration parameter to prevent accidental formatting of HDFS filesystem. Contributed by Andrew Ryan.
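
In short: the namenode now consults a new boolean key, dfs.namenode.support.allow.format
(default true), before formatting; with the key set to false, NameNode.format() throws an
IOException instead of destroying the existing namespace. A minimal sketch of the intended
use, with the key set programmatically only for illustration (a real cluster would set it
in hdfs-site.xml; the one-argument NameNode.format(Configuration) overload is the same one
the new test below calls):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class FormatGuardSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // A production cluster would set this to false in hdfs-site.xml.
        conf.setBoolean("dfs.namenode.support.allow.format", false);
        try {
          NameNode.format(conf);            // refused: throws IOException
        } catch (IOException expected) {
          System.err.println("Format blocked: " + expected.getMessage());
        }
      }
    }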

Added:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/hdfs-default.xml
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1035920&r1=1035919&r2=1035920&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Nov 17 05:10:58 2010
@@ -206,6 +206,9 @@ Trunk (unreleased changes)
     HDFS-1055. Improve thread naming for DataXceivers. 
     (Todd Lipcon and Ramkumar Vadali via eli).
 
+    HDFS-718. Configuration parameter to prevent accidental formatting of 
+    HDFS filesystem. (Andrew Ryan via jghoman)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

Modified: hadoop/hdfs/trunk/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/hdfs-default.xml?rev=1035920&r1=1035919&r2=1035920&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/trunk/src/java/hdfs-default.xml Wed Nov 17 05:10:58 2010
@@ -554,4 +554,13 @@ creations/deletions), or "all".</descrip
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.support.allow.format</name>
+  <value>true</value>
+  <description>Does HDFS namenode allow itself to be formatted?
+               You may consider setting this to false for any production
+               cluster, to avoid any possibility of formatting a running DFS.
+  </description>
+</property>
+
 </configuration>
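
The default shipped in hdfs-default.xml stays true, so existing clusters keep their current
behaviour; a production site disables formatting by redefining the same property in
hdfs-site.xml. A hedged sketch of how the effective value is resolved (the explicit
addResource calls are for illustration only; a running namenode picks these files up from
its configuration directory):

    import org.apache.hadoop.conf.Configuration;

    public class ResolveAllowFormat {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.addResource("hdfs-default.xml"); // ships dfs.namenode.support.allow.format=true
        conf.addResource("hdfs-site.xml");    // site override, e.g. false in production
        boolean allowFormat =
            conf.getBoolean("dfs.namenode.support.allow.format", true);
        System.out.println("formatting allowed: " + allowFormat);
      }
    }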

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1035920&r1=1035919&r2=1035920&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Nov 17 05:10:58 2010
@@ -92,6 +92,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
   public static final String  DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
   public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
+  public static final String  DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY = "dfs.namenode.support.allow.format";
+  public static final boolean DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT = true;
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
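
As with the other entries in DFSConfigKeys, callers are expected to reference the new
constants rather than repeat the string literal; the NameNode change below does exactly
that through static imports. A minimal sketch, assuming a Configuration instance named
conf is already in scope:

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT;

    boolean allowFormat = conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
                                          DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT);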

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1035920&r1=1035919&r2=1035920&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Nov 17 05:10:58 2010
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -1369,6 +1372,15 @@ public class NameNode implements Namenod
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded)
       throws IOException {
+    if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
+                         DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
+      throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+                             + " is set to false for this filesystem, so it "
+                             + "cannot be formatted. You will need to set "
+                             + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+                             + "to true in order to format this filesystem");
+    }
+    
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
     Collection<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
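
Because the check sits at the very top of format(), it applies regardless of the
isConfirmationNeeded flag, so neither the interactive nor the scripted path can bypass it.
A tool that formats namespaces programmatically could also fail fast before calling
format(); a hedged sketch (the preFlightFormatCheck helper is illustrative only and not
part of this patch; it assumes the same static imports used in NameNode.java above):

    // Hypothetical helper, not part of this patch: surface a clear error before
    // any format attempt instead of relying on the exception thrown by format().
    static void preFlightFormatCheck(Configuration conf) throws IOException {
      if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
                           DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
        throw new IOException(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
            + " is false; refusing to format this namespace.");
      }
    }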

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java?rev=1035920&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java Wed Nov 17 05:10:58 2010
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Startup and format tests
+ * 
+ */
+public class TestAllowFormat {
+  public static final String NAME_NODE_HOST = "localhost:";
+  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+  private static final Log LOG =
+    LogFactory.getLog(TestAllowFormat.class.getName());
+  private static Configuration config;
+  private static MiniDFSCluster cluster = null;
+  private static File hdfsDir=null;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    config = new Configuration();
+    String baseDir = System.getProperty("test.build.data", "build/test/data");
+
+    hdfsDir = new File(baseDir, "dfs");
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory '" + hdfsDir +
+                            "'");
+    }
+    LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+    config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
+    config.set(DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
+
+    config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
+
+    FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+  }
+
+  /**
+   * clean up
+   */
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (cluster!=null) {
+      cluster.shutdown();
+      LOG.info("Stopping mini cluster");
+    }
+    
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory in tearDown '"
+                            + hdfsDir + "'");
+    }	
+  }
+
+  /**
+   * Start a MiniDFSCluster and try formatting with different settings.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testAllowFormat() throws IOException {
+    LOG.info("--starting mini cluster");
+    // manage dirs parameter set to false 
+
+    NameNode nn;
+    // 1. Create a new cluster and format DFS
+    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .build();
+    cluster.waitActive();
+    assertNotNull(cluster);
+
+    nn = cluster.getNameNode();
+    assertNotNull(nn);
+    LOG.info("Mini cluster created OK");
+    
+    // 2. Try formatting DFS with allowformat false.
+    // NOTE: the cluster must be shut down for format to work.
+    LOG.info("Verifying format will fail with allowformat false");
+    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
+    try {
+      cluster.shutdown();
+      NameNode.format(config);
+      fail("Format succeeded, when it should have failed");
+    } catch (IOException e) { // expected to fail
+      // Verify we got message we expected
+      assertTrue("Exception was not about formatting Namenode", 
+          e.getMessage().startsWith("The option " + 
+                                    DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
+      LOG.info("Expected failure: " + StringUtils.stringifyException(e));
+      LOG.info("Done verifying format will fail with allowformat false");
+    }
+    // 3. Try formatting DFS with allowformat true
+    LOG.info("Verifying format will succeed with allowformat true");
+    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    NameNode.format(config);
+    LOG.info("Done verifying format will succeed with allowformat true");
+  }
+}