Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2008/12/12 23:02:58 UTC

svn commit: r726129 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/hdfs/org/apache/hadoop/hdfs/tools/ src/test/org/apache/hadoop/hdfs/server/na...

Author: shv
Date: Fri Dec 12 14:02:58 2008
New Revision: 726129

URL: http://svn.apache.org/viewvc?rev=726129&view=rev
Log:
HADOOP-4826. Introduce admin command saveNamespace. Contributed by Konstantin Shvachko.
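
As a quick illustration (not part of the commit itself), here is a minimal client-side sketch of the sequence the new command relies on, mirroring the steps exercised in TestCheckpoint below: enter safe mode, save the namespace, then leave safe mode. The class name SaveNamespaceExample is hypothetical, fs.default.name is assumed to point at the target namenode, and saveNamespace() must be run with superuser privileges.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;

public class SaveNamespaceExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // fs.default.name in the loaded configuration is assumed to point at the namenode.
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IOException("Not an HDFS file system: " + fs.getUri());
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // saveNamespace() requires the namenode to be in safe mode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      dfs.saveNamespace();   // writes a new fsimage and resets the edits log
    } finally {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
    fs.close();
  }
}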

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Dec 12 14:02:58 2008
@@ -84,6 +84,8 @@
 
     HADOOP-4348. Add service-level authorization for Hadoop. (acmurthy) 
 
+    HADOOP-4826. Introduce admin command saveNamespace. (shv)
+
   IMPROVEMENTS
 
     HADOOP-4749. Added a new counter REDUCE_INPUT_BYTES. (Yongqiang He via 
@@ -395,7 +397,7 @@
     HADOOP-4727. Fix a group checking bug in fill_stat_structure(...) in
     fuse-dfs.  (Brian Bockelman via szetszwo)
 
-    HADOOP-4836. Correct typos in mapred related documentation.  (Jordà Polo
+    HADOOP-4836. Correct typos in mapred related documentation.  (Jord? Polo
     via szetszwo)
 
 Release 0.19.0 - 2008-11-18

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Fri Dec 12 14:02:58 2008
@@ -471,7 +471,7 @@
    * @param progress for reporting write-progress
    * @return an output stream for writing into the file
    * @throws IOException
-   * @see {@link ClientProtocol#append(String, String)}
+   * @see ClientProtocol#append(String, String)
    */
   OutputStream append(String src, int buffersize, Progressable progress
       ) throws IOException {
@@ -773,6 +773,21 @@
   }
 
   /**
+   * Save namespace image.
+   * See {@link ClientProtocol#saveNamespace()} 
+   * for more details.
+   * 
+   * @see ClientProtocol#saveNamespace()
+   */
+  void saveNamespace() throws AccessControlException, IOException {
+    try {
+      namenode.saveNamespace();
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
+  }
+
+  /**
    * Refresh the hosts and exclude files.  (Rereads them.)
    * See {@link ClientProtocol#refreshNodes()} 
    * for more details.

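As a caller-facing illustration (not part of the commit), the unwrapRemoteException() call above means an application can catch the typed AccessControlException directly instead of inspecting a RemoteException. A minimal sketch, assuming dfs is an already-open DistributedFileSystem handle and using a hypothetical helper name:

import java.io.IOException;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.AccessControlException;

class SaveNamespaceCaller {
  // Returns 0 on success, -1 on failure, following the DFSAdmin convention below.
  static int trySaveNamespace(DistributedFileSystem dfs) {
    try {
      dfs.saveNamespace();
      return 0;
    } catch (AccessControlException ace) {
      // Raised when the caller lacks superuser privilege; DFSClient unwraps it
      // from the server-side RemoteException as shown in the hunk above.
      System.err.println("saveNamespace denied: " + ace.getMessage());
      return -1;
    } catch (IOException ioe) {
      // Remaining failures, e.g. the namenode is not in safe mode.
      System.err.println("saveNamespace failed: " + ioe.getMessage());
      return -1;
    }
  }
}
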
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Dec 12 14:02:58 2008
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.*;
 
 
@@ -329,7 +330,16 @@
     return dfs.setSafeMode(action);
   }
 
-  /*
+  /**
+   * Save namespace image.
+   * 
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
+   */
+  public void saveNamespace() throws AccessControlException, IOException {
+    dfs.saveNamespace();
+  }
+
+  /**
    * Refreshes the list of hosts and excluded hosts from the configured 
    * files.  
    */

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Dec 12 14:02:58 2008
@@ -40,9 +40,9 @@
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 40: added disk space quotas.
+   * 41: saveNamespace introduced.
    */
-  public static final long versionID = 40L;
+  public static final long versionID = 41L;
   
   ///////////////////////////////////////
   // File contents
@@ -361,6 +361,17 @@
   public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
 
   /**
+   * Save namespace image.
+   * <p>
+   * Saves the current namespace into storage directories and resets the edits log.
+   * Requires superuser privilege and safe mode.
+   * 
+   * @throws AccessControlException if the superuser privilege is violated.
+   * @throws IOException if image creation failed.
+   */
+  public void saveNamespace() throws IOException;
+
+  /**
    * Tells the namenode to reread the hosts and exclude files. 
    * @throws IOException
    */

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Dec 12 14:02:58 2008
@@ -1384,7 +1384,7 @@
    * Allocate a block at the given pending filename
    * 
    * @param src path to the file
-   * @param indoes INode representing each of the components of src. 
+   * @param inodes INode representing each of the components of src. 
    *        <code>inodes[inodes.length-1]</code> is the INode for the file.
    */
   private Block allocateBlock(String src, INode[] inodes) throws IOException {
@@ -1764,7 +1764,7 @@
   /**
    * Move a file that is being written to be immutable.
    * @param src The filename
-   * @param holder The datanode that was creating the file
+   * @param lease The lease for the client creating the file
    */
   void internalReleaseLease(Lease lease, String src) throws IOException {
     LOG.info("Recovering lease=" + lease + ", src=" + src);
@@ -3372,7 +3372,25 @@
     }
     return arr;
   }
-    
+
+  /**
+   * Save namespace image.
+   * This will save the current namespace into the fsimage file and empty the edits file.
+   * Requires superuser privilege and safe mode.
+   * 
+   * @throws AccessControlException if superuser privilege is violated.
+   * @throws IOException if image creation failed.
+   */
+  synchronized void saveNamespace() throws AccessControlException, IOException {
+    checkSuperuserPrivilege();
+    if(!isInSafeMode()) {
+      throw new IOException("Safe mode should be turned ON " +
+                            "in order to create namespace image.");
+    }
+    getFSImage().saveFSImage();
+    LOG.info("New namespace image has been created.");
+  }
+
   /**
    */
   public synchronized void DFSNodesStatus(ArrayList<DatanodeDescriptor> live, 

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Dec 12 14:02:58 2008
@@ -24,7 +24,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -595,7 +596,14 @@
     return namesystem.isInSafeMode();
   }
 
-  /*
+  /**
+   * {@inheritDoc}
+   */
+  public void saveNamespace() throws IOException {
+    namesystem.saveNamespace();
+  }
+
+  /**
    * Refresh the list of datanodes that the namenode should allow to  
    * connect.  Re-reads conf by creating new Configuration object and 
    * uses the files list in the configuration to update the list. 

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Dec 12 14:02:58 2008
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
@@ -364,6 +363,27 @@
   }
 
   /**
+   * Command to ask the namenode to save the namespace.
+   * Usage: java DFSAdmin -saveNamespace
+   * @exception IOException 
+   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
+   */
+  public int saveNamespace() throws IOException {
+    int exitCode = -1;
+
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("FileSystem is " + fs.getUri());
+      return exitCode;
+    }
+
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    dfs.saveNamespace();
+    exitCode = 0;
+   
+    return exitCode;
+  }
+
+  /**
    * Command to ask the namenode to reread the hosts and excluded hosts 
    * file.
    * Usage: java DFSAdmin -refreshNodes
@@ -373,7 +393,7 @@
     int exitCode = -1;
 
     if (!(fs instanceof DistributedFileSystem)) {
-      System.err.println("FileSystem is " + fs.getName());
+      System.err.println("FileSystem is " + fs.getUri());
       return exitCode;
     }
 
@@ -388,6 +408,7 @@
     String summary = "hadoop dfsadmin is the command to execute DFS administrative commands.\n" +
       "The full syntax is: \n\n" +
       "hadoop dfsadmin [-report] [-safemode <enter | leave | get | wait>]\n" +
+      "\t[-saveNamespace]\n" +
       "\t[-refreshNodes]\n" +
       "\t[" + SetQuotaCommand.USAGE + "]\n" +
       "\t[" + ClearQuotaCommand.USAGE +"]\n" +
@@ -408,6 +429,10 @@
       "\t\tcondition.  Safe mode can also be entered manually, but then\n" +
       "\t\tit can only be turned off manually as well.\n";
 
+    String saveNamespace = "-saveNamespace:\t" +
+    "Save current namespace into storage directories and reset edits log.\n" +
+    "\t\tRequires superuser permissions and safe mode.\n";
+
     String refreshNodes = "-refreshNodes: \tUpdates the set of hosts allowed " +
                           "to connect to namenode.\n\n" +
       "\t\tRe-reads the config file to update values defined by \n" +
@@ -419,7 +444,7 @@
       "\t\tdecommissioning if it has already been marked for decommission.\n" + 
       "\t\tEntries not present in both the lists are decommissioned.\n";
 
-    String finalizeUpgrade = "-finalizeUpgrade: Finalize upgrade of DFS.\n" +
+    String finalizeUpgrade = "-finalizeUpgrade: Finalize upgrade of HDFS.\n" +
       "\t\tDatanodes delete their previous version working directories,\n" +
       "\t\tfollowed by Namenode doing the same.\n" + 
       "\t\tThis completes the upgrade process.\n";
@@ -446,6 +471,8 @@
       System.out.println(report);
     } else if ("safemode".equals(cmd)) {
       System.out.println(safemode);
+    } else if ("saveNamespace".equals(cmd)) {
+      System.out.println(saveNamespace);
     } else if ("refreshNodes".equals(cmd)) {
       System.out.println(refreshNodes);
     } else if ("finalizeUpgrade".equals(cmd)) {
@@ -470,6 +497,7 @@
       System.out.println(summary);
       System.out.println(report);
       System.out.println(safemode);
+      System.out.println(saveNamespace);
       System.out.println(refreshNodes);
       System.out.println(finalizeUpgrade);
       System.out.println(upgradeProgress);
@@ -609,6 +637,9 @@
     } else if ("-safemode".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-safemode enter | leave | get | wait]");
+    } else if ("-saveNamespace".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-saveNamespace]");
     } else if ("-refreshNodes".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshNodes]");
@@ -640,6 +671,7 @@
       System.err.println("Usage: java DFSAdmin");
       System.err.println("           [-report]");
       System.err.println("           [-safemode enter | leave | get | wait]");
+      System.err.println("           [-saveNamespace]");
       System.err.println("           [-refreshNodes]");
       System.err.println("           [-finalizeUpgrade]");
       System.err.println("           [-upgradeProgress status | details | force]");
@@ -685,6 +717,11 @@
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-saveNamespace".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-refreshNodes".equals(cmd)) {
       if (argv.length != 1) {
         printUsage(cmd);
@@ -730,6 +767,8 @@
         report();
       } else if ("-safemode".equals(cmd)) {
         setSafeMode(argv, i);
+      } else if ("-saveNamespace".equals(cmd)) {
+        exitCode = saveNamespace();
       } else if ("-refreshNodes".equals(cmd)) {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {

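As a usage note (not part of the commit), the new switch is normally invoked from the shell as "hadoop dfsadmin -saveNamespace" while the namenode is in safe mode. It can also be driven programmatically, which is what TestCheckpoint below does; a minimal sketch with a hypothetical class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class SaveNamespaceViaDFSAdmin {
  public static void main(String[] argv) throws Exception {
    Configuration conf = new Configuration();  // picks up the cluster configuration
    DFSAdmin admin = new DFSAdmin(conf);
    // run() dispatches "-saveNamespace" to the saveNamespace() method added above
    // and returns its exit code: 0 on success, -1 on failure.
    int exitCode = admin.run(new String[] {"-saveNamespace"});
    System.exit(exitCode);
  }
}
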
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=726129&r1=726128&r2=726129&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Dec 12 14:02:58 2008
@@ -23,15 +23,18 @@
 import java.util.List;
 import java.util.Iterator;
 import java.util.Random;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -176,7 +179,6 @@
     // Also check that the edits file is empty here
     // and that temporary checkpoint files are gone.
     FSImage image = cluster.getNameNode().getFSImage();
-    int nrDirs = image.getNumStorageDirs();
     for (Iterator<StorageDirectory> it = 
              image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -558,7 +560,7 @@
   }
 
   /**
-   * Tests checkpoint in DFS.
+   * Tests checkpoint in HDFS.
    */
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
@@ -651,4 +653,62 @@
     testSecondaryFailsToReturnImage(conf);
     testStartup(conf);
   }
+
+  /**
+   * Tests saveNamespace.
+   */
+  public void testSaveNamespace() throws IOException {
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)(cluster.getFileSystem());
+
+      // Saving image without safe mode should fail
+      DFSAdmin admin = new DFSAdmin(conf);
+      String[] args = new String[]{"-saveNamespace"};
+      try {
+        admin.run(args);
+      } catch(IOException eIO) {
+        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
+      } catch(Exception e) {
+        throw new IOException(e);
+      }
+      // create new file
+      Path file = new Path("namespace.dat");
+      writeFile(fs, file, replication);
+      checkFile(fs, file, replication);
+      // verify that the edits file is NOT empty
+      Collection<File> editsDirs = cluster.getNameEditsDirs();
+      for(File ed : editsDirs) {
+        assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
+      }
+
+      // Saving image in safe mode should succeed
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      try {
+        admin.run(args);
+      } catch(Exception e) {
+        throw new IOException(e);
+      }
+      // verify that the edits file is empty
+      for(File ed : editsDirs) {
+        assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
+      }
+
+      // restart cluster and verify file exists
+      cluster.shutdown();
+      cluster = null;
+
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)(cluster.getFileSystem());
+      checkFile(fs, file, replication);
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster!= null) cluster.shutdown();
+    }
+  }
 }