Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2008/05/19 22:35:35 UTC

svn commit: r657965 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/FSImage.java src/test/org/apache/hadoop/dfs/TestRestartDFS.java

Author: hairong
Date: Mon May 19 13:35:34 2008
New Revision: 657965

URL: http://svn.apache.org/viewvc?rev=657965&view=rev
Log:
HADOOP-3409. Namenode should save the root inode into fsimage. Contributed by Hairong Kuang.
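
For context: before this change, saveFSImage wrote fsDir.rootDir.numItemsInTree() - 1 and serialized only the root's descendants, so the root inode's modification time and permission status never reached the fsimage and were lost across a namenode restart. The patch counts the root in the inode total and writes it as the first record, with an empty name. A minimal sketch of the resulting header and record order; the layout-version value and class name here are illustrative stand-ins, not the real FSImage code:

import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative sketch of the fsimage header and record order after this
// patch; the layout version written below is a stand-in value.
class FsImageOrderSketch {
  static void writeHeader(DataOutputStream out, int namespaceId,
                          int inodeCountIncludingRoot,
                          long generationStamp) throws IOException {
    out.writeInt(-16);                      // layout version (stand-in)
    out.writeInt(namespaceId);
    out.writeInt(inodeCountIncludingRoot);  // now includes the root (no "- 1")
    out.writeLong(generationStamp);
    // record 1: the root inode, serialized with an empty name so the
    //           loader can recognize it (saveINode2Image)
    // records 2..n: the rest of the tree, depth-first (saveImage)
  }
}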

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=657965&r1=657964&r2=657965&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon May 19 13:35:34 2008
@@ -282,6 +282,8 @@
     are not detected properly. This could lead to hard failure of client's
     write operation. (rangadi)
 
+    HADOOP-3409. Namenode should save the root inode into fsimage. (hairong)
+
 Release 0.17.0 - 2008-05-18
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java?rev=657965&r1=657964&r2=657965&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java Mon May 19 13:35:34 2008
@@ -773,6 +773,13 @@
         if (imgVersion <= -11) {
           permissions = PermissionStatus.read(in);
         }
+        // check if this is a root node
+        if (path.length() == 0) {
+          // update the root's attributes
+          fsDir.rootDir.setModificationTime(modificationTime);
+          fsDir.rootDir.setPermissionStatus(permissions);
+          continue;
+        }
         // check if the new inode belongs to the same parent
         if(!isParent(path, parentPath)) {
           parentINode = null;
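
On the load side, the root is the only inode serialized with a zero-length name, so the loader treats an empty path as "restore the attributes of the existing root" rather than as a new inode to insert, as the hunk above shows. A compilable sketch of that branch; the stub type and the readUTF name encoding are stand-ins for Hadoop's FSDirectory, PermissionStatus, and the image's actual short-length-plus-bytes name format:

import java.io.DataInputStream;
import java.io.IOException;

// Sketch only: RootStub stands in for fsDir.rootDir, a short stands in
// for PermissionStatus, and image-version checks are omitted.
class RootRecordLoadSketch {
  static void loadRecord(DataInputStream in, RootStub root) throws IOException {
    String path = in.readUTF();          // "" identifies the root record
    in.readShort();                      // replication (0 for directories)
    long modificationTime = in.readLong();
    in.readLong();                       // preferred block size (0 for dirs)
    in.readInt();                        // block count (-1 marks a directory)
    short permissions = in.readShort();  // stand-in for PermissionStatus.read
    if (path.length() == 0) {
      // The root already exists in the directory tree; just update it.
      root.modificationTime = modificationTime;
      root.permissions = permissions;
      return;
    }
    // ... non-root records fall through to the normal child insertion
  }

  static class RootStub {
    long modificationTime;
    short permissions;
  }
}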
@@ -840,10 +847,13 @@
     try {
       out.writeInt(FSConstants.LAYOUT_VERSION);
       out.writeInt(namespaceID);
-      out.writeInt(fsDir.rootDir.numItemsInTree() - 1);
+      out.writeInt(fsDir.rootDir.numItemsInTree());
       out.writeLong(fsNamesys.getGenerationStamp());
       byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH];
       ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
+      // save the root
+      saveINode2Image(strbuf, fsDir.rootDir, out);
+      // save the rest of the nodes
       saveImage(strbuf, 0, fsDir.rootDir, out);
       fsNamesys.saveFilesUnderConstruction(out);
       strbuf = null;
@@ -921,6 +931,39 @@
     }
   }
 
+  /*
+   * Save one inode's attributes to the image.
+   */
+  private static void saveINode2Image(ByteBuffer name,
+                                      INode node,
+                                      DataOutputStream out) throws IOException {
+    int nameLen = name.position();
+    out.writeShort(nameLen);
+    out.write(name.array(), name.arrayOffset(), nameLen);
+    if (!node.isDirectory()) {  // write file inode
+      INodeFile fileINode = (INodeFile)node;
+      out.writeShort(fileINode.getReplication());
+      out.writeLong(fileINode.getModificationTime());
+      out.writeLong(fileINode.getPreferredBlockSize());
+      Block[] blocks = fileINode.getBlocks();
+      out.writeInt(blocks.length);
+      for (Block blk : blocks)
+        blk.write(out);
+      FILE_PERM.fromShort(fileINode.getFsPermissionShort());
+      PermissionStatus.write(out, fileINode.getUserName(),
+                             fileINode.getGroupName(),
+                             FILE_PERM);
+    } else {   // write directory inode
+      out.writeShort(0);  // replication
+      out.writeLong(node.getModificationTime());
+      out.writeLong(0);   // preferred block size
+      out.writeInt(-1);    // # of blocks
+      FILE_PERM.fromShort(node.getFsPermissionShort());
+      PermissionStatus.write(out, node.getUserName(),
+                             node.getGroupName(),
+                             FILE_PERM);
+    }
+  }
   /**
    * Save file tree image starting from the given root.
    * This is a recursive procedure, which first saves all children of
@@ -934,37 +977,10 @@
     if (current.getChildrenRaw() == null)
       return;
     for(INode child : current.getChildren()) {
-    // print all children first
+      // print all children first
       parentPrefix.position(prefixLength);
       parentPrefix.put(PATH_SEPARATOR).put(child.getLocalNameBytes());
-      newPrefixLength = parentPrefix.position();
-      out.writeShort(newPrefixLength);
-      out.write(parentPrefix.array(), parentPrefix.arrayOffset(),
-                newPrefixLength);
-      if (!child.isDirectory()) {  // write file inode
-        INodeFile fileINode = (INodeFile)child;
-        out.writeShort(fileINode.getReplication());
-        out.writeLong(fileINode.getModificationTime());
-        out.writeLong(fileINode.getPreferredBlockSize());
-        Block[] blocks = fileINode.getBlocks();
-        out.writeInt(blocks.length);
-        for (Block blk : blocks)
-          blk.write(out);
-        FILE_PERM.fromShort(fileINode.getFsPermissionShort());
-        PermissionStatus.write(out, fileINode.getUserName(),
-                               fileINode.getGroupName(),
-                               FILE_PERM);
-        continue;
-      }
-      // write directory inode
-      out.writeShort(0);  // replication
-      out.writeLong(child.getModificationTime());
-      out.writeLong(0);   // preferred block size
-      out.writeInt(-1);    // # of blocks
-      FILE_PERM.fromShort(child.getFsPermissionShort());
-      PermissionStatus.write(out, child.getUserName(),
-                             child.getGroupName(),
-                             FILE_PERM);
+      saveINode2Image(parentPrefix, child, out);
     }
     for(INode child : current.getChildren()) {
       if(!child.isDirectory())

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java?rev=657965&r1=657964&r2=657965&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java Mon May 19 13:35:34 2008
@@ -48,10 +48,13 @@
   public void testRestartDFS() throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
+    Path root = new Path("/");
+    long modificationTime;
     try {
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, "/srcdat");
+      modificationTime = fs.getFileStatus(root).getModificationTime();
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
@@ -59,6 +62,8 @@
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster(conf, 4, false, null);
       FileSystem fs = cluster.getFileSystem();
+      assertEquals(modificationTime,
+                   fs.getFileStatus(root).getModificationTime());
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, "/srcdat"));
       files.cleanup(fs, "/srcdat");
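
Before this patch the new assertEquals would presumably have failed: with no root record in the saved image, a restarted namenode would rebuild the root with fresh attributes instead of the persisted modification time. For a manual spot check against a live cluster (this assumes an HDFS configuration on the classpath; the class name is hypothetical), the same FileStatus API the test uses can be driven directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Prints the root's modification time; run once before and once after a
// namenode restart. The two values should now match.
class RootMtimeCheck {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(fs.getFileStatus(new Path("/")).getModificationTime());
  }
}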