Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2010/10/29 22:54:18 UTC

svn commit: r1028906 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: eli
Date: Fri Oct 29 20:54:17 2010
New Revision: 1028906

URL: http://svn.apache.org/viewvc?rev=1028906&view=rev
Log:
Revert HDFS-259, need to update LAYOUT_VERSION.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Oct 29 20:54:17 2010
@@ -154,9 +154,6 @@ Trunk (unreleased changes)
     HADOOP-7007. Update the hudson-test-patch ant target to work with the
     latest test-patch.sh script (gkesavan)
 
-    HDFS-259. Remove intentionally corrupt 0.13 directory layout creation.
-    (Todd Lipcon via eli)
-
     HDFS-1462. Refactor edit log loading to a separate class from edit log writing.
     (Todd Lipcon via eli)
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Fri Oct 29 20:54:17 2010
@@ -64,7 +64,7 @@ public abstract class Storage extends St
 
   // Constants
   
-  // last layout version that did not support upgrades
+  // last layout version that did not suppot upgrades
   protected static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
   
   // this corresponds to Hadoop-0.14.
@@ -80,7 +80,7 @@ public abstract class Storage extends St
   
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
-  public    static final String STORAGE_DIR_CURRENT   = "current";
+  public static final String STORAGE_DIR_CURRENT   = "current";
   private   static final String STORAGE_DIR_PREVIOUS  = "previous";
   private   static final String STORAGE_TMP_REMOVED   = "removed.tmp";
   private   static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
@@ -256,6 +256,7 @@ public abstract class Storage extends St
      * @throws IOException
      */
     public void write() throws IOException {
+      corruptPreUpgradeStorage(root);
       write(getVersionFile());
     }
 
@@ -471,7 +472,8 @@ public abstract class Storage extends St
       if (startOpt == HdfsConstants.StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
       if (startOpt != HdfsConstants.StartupOption.IMPORT) {
-        checkOldLayoutStorage(this);
+        //make sure no conversion is required
+        checkConversionNeeded(this);
       }
 
       // check whether current directory is valid
@@ -684,22 +686,16 @@ public abstract class Storage extends St
   protected void addStorageDir(StorageDirectory sd) {
     storageDirs.add(sd);
   }
+  
+  public abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
 
-  /**
-   * Return true if the layout of the given storage directory is from a version
-   * of Hadoop prior to the introduction of the "current" and "previous"
-   * directories which allow upgrade and rollback.
-   */
-  public abstract boolean isPreUpgradableLayout(StorageDirectory sd)
-  throws IOException;
-
-  /**
-   * Check if the given storage directory comes from a version of Hadoop
-   * prior to when the directory layout changed (ie 0.13). If this is
-   * the case, this method throws an IOException.
-   */
-  private void checkOldLayoutStorage(StorageDirectory sd) throws IOException {
-    if (isPreUpgradableLayout(sd)) {
+  /*
+   * Coversion is no longer supported. So this should throw exception if
+   * conversion is needed.
+   */
+  private void checkConversionNeeded(StorageDirectory sd) throws IOException {
+    if (isConversionNeeded(sd)) {
+      //throw an exception
       checkVersionUpgradable(0);
     }
   }
@@ -855,4 +851,20 @@ public abstract class Storage extends St
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
   }
+
+  // Pre-upgrade version compatibility
+  protected abstract void corruptPreUpgradeStorage(File rootDir) throws IOException;
+
+  protected void writeCorruptedData(RandomAccessFile file) throws IOException {
+    final String messageForPreUpgradeVersion =
+      "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
+      + "of Hadoop prior to 0.13 (which are incompatible\n"
+      + "with this directory layout) will fail to start.\n";
+  
+    file.seek(0);
+    file.writeInt(FSConstants.LAYOUT_VERSION);
+    org.apache.hadoop.hdfs.DeprecatedUTF8.writeString(file, "");
+    file.writeBytes(messageForPreUpgradeVersion);
+    file.getFD().sync();
+  }
 }

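The restored corruptPreUpgradeStorage()/writeCorruptedData() pair is the heart
of this revert: before VERSION is written, each storage root gets a marker file
whose leading int is the current LAYOUT_VERSION, so pre-0.13 versions of Hadoop
that parse that int refuse to start. A minimal standalone sketch of the format
follows; the LAYOUT_VERSION value here is an assumption for illustration (the
real value comes from FSConstants), and writeUTF("") stands in for
DeprecatedUTF8.writeString(file, ""), which likewise emits a two-byte zero
length for the empty string.

    import java.io.IOException;
    import java.io.RandomAccessFile;

    /** Sketch of the marker format written by writeCorruptedData(). */
    public class CorruptMarkerSketch {
      static final int LAYOUT_VERSION = -24; // assumed; really FSConstants.LAYOUT_VERSION

      public static void writeMarker(RandomAccessFile file) throws IOException {
        final String message =
            "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";
        file.seek(0);
        file.writeInt(LAYOUT_VERSION); // leading big-endian int, too new for old readers
        file.writeUTF("");             // empty string field
        file.writeBytes(message);      // human-readable explanation
        file.getFD().sync();           // force the bytes to disk before returning
      }

      public static void main(String[] args) throws IOException {
        RandomAccessFile f = new RandomAccessFile("fsimage.sketch", "rws");
        try {
          writeMarker(f);
        } finally {
          f.close();
        }
      }
    }
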
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Oct 29 20:54:17 2010
@@ -190,8 +190,7 @@ public class DataStorage extends Storage
       storageID = ssid;
   }
 
-  @Override
-  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
+  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldF = new File(sd.getRoot(), "storage");
     if (!oldF.exists())
       return false;
@@ -463,6 +462,22 @@ public class DataStorage extends Storage
                  new File(to, blockNames[i]), oldLV);
   }
 
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldF = new File(rootDir, "storage");
+    if (oldF.exists())
+      return;
+    // recreate old storage file to let pre-upgrade versions fail
+    if (!oldF.createNewFile())
+      throw new IOException("Cannot create file " + oldF);
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    // write new version into old storage file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
+  }
+
   private void verifyDistributedUpgradeProgress(
                   NamespaceInfo nsInfo
                 ) throws IOException {

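Because Storage.write() now calls corruptPreUpgradeStorage(root) before writing
VERSION, the observable side effect for a DataNode is a non-empty legacy
"storage" file in the directory root. A hypothetical test helper (class and
method names are my own, not part of this commit) that checks for it:

    import java.io.File;

    /** Hypothetical helper: verify the legacy DataNode marker exists. */
    class LegacyMarkerCheck {
      // After storage state is written to a data directory root, the
      // reverted code should have recreated a non-empty "storage" file.
      static void assertLegacyDataNodeMarker(File root) {
        File marker = new File(root, "storage");
        if (!marker.isFile() || marker.length() == 0) {
          throw new IllegalStateException(
              "expected non-empty legacy marker at " + marker);
        }
      }
    }
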
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java Fri Oct 29 20:54:17 2010
@@ -61,7 +61,7 @@ public class BackupStorage extends FSIma
   }
 
   @Override
-  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
+  public boolean isConversionNeeded(StorageDirectory sd) {
     return false;
   }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri Oct 29 20:54:17 2010
@@ -862,9 +862,12 @@ public class FSImage extends Storage {
     return editLog;
   }
 
-  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
+  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldImageDir = new File(sd.getRoot(), "image");
     if (!oldImageDir.exists()) {
+      if(sd.getVersionFile().exists())
+        throw new InconsistentFSStateException(sd.getRoot(),
+            oldImageDir + " does not exist.");
       return false;
     }
     // check the layout version inside the image file
@@ -880,7 +883,7 @@ public class FSImage extends Storage {
     }
     return true;
   }
-
+  
   //
   // Atomic move sequence, to recover from interrupted checkpoint
   //
@@ -2098,6 +2101,25 @@ public class FSImage extends Storage {
     }
   }
 
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldImageDir = new File(rootDir, "image");
+    if (!oldImageDir.exists())
+      if (!oldImageDir.mkdir())
+        throw new IOException("Cannot create directory " + oldImageDir);
+    File oldImage = new File(oldImageDir, "fsimage");
+    if (!oldImage.exists())
+      // recreate old image file to let pre-upgrade versions fail
+      if (!oldImage.createNewFile())
+        throw new IOException("Cannot create file " + oldImage);
+    RandomAccessFile oldFile = new RandomAccessFile(oldImage, "rws");
+    // write new version into old image file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
+  }
+
   private boolean getDistributedUpgradeState() {
     FSNamesystem ns = getFSNamesystem();
     return ns == null ? false : ns.getDistributedUpgradeState();

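Two pieces are restored for the NameNode: isConversionNeeded(), which inspects
the legacy "image" directory, and the FSImage variant of
corruptPreUpgradeStorage(), which recreates image/fsimage with the marker
bytes. The decision logic, condensed into a sketch (the exception type is
simplified to IOException, and the branch that reads the leading int of
image/fsimage is collapsed into the final return):

    import java.io.File;
    import java.io.IOException;

    /** Condensed sketch of the restored FSImage.isConversionNeeded(). */
    class ConversionCheckSketch {
      static boolean conversionNeeded(File root, boolean versionFileExists)
          throws IOException {
        File oldImageDir = new File(root, "image");
        if (!oldImageDir.exists()) {
          if (versionFileExists) {
            // A VERSION file without image/ is inconsistent: every layout
            // written under the marker scheme also writes image/.
            throw new IOException(oldImageDir + " does not exist.");
          }
          return false; // neither marker present: nothing to convert
        }
        return true; // simplified: the real code still inspects image/fsimage
      }
    }
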
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Oct 29 20:54:17 2010
@@ -585,6 +585,12 @@ public class SecondaryNameNode implement
       super(conf);
     }
 
+    @Override
+    public
+    boolean isConversionNeeded(StorageDirectory sd) {
+      return false;
+    }
+
     /**
      * Analyze checkpoint directories.
      * Create directories if they do not exist.

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Oct 29 20:54:17 2010
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -49,8 +48,6 @@ public class TestDFSUpgradeFromImage ext
   
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
-  private static File TEST_ROOT_DIR =
-                      new File(MiniDFSCluster.getBaseDirectory());
   
   public int numDataNodes = 4;
   
@@ -206,47 +203,4 @@ public class TestDFSUpgradeFromImage ext
       if (cluster != null) { cluster.shutdown(); }
     }
   }
-
-  /**
-   * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
-   * NN, verifying that the correct error message is thrown.
-   */
-  public void testFailOnPreUpgradeImage() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-
-    File namenodeStorage = new File(TEST_ROOT_DIR, "nnimage-0.3.0");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
-
-    // Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
-    FileUtil.fullyDelete(namenodeStorage);
-    assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
-    File imageDir = new File(namenodeStorage, "image");
-    assertTrue("Make " + imageDir, imageDir.mkdirs());
-
-    // Hex dump of a formatted image from Hadoop 0.3.0
-    File imageFile = new File(imageDir, "fsimage");
-    byte[] imageBytes = StringUtils.hexStringToByte(
-      "fffffffee17c0d2700000000");
-    FileOutputStream fos = new FileOutputStream(imageFile);
-    try {
-      fos.write(imageBytes);
-    } finally {
-      fos.close();
-    }
-
-    // Now try to start an NN from it
-
-    try {
-      new MiniDFSCluster.Builder(conf).numDataNodes(0)
-        .format(false)
-        .manageDataDfsDirs(false)
-        .manageNameDfsDirs(false)
-        .startupOption(StartupOption.REGULAR)
-        .build();
-      fail("Was able to start NN from 0.3.0 image");
-    } catch (IOException ioe) {
-      LOG.info("Got expected exception", ioe);
-      assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
-    }
-  }
 }

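The deleted test fabricated a Hadoop 0.3.0 fsimage from the hex string
fffffffee17c0d2700000000. Its leading four bytes decode to the big-endian
32-bit integer -2, a pre-upgrade layout version (at or above the
LAST_PRE_UPGRADE_LAYOUT_VERSION of -3 declared in Storage.java), which is what
produced the "Old layout version is 'too old'" failure the test asserted. A
small self-contained decoder (the remaining bytes are left uninterpreted here):

    import java.nio.ByteBuffer;

    /** Decodes the image header used by the removed testFailOnPreUpgradeImage. */
    public class DecodeOldImageHeader {
      public static void main(String[] args) {
        byte[] bytes = hexToBytes("fffffffee17c0d2700000000");
        ByteBuffer buf = ByteBuffer.wrap(bytes);                // big-endian by default
        System.out.println("layout version: " + buf.getInt()); // prints -2
      }

      static byte[] hexToBytes(String hex) {
        byte[] out = new byte[hex.length() / 2];
        for (int i = 0; i < out.length; i++) {
          out[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
        }
        return out;
      }
    }
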
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1028906&r1=1028905&r2=1028906&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Oct 29 20:54:17 2010
@@ -279,11 +279,23 @@ public class UpgradeUtilities {
         localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newImgDir = new Path(newDir.getParent(), "image");
+        if (!localFS.exists(newImgDir))
+          localFS.copyToLocalFile(
+              new Path(namenodeStorage.toString(), "image"),
+              newImgDir,
+              false);
         break;
       case DATA_NODE:
         localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newStorageFile = new Path(newDir.getParent(), "storage");
+        if (!localFS.exists(newStorageFile))
+          localFS.copyToLocalFile(
+              new Path(datanodeStorage.toString(), "storage"),
+              newStorageFile,
+              false);
         break;
       }
       retVal[i] = newDir;
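
With these copies in place, a freshly created test storage directory carries
the legacy markers alongside current/. Since newDir receives the copy of
current and both markers are placed under newDir.getParent(), the expected
layout (root names illustrative) is:

    <name-storage-root>/
        current/    copied from namenodeStorage/current
        image/      legacy marker directory, copied from namenodeStorage/image

    <data-storage-root>/
        current/    copied from datanodeStorage/current
        storage     legacy marker file, copied from datanodeStorage/storage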