Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/06/01 00:18:31 UTC

svn commit: r1129942 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apa...

Author: suresh
Date: Tue May 31 22:18:30 2011
New Revision: 1129942

URL: http://svn.apache.org/viewvc?rev=1129942&view=rev
Log:
HDFS-1936. Layout version change from HDFS-1822 causes upgrade failure. Contributed by Suresh Srinivas.


Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/build.xml
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue May 31 22:18:30 2011
@@ -644,6 +644,9 @@ Trunk (unreleased changes)
     HDFS-1920. libhdfs does not build for ARM processors.
     (Trevor Robinson via eli)
 
+    HDFS-1936. Layout version change from HDFS-1822 causes upgrade failure.
+    (suresh)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/build.xml?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/build.xml (original)
+++ hadoop/hdfs/trunk/build.xml Tue May 31 22:18:30 2011
@@ -434,6 +434,7 @@
     <delete dir="${test.cache.data}"/>
     <mkdir dir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/>
+    <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/cli/testHDFSConf.xml" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/>

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Tue May 31 22:18:30 2011
@@ -84,11 +84,9 @@ public interface FSConstants {
    */
   public static final String HDFS_URI_SCHEME = "hdfs";
 
-  // Version is reflected in the dfs image and edit log files.
-  // Version is reflected in the data storage file.
-  // Versions are negative.
-  // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -35;
-  // Current version: 
-  // -35: Adding support for block pools and multiple namenodes
+  /**
+   * Please see {@link LayoutVersion} for details on adding a new layout version.
+   */
+  public static final int LAYOUT_VERSION = 
+    LayoutVersion.getCurrentLayoutVersion();
 }

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1129942&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Tue May 31 22:18:30 2011
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This class tracks changes in the layout version of HDFS.
+ * 
+ * The layout version is changed for the following reasons:
+ * <ol>
+ * <li>The layout in which the namenode or datanode stores information
+ * on disk changes.</li>
+ * <li>A new operation code is added to the editlog.</li>
+ * <li>The format or content of a record in the editlog
+ * or fsimage changes.</li>
+ * </ol>
+ * <br>
+ * <b>How to update the layout version:</b><br>
+ * When a change requires a new layout version, please add an entry to
+ * {@link Feature} with a short enum name, the new layout version and a
+ * description of the change. Please see {@link Feature} for further details.
+ * <br>
+ */
+@InterfaceAudience.Private
+public class LayoutVersion {
+ 
+  /**
+   * Enums for features that change the layout version.
+   * <br><br>
+   * To add a new layout version:
+   * <ul>
+   * <li>Define a new enum constant with a short enum name, the new layout version
+   * and a description of the added feature.</li>
+   * <li>When adding a layout version with an ancestor that is not the same as
+   * its immediate predecessor, use the constructor where a specific ancestor
+   * can be passed.
+   * </li>
+   * </ul>
+   */
+  public static enum Feature {
+    NAMESPACE_QUOTA(-16, "Support for namespace quotas"),
+    FILE_ACCESS_TIME(-17, "Support for access time on files"),
+    DISKSPACE_QUOTA(-18, "Support for disk space quotas"),
+    STICKY_BIT(-19, "Support for sticky bits"),
+    APPEND_RBW_DIR(-20, "Datanode has \"rbw\" subdirectory for append"),
+    ATOMIC_RENAME(-21, "Support for atomic rename"),
+    CONCAT(-22, "Support for concat operation"),
+    SYMLINKS(-23, "Support for symbolic links"),
+    DELEGATION_TOKEN(-24, "Support for delegation tokens for security"),
+    FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"),
+    FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"),
+    REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk layout"),
+    EDITS_CHESKUM(-28, "Support checksum for editlog"),
+    UNUSED(-29, "Skipped version"),
+    FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
+    RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203"),
+    RESERVED_REL20_204(-32, "Reserved for release 0.20.204"),
+    RESERVED_REL22(-33, -27, "Reserved for release 0.22"),
+    RESERVED_REL23(-34, -30, "Reserved for release 0.23"),
+    FEDERATION(-35, "Support for namenode federation");
+    
+    final int lv;
+    final int ancestorLV;
+    final String description;
+    
+    /**
+     * Feature that is introduced at layout version {@code lv}.
+     * @param lv new layout version with the addition of this feature
+     * @param description description of the feature
+     */
+    Feature(final int lv, final String description) {
+      this(lv, lv + 1, description);
+    }
+
+    /**
+     * Feature that is introduced at layout version {@code lv}.
+     * @param lv new layout version with the addition of this feature
+     * @param ancestorLV layout version from which the new lv is
+     *          derived.
+     * @param description description of the feature
+     */
+    Feature(final int lv, final int ancestorLV,
+        final String description) {
+      this.lv = lv;
+      this.ancestorLV = ancestorLV;
+      this.description = description;
+    }
+  }
+  
+  // Build layout version and corresponding feature matrix
+  static final Map<Integer, EnumSet<Feature>> map =
+    new HashMap<Integer, EnumSet<Feature>>();
+  
+  // Static initialization 
+  static {
+    initMap();
+  }
+  
+  /**
+   * Initialize the map from a layout version to the EnumSet of
+   * {@link Feature}s supported by that version.
+   */
+  private static void initMap() {
+    // Go through all the enum constants and build a map of
+    // LayoutVersion <-> EnumSet of all supported features in that LayoutVersion
+    for (Feature f : Feature.values()) {
+      EnumSet<Feature> ancestorSet = map.get(f.ancestorLV);
+      if (ancestorSet == null) {
+        ancestorSet = EnumSet.noneOf(Feature.class); // Empty enum set
+        map.put(f.ancestorLV, ancestorSet);
+      }
+      EnumSet<Feature> featureSet = EnumSet.copyOf(ancestorSet);
+      featureSet.add(f);
+      map.put(f.lv, featureSet);
+    }
+    
+    // Special initialization for 0.20.203 and 0.20.204
+    // to add Feature#DELEGATION_TOKEN
+    specialInit(Feature.RESERVED_REL20_203.lv, Feature.DELEGATION_TOKEN);
+    specialInit(Feature.RESERVED_REL20_204.lv, Feature.DELEGATION_TOKEN);
+  }
+  
+  private static void specialInit(int lv, Feature f) {
+    EnumSet<Feature> set = map.get(lv);
+    set.add(f);
+  }
+  
+  /**
+   * Gets a formatted string that describes {@link LayoutVersion} information.
+   */
+  public static String getString() {
+    final StringBuilder buf = new StringBuilder();
+    buf.append("Feature List:\n");
+    for (Feature f : Feature.values()) {
+      buf.append(f).append(" introduced in layout version ")
+          .append(f.lv).append(" (")
+          .append(f.description).append(")\n");
+    }
+    
+    buf.append("\n\nLayoutVersion and supported features:\n");
+    for (Feature f : Feature.values()) {
+      buf.append(f.lv).append(": ").append(map.get(f.lv))
+          .append("\n");
+    }
+    return buf.toString();
+  }
+  
+  /**
+   * Returns true if a given feature is supported in the given layout version
+   * @param f Feature
+   * @param lv LayoutVersion
+   * @return true if {@code f} is supported in layout version {@code lv}
+   */
+  public static boolean supports(final Feature f, final int lv) {
+    final EnumSet<Feature> set = map.get(lv);
+    return set != null && set.contains(f);
+  }
+  
+  /**
+   * Get the current layout version
+   */
+  public static int getCurrentLayoutVersion() {
+    Feature[] values = Feature.values();
+    return values[values.length - 1].lv;
+  }
+}

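A minimal usage sketch of the new class (not part of this commit; the demo
class is hypothetical and assumes placement in the
org.apache.hadoop.hdfs.protocol package so the package-private Feature.lv
field is visible):

    package org.apache.hadoop.hdfs.protocol;

    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    public class LayoutVersionDemo {
      public static void main(String[] args) {
        // FSConstants.LAYOUT_VERSION now resolves to the lv of the last
        // declared Feature constant, i.e. FEDERATION = -35.
        System.out.println(LayoutVersion.getCurrentLayoutVersion()); // -35

        // Feature checks replace raw integer comparisons on the version:
        System.out.println(
            LayoutVersion.supports(Feature.FEDERATION, -35)); // true
        System.out.println(
            LayoutVersion.supports(Feature.FEDERATION, -30)); // false

        // specialInit() grafts DELEGATION_TOKEN onto the reserved
        // 0.20.203/0.20.204 versions, whose ancestor (-19) predates it:
        System.out.println(LayoutVersion.supports(
            Feature.DELEGATION_TOKEN, Feature.RESERVED_REL20_203.lv)); // true
      }
    }
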
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Tue May 31 22:18:30 2011
@@ -33,6 +33,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.fs.FileUtil;
@@ -75,12 +77,6 @@ public abstract class Storage extends St
    * any upgrade code that uses this constant should also be removed. */
   public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
   
-  // last layout version that did not support persistent rbw replicas
-  public static final int PRE_RBW_LAYOUT_VERSION = -19;
-  
-  // last layout version that is before federation
-  public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;
-  
   /** Layout versions of 0.20.203 release */
   public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
 
@@ -781,8 +777,8 @@ public abstract class Storage extends St
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
-    // Set clusterID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set clusterID in version with federation support
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("clusterID", clusterID);
     }
     props.setProperty("cTime", String.valueOf(cTime));
@@ -902,8 +898,8 @@ public abstract class Storage extends St
   /** Validate and set clusterId from {@link Properties}*/
   protected void setClusterId(Properties props, int layoutVersion,
       StorageDirectory sd) throws InconsistentFSStateException {
-    // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set cluster ID in version that supports federation
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       String cid = getProperty(props, sd, "clusterID");
       if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
         throw new InconsistentFSStateException(sd.getRoot(),

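The two constants removed above are at the heart of HDFS-1936: once the
reserved release versions (-31 through -34) sit between -30 and FEDERATION's
-35, a range check such as layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION
can misclassify those branches. A sketch of the failure mode (illustrative
only; the demo class is hypothetical and assumes the same package as
LayoutVersion for access to Feature.lv):

    package org.apache.hadoop.hdfs.protocol;

    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    public class FederationCheckDemo {
      public static void main(String[] args) {
        int rel22 = Feature.RESERVED_REL22.lv; // -33, branched from -27

        // Old check: "any version past -30 must have federation".
        boolean oldCheck = rel22 < -30; // true, wrongly claims federation

        // New check consults the feature matrix instead:
        boolean newCheck =
            LayoutVersion.supports(Feature.FEDERATION, rel22); // false

        System.out.println("old=" + oldCheck + ", new=" + newCheck);
      }
    }
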
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Tue May 31 22:18:30 2011
@@ -30,6 +30,8 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -291,7 +293,7 @@ public class BlockPoolSliceStorage exten
    */
   void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
     // Upgrading is applicable only to releases with federation support or later
-    if (!(this.getLayoutVersion() < LAST_PRE_FEDERATION_LAYOUT_VERSION)) {
+    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       return;
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
@@ -346,8 +348,8 @@ public class BlockPoolSliceStorage exten
    * @throws IOException if the directory is not empty or it can not be removed
    */
   private void cleanupDetachDir(File detachDir) throws IOException {
-    if (layoutVersion >= PRE_RBW_LAYOUT_VERSION && detachDir.exists()
-        && detachDir.isDirectory()) {
+    if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion)
+        && detachDir.exists() && detachDir.isDirectory()) {
 
       if (detachDir.list().length != 0) {
         throw new IOException("Detached directory " + detachDir

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Tue May 31 22:18:30 2011
@@ -43,6 +43,8 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -287,8 +289,8 @@ public class DataStorage extends Storage
     props.setProperty("cTime", String.valueOf(cTime));
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageID", getStorageID());
-    // Set NamespaceID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion >= LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set NamespaceID in version before federation
+    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("namespaceID", String.valueOf(namespaceID));
     }
   }
@@ -305,8 +307,8 @@ public class DataStorage extends Storage
     setStorageType(props, sd);
     setClusterId(props, layoutVersion, sd);
     
-    // Read NamespaceID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion >= LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Read NamespaceID in version before federation
+    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       setNamespaceID(props, sd);
     }
     
@@ -373,8 +375,10 @@ public class DataStorage extends Storage
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
     
+    boolean federationSupported = 
+      LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
     // For pre-federation version - validate the namespaceID
-    if (layoutVersion >= Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION &&
+    if (!federationSupported &&
         getNamespaceID() != nsInfo.getNamespaceID()) {
       throw new IOException("Incompatible namespaceIDs in "
           + sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
@@ -382,8 +386,8 @@ public class DataStorage extends Storage
           + getNamespaceID());
     }
     
-    // For post federation version, validate clusterID
-    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION
+    // For version that supports federation, validate clusterID
+    if (federationSupported
         && !getClusterID().equals(nsInfo.getClusterID())) {
       throw new IOException("Incompatible clusterIDs in "
           + sd.getRoot().getCanonicalPath() + ": namenode clusterID = "
@@ -435,7 +439,7 @@ public class DataStorage extends Storage
    * @throws IOException on error
    */
   void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
-    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       clusterID = nsInfo.getClusterID();
       layoutVersion = nsInfo.getLayoutVersion();
       sd.write();
@@ -493,7 +497,7 @@ public class DataStorage extends Storage
    * @throws IOException if the directory is not empty or it can not be removed
    */
   private void cleanupDetachDir(File detachDir) throws IOException {
-    if (layoutVersion >= PRE_RBW_LAYOUT_VERSION &&
+    if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion) &&
         detachDir.exists() && detachDir.isDirectory() ) {
       
         if (detachDir.list().length != 0 ) {
@@ -626,7 +630,7 @@ public class DataStorage extends Storage
     HardLink hardLink = new HardLink();
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
-    if (diskLayoutVersion < PRE_RBW_LAYOUT_VERSION) { // RBW version
+    if (LayoutVersion.supports(Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
       // hardlink finalized blocks in tmpDir/finalized
       linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
           new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);

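Note how the rbw conditions invert cleanly: the old code compared against
PRE_RBW_LAYOUT_VERSION (-19, the last version without the "rbw" directory),
while the new code asks whether APPEND_RBW_DIR (-20) is supported. A quick
equivalence check (hypothetical snippet, same package assumption as the
sketches above):

    package org.apache.hadoop.hdfs.protocol;

    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    public class RbwCheckDemo {
      public static void main(String[] args) {
        // "layoutVersion >= -19" used to mean no rbw dir; supports() agrees:
        System.out.println(
            LayoutVersion.supports(Feature.APPEND_RBW_DIR, -19)); // false
        // "layoutVersion < -19" used to mean the rbw dir exists:
        System.out.println(
            LayoutVersion.supports(Feature.APPEND_RBW_DIR, -20)); // true
      }
    }
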
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java Tue May 31 22:18:30 2011
@@ -28,6 +28,8 @@ import java.util.zip.CheckedInputStream;
 import java.util.zip.Checksum;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
@@ -233,7 +235,7 @@ public class BackupImage extends FSImage
           BufferedInputStream bin = new BufferedInputStream(backupInputStream);
           DataInputStream in = new DataInputStream(bin);
           Checksum checksum = null;
-          if (logVersion <= -28) { // support fsedits checksum
+          if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
             checksum = FSEditLog.getChecksum();
             in = new DataInputStream(new CheckedInputStream(bin, checksum));
           }
@@ -361,7 +363,7 @@ public class BackupImage extends FSImage
       FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
       int logVersion = logLoader.readLogVersion(in);
       Checksum checksum = null;
-      if (logVersion <= -28) { // support fsedits checksum
+      if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
         checksum = FSEditLog.getChecksum();
         in = new DataInputStream(new CheckedInputStream(bin, checksum));
       }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Tue May 31 22:18:30 2011
@@ -37,6 +37,8 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -111,7 +113,7 @@ public class FSEditLogLoader {
     try {
       logVersion = readLogVersion(in);
       Checksum checksum = null;
-      if (logVersion <= -28) { // support fsedits checksum
+      if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
         checksum = FSEditLog.getChecksum();
         in = new DataInputStream(new CheckedInputStream(bin, checksum));
       }
@@ -191,7 +193,7 @@ public class FSEditLogLoader {
             path = FSImageSerialization.readString(in);
             short replication = fsNamesys.adjustReplication(readShort(in));
             mtime = readLong(in);
-            if (logVersion <= -17) {
+            if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
               atime = readLong(in);
             }
             if (logVersion < -7) {
@@ -277,10 +279,6 @@ public class FSEditLogLoader {
             break;
           } 
           case OP_CONCAT_DELETE: {
-            if (logVersion > -22) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpConcatDelete++;
             int length = in.readInt();
             if (length < 3) { // trg, srcs.., timestamp
@@ -339,7 +337,7 @@ public class FSEditLogLoader {
             // The disk format stores atimes for directories as well.
             // However, currently this is not being updated/used because of
             // performance reasons.
-            if (logVersion <= -17) {
+            if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
               atime = readLong(in);
             }
   
@@ -370,9 +368,6 @@ public class FSEditLogLoader {
           }
           case OP_SET_PERMISSIONS: {
             numOpSetPerm++;
-            if (logVersion > -11)
-              throw new IOException("Unexpected opCode " + opCode
-                                    + " for version " + logVersion);
             fsDir.unprotectedSetPermission(
                 FSImageSerialization.readString(in), FsPermission.read(in));
             break;
@@ -388,20 +383,12 @@ public class FSEditLogLoader {
             break;
           }
           case OP_SET_NS_QUOTA: {
-            if (logVersion > -16) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             fsDir.unprotectedSetQuota(FSImageSerialization.readString(in), 
                                       readLongWritable(in), 
                                       FSConstants.QUOTA_DONT_SET);
             break;
           }
           case OP_CLEAR_NS_QUOTA: {
-            if (logVersion > -16) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
                                       FSConstants.QUOTA_RESET,
                                       FSConstants.QUOTA_DONT_SET);
@@ -444,10 +431,6 @@ public class FSEditLogLoader {
             break;
           }
           case OP_RENAME: {
-            if (logVersion > -21) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpRename++;
             int length = in.readInt();
             if (length != 3) {
@@ -464,10 +447,6 @@ public class FSEditLogLoader {
             break;
           }
           case OP_GET_DELEGATION_TOKEN: {
-            if (logVersion > -24) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpGetDelegationToken++;
             DelegationTokenIdentifier delegationTokenId = 
                 new DelegationTokenIdentifier();
@@ -478,10 +457,6 @@ public class FSEditLogLoader {
             break;
           }
           case OP_RENEW_DELEGATION_TOKEN: {
-            if (logVersion > -24) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpRenewDelegationToken++;
             DelegationTokenIdentifier delegationTokenId = 
                 new DelegationTokenIdentifier();
@@ -492,10 +467,6 @@ public class FSEditLogLoader {
             break;
           }
           case OP_CANCEL_DELEGATION_TOKEN: {
-            if (logVersion > -24) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpCancelDelegationToken++;
             DelegationTokenIdentifier delegationTokenId = 
                 new DelegationTokenIdentifier();
@@ -505,10 +476,6 @@ public class FSEditLogLoader {
             break;
           }
           case OP_UPDATE_MASTER_KEY: {
-            if (logVersion > -24) {
-              throw new IOException("Unexpected opCode " + opCode
-                  + " for version " + logVersion);
-            }
             numOpUpdateMasterKey++;
             DelegationKey delegationKey = new DelegationKey();
             delegationKey.readFields(in);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue May 31 22:18:30 2011
@@ -40,6 +40,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -251,12 +253,13 @@ public class FSImage implements NNStorag
     if (!isFormatted && startOpt != StartupOption.ROLLBACK 
                      && startOpt != StartupOption.IMPORT)
       throw new IOException("NameNode is not formatted.");
-    if (storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
+    int layoutVersion = storage.getLayoutVersion();
+    if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
       NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
     }
     if (startOpt != StartupOption.UPGRADE
-        && storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
-        && storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
+        && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
+        && layoutVersion != FSConstants.LAYOUT_VERSION) {
       throw new IOException(
           "\nFile system image contains an old layout version " 
           + storage.getLayoutVersion() + ".\nAn upgrade to version "
@@ -265,12 +268,12 @@ public class FSImage implements NNStorag
     }
     
     // Upgrade to federation requires -upgrade -clusterid <clusterID> option
-    if (startOpt == StartupOption.UPGRADE
-        && storage.getLayoutVersion() > Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    if (startOpt == StartupOption.UPGRADE && 
+        !LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       if (startOpt.getClusterId() == null) {
         throw new IOException(
             "\nFile system image contains an old layout version "
-                + storage.getLayoutVersion() + ".\nAn upgrade to version "
+                + layoutVersion + ".\nAn upgrade to version "
                 + FSConstants.LAYOUT_VERSION
                 + " is required.\nPlease restart NameNode with "
                 + "-upgrade -clusterid <clusterID> option.");

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue May 31 22:18:30 2011
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
@@ -169,7 +171,7 @@ class FSImageFormat {
 
         // read compression related info
         FSImageCompression compression;
-        if (imgVersion <= -25) {  // -25: 1st version providing compression option
+        if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
           compression = FSImageCompression.readCompressionHeader(conf, in);
         } else {
           compression = FSImageCompression.createNoopCompression();
@@ -180,7 +182,8 @@ class FSImageFormat {
 
         // load all inodes
         LOG.info("Number of files = " + numFiles);
-        if (imgVersion <= -30) {
+        if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
+            imgVersion)) {
           loadLocalNameINodes(numFiles, in);
         } else {
           loadFullNameINodes(numFiles, in);
@@ -229,7 +232,8 @@ class FSImageFormat {
    */  
    private void loadLocalNameINodes(long numFiles, DataInputStream in) 
    throws IOException {
-     assert imgVersion <= -30; // -30: store only local name in image
+     assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
+         imgVersion);
      assert numFiles > 0;
 
      // load root
@@ -327,7 +331,7 @@ class FSImageFormat {
     short replication = in.readShort();
     replication = namesystem.adjustReplication(replication);
     modificationTime = in.readLong();
-    if (imgVersion <= -17) {
+    if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
       atime = in.readLong();
     }
     if (imgVersion <= -8) {
@@ -366,17 +370,19 @@ class FSImageFormat {
     
     // get quota only when the node is a directory
     long nsQuota = -1L;
-    if (imgVersion <= -16 && blocks == null  && numBlocks == -1) {
+      if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
+          && blocks == null && numBlocks == -1) {
         nsQuota = in.readLong();
       }
       long dsQuota = -1L;
-      if (imgVersion <= -18 && blocks == null && numBlocks == -1) {
+      if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
+          && blocks == null && numBlocks == -1) {
         dsQuota = in.readLong();
       }
   
       // Read the symlink only when the node is a symlink
       String symlink = "";
-      if (imgVersion <= -23 && numBlocks == -2) {
+      if (numBlocks == -2) {
         symlink = Text.readString(in);
       }
       
@@ -431,7 +437,7 @@ class FSImageFormat {
     }
 
     private void loadSecretManagerState(DataInputStream in) throws IOException {
-      if (imgVersion > -23) {
+      if (!LayoutVersion.supports(Feature.DELEGATION_TOKEN, imgVersion)) {
         //SecretManagerState is not available.
         //This must not happen if security is turned on.
         return; 
@@ -441,7 +447,7 @@ class FSImageFormat {
 
 
     private long readNumFiles(DataInputStream in) throws IOException {
-      if (imgVersion <= -16) {
+      if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
         return in.readLong();
       } else {
         return in.readInt();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Tue May 31 22:18:30 2011
@@ -44,6 +44,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.UpgradeManager;
@@ -196,8 +198,8 @@ public class NNStorage extends Storage i
     RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
     try {
       oldFile.seek(0);
-      int odlVersion = oldFile.readInt();
-      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+      int oldVersion = oldFile.readInt();
+      if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
         return false;
     } finally {
       oldFile.close();
@@ -674,8 +676,8 @@ public class NNStorage extends Storage i
                             + sd.getRoot() + " is not formatted.");
     }
 
-    // No Block pool ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set Block pool ID in version with federation support
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       String sbpid = props.getProperty("blockpoolID");
       setBlockPoolID(sd.getRoot(), sbpid);
     }
@@ -688,7 +690,7 @@ public class NNStorage extends Storage i
         sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
 
     String sMd5 = props.getProperty(MESSAGE_DIGEST_PROPERTY);
-    if (layoutVersion <= -26) {
+    if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM, layoutVersion)) {
       if (sMd5 == null) {
         throw new InconsistentFSStateException(sd.getRoot(),
             "file " + STORAGE_FILE_VERSION
@@ -719,8 +721,8 @@ public class NNStorage extends Storage i
                            StorageDirectory sd
                            ) throws IOException {
     super.setFields(props, sd);
-    // Set blockpoolID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set blockpoolID in version with federation support
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("blockpoolID", blockpoolID);
     }
     boolean uState = getDistributedUpgradeState();
@@ -1019,7 +1021,7 @@ public class NNStorage extends Storage i
       throw new InconsistentFSStateException(storage,
           "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
-    blockpoolID = bpid;
+    setBlockPoolID(bpid);
   }
   
   public String getBlockPoolID() {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java Tue May 31 22:18:30 2011
@@ -21,6 +21,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 
 import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ByteToken;
@@ -221,11 +223,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_RENAME
    */
   private void visit_OP_RENAME() throws IOException {
-    if(editsVersion > -21) {
-      throw new IOException("Unexpected op code " + FSEditLogOpCodes.OP_RENAME
-        + " for edit log version " + editsVersion
-        + " (op code 15 only expected for 21 and later)");
-    }
     v.visitInt(           EditsElement.LENGTH);
     v.visitStringUTF8(    EditsElement.SOURCE);
     v.visitStringUTF8(    EditsElement.DESTINATION);
@@ -237,12 +234,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_CONCAT_DELETE
    */
   private void visit_OP_CONCAT_DELETE() throws IOException {
-    if(editsVersion > -22) {
-      throw new IOException("Unexpected op code "
-        + FSEditLogOpCodes.OP_CONCAT_DELETE
-        + " for edit log version " + editsVersion
-        + " (op code 16 only expected for 22 and later)");
-    }
     IntToken lengthToken = v.visitInt(EditsElement.LENGTH);
     v.visitStringUTF8(EditsElement.CONCAT_TARGET);
      // all except CONCAT_TARGET and TIMESTAMP
@@ -276,12 +267,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_GET_DELEGATION_TOKEN
    */
   private void visit_OP_GET_DELEGATION_TOKEN() throws IOException {
-      if(editsVersion > -24) {
-        throw new IOException("Unexpected op code "
-          + FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN
-          + " for edit log version " + editsVersion
-          + " (op code 18 only expected for 24 and later)");
-      }
       v.visitByte(       EditsElement.T_VERSION);
       v.visitStringText( EditsElement.T_OWNER);
       v.visitStringText( EditsElement.T_RENEWER);
@@ -298,13 +283,6 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_RENEW_DELEGATION_TOKEN()
     throws IOException {
-
-      if(editsVersion > -24) {
-        throw new IOException("Unexpected op code "
-          + FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN
-          + " for edit log version " + editsVersion
-          + " (op code 19 only expected for 24 and later)");
-      }
       v.visitByte(       EditsElement.T_VERSION);
       v.visitStringText( EditsElement.T_OWNER);
       v.visitStringText( EditsElement.T_RENEWER);
@@ -321,13 +299,6 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_CANCEL_DELEGATION_TOKEN()
     throws IOException {
-
-      if(editsVersion > -24) {
-        throw new IOException("Unexpected op code "
-          + FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN
-          + " for edit log version " + editsVersion
-          + " (op code 20 only expected for 24 and later)");
-      }
       v.visitByte(       EditsElement.T_VERSION);
       v.visitStringText( EditsElement.T_OWNER);
       v.visitStringText( EditsElement.T_RENEWER);
@@ -343,13 +314,6 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_UPDATE_MASTER_KEY()
     throws IOException {
-
-      if(editsVersion > -24) {
-        throw new IOException("Unexpected op code "
-          + FSEditLogOpCodes.OP_UPDATE_MASTER_KEY
-          + " for edit log version " + editsVersion
-          + "(op code 21 only expected for 24 and later)");
-      }
       v.visitVInt(  EditsElement.KEY_ID);
       v.visitVLong( EditsElement.KEY_EXPIRY_DATE);
       VIntToken blobLengthToken = v.visitVInt(EditsElement.KEY_LENGTH);
@@ -454,7 +418,8 @@ class EditsLoaderCurrent implements Edit
 
         v.leaveEnclosingElement(); // DATA
         
-        if (editsOpCode != FSEditLogOpCodes.OP_INVALID && editsVersion <= -28) {
+        if (editsOpCode != FSEditLogOpCodes.OP_INVALID && 
+            LayoutVersion.supports(Feature.EDITS_CHESKUM, editsVersion)) {
           v.visitInt(EditsElement.CHECKSUM);
         }
         v.leaveEnclosingElement(); // RECORD

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Tue May 31 22:18:30 2011
@@ -26,6 +26,8 @@ import java.util.Date;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
@@ -155,7 +157,7 @@ class ImageLoaderCurrent implements Imag
 
       v.visit(ImageElement.GENERATION_STAMP, in.readLong());
 
-      if (imageVersion <= -25) {
+      if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
         boolean isCompressed = in.readBoolean();
         v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
         if (isCompressed) {
@@ -175,7 +177,7 @@ class ImageLoaderCurrent implements Imag
 
       processINodesUC(in, v, skipBlocks);
 
-      if (imageVersion <= -24) {
+      if (LayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
         processDelegationTokens(in, v);
       }
       
@@ -334,7 +336,7 @@ class ImageLoaderCurrent implements Imag
     v.visitEnclosingElement(ImageElement.INODES,
         ImageElement.NUM_INODES, numInodes);
     
-    if (imageVersion <= -30) { // local file name
+    if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
       processLocalNameINodes(in, v, numInodes, skipBlocks);
     } else { // full path name
       processFullNameINodes(in, v, numInodes, skipBlocks);
@@ -396,7 +398,6 @@ class ImageLoaderCurrent implements Imag
     * @param v visitor
     * @param skipBlocks skip blocks or not
     * @param parentName the name of its parent node
-    * @return the number of Children
     * @throws IOException
     */
   private void processINode(DataInputStream in, ImageVisitor v,
@@ -413,7 +414,7 @@ class ImageLoaderCurrent implements Imag
     v.visit(ImageElement.INODE_PATH, pathName);
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
-    if(imageVersion <= -17) // added in version -17
+    if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
       v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
     v.visit(ImageElement.BLOCK_SIZE, in.readLong());
     int numBlocks = in.readInt();
@@ -423,10 +424,10 @@ class ImageLoaderCurrent implements Imag
     // File or directory
     if (numBlocks > 0 || numBlocks == -1) {
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
-      if(imageVersion <= -18) // added in version -18
+      if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
     }
-    if (imageVersion <= -23 && numBlocks == -2) {
+    if (numBlocks == -2) {
       v.visit(ImageElement.SYMLINK, Text.readString(in));
     }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java Tue May 31 22:18:30 2011
@@ -47,8 +47,7 @@ public class OfflineImageViewer {
     "saving the results in OUTPUTFILE.\n" +
     "\n" +
     "The oiv utility will attempt to parse correctly formed image files\n" +
-    "and will abort fail with mal-formed image files. Currently the\n" +
-    "supports FSImage layout versions -16 through -23.\n" +
+    "and will abort fail with mal-formed image files.\n" +
     "\n" +
     "The tool works offline and does not require a running cluster in\n" +
     "order to process an image file.\n" +

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Tue May 31 22:18:30 2011
@@ -47,10 +47,13 @@ import org.apache.commons.logging.LogFac
  */
 public class TestDFSUpgradeFromImage extends TestCase {
   
-  private static final Log LOG = LogFactory.getLog(
-                    "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
+  private static final Log LOG = LogFactory
+      .getLog(TestDFSUpgradeFromImage.class);
   private static File TEST_ROOT_DIR =
                       new File(MiniDFSCluster.getBaseDirectory());
+  private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
+  private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
+  private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   
   public int numDataNodes = 4;
   
@@ -64,24 +67,26 @@ public class TestDFSUpgradeFromImage ext
   
   boolean printChecksum = false;
   
-  protected void setUp() throws IOException {
-    unpackStorage();
+  public void unpackStorage() throws IOException {
+    unpackStorage(HADOOP14_IMAGE);
   }
 
-  public void unpackStorage() throws IOException {
-    String tarFile = System.getProperty("test.cache.data", "build/test/cache") +
-                     "/hadoop-14-dfs-dir.tgz";
+  private void unpackStorage(String tarFileName)
+      throws IOException {
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+        + "/" + tarFileName;
     String dataDir = System.getProperty("test.build.data", "build/test/data");
     File dfsDir = new File(dataDir, "dfs");
     if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
       throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
     }
+    LOG.info("Unpacking " + tarFile);
     FileUtil.unTar(new File(tarFile), new File(dataDir));
     //Now read the reference info
     
-    BufferedReader reader = new BufferedReader( 
-                        new FileReader(System.getProperty("test.cache.data", "build/test/cache") +
-                                       "/hadoop-dfs-dir.txt"));
+    BufferedReader reader = new BufferedReader(new FileReader(
+        System.getProperty("test.cache.data", "build/test/cache")
+            + "/" + HADOOP_DFS_DIR_TXT));
     String line;
     while ( (line = reader.readLine()) != null ) {
       
@@ -177,7 +182,8 @@ public class TestDFSUpgradeFromImage ext
     }
   }
   
-  public void testUpgradeFromImage() throws IOException {
+  public void testUpgradeFromRel14Image() throws IOException {
+    unpackStorage();
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
@@ -246,8 +252,40 @@ public class TestDFSUpgradeFromImage ext
         .build();
       fail("Was able to start NN from 0.3.0 image");
     } catch (IOException ioe) {
-      LOG.info("Got expected exception", ioe);
       assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
     }
   }
+  
+  /**
+   * Test upgrade from 0.22 image
+   */
+  public void testUpgradeFromRel22Image() throws IOException {
+    unpackStorage(HADOOP22_IMAGE);
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
+        System.setProperty("test.build.data", "build/test/data");
+      }
+      conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(numDataNodes)
+                                  .format(false)
+                                  .startupOption(StartupOption.UPGRADE)
+                                  .clusterId("testClusterId")
+                                  .build();
+      cluster.waitActive();
+      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DFSClient dfsClient = dfs.dfs;
+      //Safemode will be off only after upgrade is complete. Wait for it.
+      while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
+        LOG.info("Waiting for SafeMode to be OFF.");
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException ignored) {}
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
 }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1129942&r1=1129941&r2=1129942&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Tue May 31 22:18:30 2011
@@ -34,7 +34,9 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
@@ -48,7 +50,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.test.GenericTestUtils;
 
 /**
  * This class defines a number of static helper methods used by the
@@ -461,7 +462,7 @@ public class UpgradeUtilities {
   public static void createBlockPoolVersionFile(File bpDir,
       StorageInfo version, String bpid) throws IOException {
     // Create block pool version files
-    if (version.layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    if (LayoutVersion.supports(Feature.FEDERATION, version.layoutVersion)) {
       File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
       BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
           bpid);

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java?rev=1129942&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java Tue May 31 22:18:30 2011
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.EnumSet;
+
+import static org.junit.Assert.*;
+import org.junit.Test;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+
+/**
+ * Test for {@link LayoutVersion}
+ */
+public class TestLayoutVersion {
+  
+  /**
+   * Tests to make sure a given layout version supports all the
+   * features of its ancestor.
+   */
+  @Test
+  public void testFeaturesFromAncestorSupported() {
+    for (Feature f : Feature.values()) {
+      validateFeatureList(f);
+    }
+  }
+  
+  /**
+   * Test to make sure 0.20.203 supports delegation token
+   */
+  @Test
+  public void testRelease203() {
+    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN, 
+        Feature.RESERVED_REL20_203.lv));
+  }
+  
+  /**
+   * Test to make sure 0.20.204 supports delegation token
+   */
+  @Test
+  public void testRelease204() {
+    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN, 
+        Feature.RESERVED_REL20_204.lv));
+  }
+  
+  /**
+   * Given feature {@code f}, ensures the layout version of that feature
+   * supports all the features supported by its ancestor.
+   */
+  private void validateFeatureList(Feature f) {
+    int lv = f.lv;
+    int ancestorLV = f.ancestorLV;
+    EnumSet<Feature> ancestorSet = LayoutVersion.map.get(ancestorLV);
+    assertNotNull(ancestorSet);
+    for (Feature feature : ancestorSet) {
+      assertTrue(LayoutVersion.supports(feature, lv));
+    }
+  }
+}
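
One further assertion suggests itself (not in this commit; a hypothetical
addition to the TestLayoutVersion class above): since RESERVED_REL22 (-33)
derives from -27, it must not claim trunk features introduced at -28 through
-30.

      @Test
      public void testRel22DoesNotInheritTrunkFeatures() {
        // 0.22 branched from -27, so e.g. FSIMAGE_NAME_OPTIMIZATION (-30)
        // must be absent from its feature set.
        assertFalse(LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
            Feature.RESERVED_REL22.lv));
      }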