Posted to hdfs-commits@hadoop.apache.org by bo...@apache.org on 2010/09/20 23:09:40 UTC

svn commit: r999122 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/protoco...

Author: boryas
Date: Mon Sep 20 21:09:39 2010
New Revision: 999122

URL: http://svn.apache.org/viewvc?rev=999122&view=rev
Log:
HDFS-1365. HDFS federation: propose ClusterID and BlockPoolID format

Added:
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Mon Sep 20 21:09:39 2010
@@ -37,6 +37,8 @@ Trunk (unreleased changes)
 
     HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. (shv)
 
+    HDFS-1365. HDFS federation: propose ClusterID and BlockPoolID format (tanping via boryas)
+
   IMPROVEMENTS
 
     HDFS-1096. fix for prev. commit. (boryas)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Sep 20 21:09:39 2010
@@ -665,8 +665,8 @@ public abstract class Storage extends St
     this.storageType = type;
   }
   
-  protected Storage(NodeType type, int nsID, long cT) {
-    super(FSConstants.LAYOUT_VERSION, nsID, cT);
+  protected Storage(NodeType type, int nsID, String cid, String bpid, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cid, bpid, cT);
     this.storageType = type;
   }
   
@@ -734,20 +734,29 @@ public abstract class Storage extends St
   protected void getFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
-    String sv, st, sid, sct;
+    String sv, st, sid, scid, sbpid, sct;
     sv = props.getProperty("layoutVersion");
     st = props.getProperty("storageType");
     sid = props.getProperty("namespaceID");
+    scid = props.getProperty("clusterID");
+    sbpid = props.getProperty("blockpoolID");
     sct = props.getProperty("cTime");
-    if (sv == null || st == null || sid == null || sct == null)
+    if (sv == null || st == null || sid == null || scid == null || sbpid == null
+        || sct == null) {
       throw new InconsistentFSStateException(sd.root,
-                                             "file " + STORAGE_FILE_VERSION + " is invalid.");
+        "file " + STORAGE_FILE_VERSION + " is invalid.");
+    }
     int rv = Integer.parseInt(sv);
     NodeType rt = NodeType.valueOf(st);
     int rid = Integer.parseInt(sid);
+    String rcid = scid;
+    String rbpid = sbpid;
     long rct = Long.parseLong(sct);
     if (!storageType.equals(rt) ||
-        !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
+        !((namespaceID == 0) || (rid == 0) || namespaceID == rid) ||
+        !( (clusterID.equals(rcid)) || (clusterID.equals("")) ) ||
+        !( (blockpoolID.equals(rbpid)) || (blockpoolID.equals("")) )
+        )
       throw new InconsistentFSStateException(sd.root,
                                              "is incompatible with others.");
     if (rv < FSConstants.LAYOUT_VERSION) // future version
@@ -756,6 +765,8 @@ public abstract class Storage extends St
     layoutVersion = rv;
     storageType = rt;
     namespaceID = rid;
+    clusterID = rcid;
+    blockpoolID = rbpid;
     cTime = rct;
   }
   
@@ -772,6 +783,8 @@ public abstract class Storage extends St
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
+    props.setProperty("clusterID", clusterID);
+    props.setProperty("blockpoolID", blockpoolID);
     props.setProperty("cTime", String.valueOf(cTime));
   }
 
@@ -848,6 +861,8 @@ public abstract class Storage extends St
 
   public static String getRegistrationID(StorageInfo storage) {
     return "NS-" + Integer.toString(storage.getNamespaceID())
+      + "-" + storage.getClusterID()
+      + "-" + storage.getBlockPoolID()
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
   }
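
For reference, with getFields()/setFields() extended as above, the VERSION file in each storage directory now carries the two new properties alongside the existing ones, and getRegistrationID() embeds them in the registration string ("NS-<namespaceID>-<clusterID>-<blockPoolID>-<layoutVersion>-<cTime>"). A VERSION file written after a format might look like the sketch below; all values are purely illustrative, not taken from a real cluster:

    #Mon Sep 20 21:09:39 2010
    namespaceID=1073741824
    clusterID=cid-f155a2b5-d5f6-4a62-9a48-8e61e6a0a3b0
    blockpoolID=BP-1739843046-192.168.1.10-1285016979000
    cTime=0
    storageType=NAME_NODE
    layoutVersion=-24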

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Mon Sep 20 21:09:39 2010
@@ -23,6 +23,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
 
 
 /**
@@ -34,14 +35,18 @@ import org.apache.hadoop.io.Writable;
 public class StorageInfo implements Writable {
   public int   layoutVersion;   // layout version of the storage data
   public int   namespaceID;     // id of the file system
+  public String clusterID;      // id of the cluster
+  public String blockpoolID;    // id of the blockpool
   public long  cTime;           // creation time of the file system state
   
   public StorageInfo () {
-    this(0, 0, 0L);
+    this(0, 0, "", "", 0L);
   }
   
-  public StorageInfo(int layoutV, int nsID, long cT) {
+  public StorageInfo(int layoutV, int nsID, String cid, String bpid, long cT) {
     layoutVersion = layoutV;
+    clusterID = cid;
+    blockpoolID = bpid;
     namespaceID = nsID;
     cTime = cT;
   }
@@ -63,13 +68,25 @@ public class StorageInfo implements Writ
   public int    getNamespaceID()  { return namespaceID; }
 
   /**
+   * cluster id of the file system.<p>
+   */
+  public String    getClusterID()  { return clusterID; }
+  
+  /**
+   * blockpool id of the file system.<p>
+   */
+  public String    getBlockPoolID()  { return blockpoolID; }
+  
+  /**
    * Creation time of the file system state.<p>
    * Modified during upgrades.
    */
   public long   getCTime()        { return cTime; }
-
+  
   public void   setStorageInfo(StorageInfo from) {
     layoutVersion = from.layoutVersion;
+    clusterID = from.clusterID;
+    blockpoolID = from.blockpoolID;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
   }
@@ -80,12 +97,16 @@ public class StorageInfo implements Writ
   public void write(DataOutput out) throws IOException {
     out.writeInt(getLayoutVersion());
     out.writeInt(getNamespaceID());
+    WritableUtils.writeString(out, clusterID);
+    WritableUtils.writeString(out, blockpoolID); 
     out.writeLong(getCTime());
   }
 
   public void readFields(DataInput in) throws IOException {
     layoutVersion = in.readInt();
     namespaceID = in.readInt();
+    clusterID = WritableUtils.readString(in);
+    blockpoolID = WritableUtils.readString(in);
     cTime = in.readLong();
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Sep 20 21:09:39 2010
@@ -348,6 +348,8 @@ public class DataNode extends Configured
         setNewStorageID(dnRegistration);
         dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
         dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
+        dnRegistration.storageInfo.clusterID = nsInfo.clusterID;
+        dnRegistration.storageInfo.blockpoolID = nsInfo.blockpoolID;
         // it would have been better to pass storage as a parameter to
         // constructor below - need to augment ReflectionUtils used below.
         conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Sep 20 21:09:39 2010
@@ -66,8 +66,8 @@ public class DataStorage extends Storage
     storageID = "";
   }
   
-  DataStorage(int nsID, long cT, String strgID) {
-    super(NodeType.DATA_NODE, nsID, cT);
+  DataStorage(int nsID, String cID, String bpID, long cT, String strgID) {
+    super(NodeType.DATA_NODE, nsID, cID, bpID, cT);
     this.storageID = strgID;
   }
   
@@ -164,6 +164,8 @@ public class DataStorage extends Storage
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = nsInfo.getNamespaceID();
+    this.clusterID = nsInfo.getClusterID();
+    this.blockpoolID = nsInfo.getBlockPoolID();
     this.cTime = 0;
     // store storageID as it currently is
     sd.write();
@@ -238,6 +240,16 @@ public class DataStorage extends Storage
                             "Incompatible namespaceIDs in " + sd.getRoot().getCanonicalPath()
                             + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
                             + "; datanode namespaceID = " + getNamespaceID());
+    if (!getClusterID().equals(nsInfo.getClusterID()))
+      throw new IOException(
+                            "Incompatible clusterIDs in " + sd.getRoot().getCanonicalPath()
+                            + ": namenode clusterID = " + nsInfo.getClusterID() 
+                            + "; datanode clusterID = " + getClusterID());
+    if (!getBlockPoolID().equals(nsInfo.getBlockPoolID()))
+      throw new IOException(
+                            "Incompatible blockpoolIDs in " + sd.getRoot().getCanonicalPath()
+                            + ": namenode blockpoolID = " + nsInfo.getBlockPoolID() 
+                            + "; datanode blockpoolID = " + getBlockPoolID());
     if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
         && this.cTime == nsInfo.getCTime())
       return; // regular startup
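
The effect of these new checks is that a datanode storage directory formatted against one namenode can no longer be transitioned into a different cluster or block pool: doTransition() fails fast with a message built from the code above. A hypothetical example (path and IDs invented for illustration):

    java.io.IOException: Incompatible clusterIDs in /data/1/dfs/data:
    namenode clusterID = cid-f155a2b5-...; datanode clusterID = cid-0c7d21aa-...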

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Sep 20 21:09:39 2010
@@ -29,7 +29,10 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.net.URI;
+import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -43,6 +46,7 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
+import java.util.UUID;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -73,6 +77,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
@@ -159,6 +164,8 @@ public class FSImage extends Storage {
   static private final FsPermission FILE_PERM = new FsPermission((short)0);
   static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
 
+  private static final Random R = new Random();
+  
   /**
    */
   FSImage() {
@@ -1448,6 +1455,51 @@ public class FSImage extends Storage {
     return newID;
   }
 
+  /**
+   * Generate new clusterID.
+   * 
+   * clusterID is a persistent attribute of the cluster.
+   * It is generated when the cluster is created and remains the same
+   * during the life cycle of the cluster.  When a new name node is formatted,
+   * and it starts a new cluster, a new clusterID is generated and stored.
+   * Subsequent name nodes must be given the same ClusterID during their format
+   * to join the same cluster.
+   * When a datanode registers, it receives the clusterID and sticks with it.
+   * If at any point a name node or data node tries to join another cluster, it
+   * will be rejected.
+   * 
+   * @return new clusterID
+   */ 
+  private String newClusterID() {
+    this.clusterID = "cid-" + UUID.randomUUID().toString();
+    return this.clusterID;
+  }
+  
+  /**
+   * Generate new blockpoolID.
+   * 
+   * @return new blockpoolID
+   */ 
+  private String newBlockPoolID() throws UnknownHostException {
+    String ip = "unknownIP";
+    try {
+      ip = DNS.getDefaultIP("default");
+    } catch (UnknownHostException e) {
+      LOG.warn("Could not find ip address of \"default\" interface.");
+      throw e;
+    }
+    
+    int rand = 0;
+    try {
+      rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
+    } catch (NoSuchAlgorithmException e) {
+      LOG.warn("Could not use SecureRandom");
+      rand = R.nextInt(Integer.MAX_VALUE);
+    }
+    this.blockpoolID = "BP-" + rand + "-" + ip + "-" + System.currentTimeMillis();
+    return this.blockpoolID;
+  }
+  
   /** Create new dfs name directory.  Caution: this destroys all files
    * in this filesystem. */
   void format(StorageDirectory sd) throws IOException {
@@ -1465,6 +1517,8 @@ public class FSImage extends Storage {
   public void format() throws IOException {
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = newNamespaceID();
+    this.clusterID = newClusterID();
+    this.blockpoolID = newBlockPoolID();
     this.cTime = 0L;
     this.checkpointTime = now();
     for (Iterator<StorageDirectory> it = 
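
Taken together, newClusterID() and newBlockPoolID() produce IDs of the form "cid-<UUID>" and "BP-<random int>-<namenode IP>-<current time millis>". A minimal, self-contained sketch of the same two formats follows; it substitutes java.net.InetAddress for Hadoop's org.apache.hadoop.net.DNS helper, so the output is illustrative only:

    import java.net.InetAddress;
    import java.security.SecureRandom;
    import java.util.UUID;

    public class IdFormatSketch {
      public static void main(String[] args) throws Exception {
        // Cluster ID: "cid-" followed by a random UUID, mirroring FSImage.newClusterID().
        String clusterID = "cid-" + UUID.randomUUID().toString();

        // Block pool ID: "BP-" + random int + "-" + IP + "-" + wall-clock millis,
        // mirroring FSImage.newBlockPoolID().
        String ip = InetAddress.getLocalHost().getHostAddress();
        int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
        String blockpoolID = "BP-" + rand + "-" + ip + "-" + System.currentTimeMillis();

        System.out.println(clusterID);   // e.g. cid-f155a2b5-d5f6-4a62-9a48-8e61e6a0a3b0
        System.out.println(blockpoolID); // e.g. BP-1739843046-192.168.1.10-1285016979000
      }
    }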

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Sep 20 21:09:39 2010
@@ -523,6 +523,8 @@ public class FSNamesystem implements FSC
   
   NamespaceInfo getNamespaceInfo() {
     return new NamespaceInfo(dir.fsImage.getNamespaceID(),
+                             dir.fsImage.getClusterID(),
+                             dir.fsImage.getBlockPoolID(),
                              dir.fsImage.getCTime(),
                              getDistributedUpgradeVersion());
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Mon Sep 20 21:09:39 2010
@@ -48,8 +48,9 @@ public class NamespaceInfo extends Stora
     buildVersion = null;
   }
   
-  public NamespaceInfo(int nsID, long cT, int duVersion) {
-    super(FSConstants.LAYOUT_VERSION, nsID, cT);
+  public NamespaceInfo(int nsID, String clusterID, String bpID, 
+      long cT, int duVersion) {
+    super(FSConstants.LAYOUT_VERSION, nsID, clusterID, bpID, cT);
     buildVersion = Storage.getBuildVersion();
     this.distributedUpgradeVersion = duVersion;
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Mon Sep 20 21:09:39 2010
@@ -170,6 +170,8 @@ public class TestDFSRollback extends Tes
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentClusterID(cluster),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
       startDataNodeShouldFail(StartupOption.ROLLBACK);
       cluster.shutdown();
@@ -185,6 +187,8 @@ public class TestDFSRollback extends Tes
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentClusterID(cluster),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          Long.MAX_VALUE));
       startDataNodeShouldFail(StartupOption.ROLLBACK);
       cluster.shutdown();
@@ -224,6 +228,8 @@ public class TestDFSRollback extends Tes
       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
                                          new StorageInfo(1,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentClusterID(null),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Mon Sep 20 21:09:39 2010
@@ -76,26 +76,28 @@ public class TestDFSStartupVersions exte
     long fsscTimeOld = Long.MIN_VALUE;
     long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
     long fsscTimeNew = Long.MAX_VALUE;
+    String clusterID = "cid-test";
+    String bpID = "bpid-test";
     
     return new StorageInfo[] {
-      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeOld), // 0
-      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeCur), // 1
-      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeNew), // 2
-      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeOld), // 3
-      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeCur), // 4
-      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeNew), // 5
-      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeOld), // 6
-      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeCur), // 7
-      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeNew), // 8
-      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeOld), // 9
-      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeCur), // 10
-      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeNew), // 11
-      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeOld), // 12
-      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeCur), // 13
-      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeNew), // 14
-      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeOld), // 15
-      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeCur), // 16
-      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeNew), // 17
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 0
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 1
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 2
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 3
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 4
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 5
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 6
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 7
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 8
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 9
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 10
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 11
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 12
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 13
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 14
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 15
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 16
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 17
     };
   }
   
@@ -177,6 +179,8 @@ public class TestDFSStartupVersions exte
     StorageInfo nameNodeVersion = new StorageInfo(
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
                                                   UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                  UpgradeUtilities.getCurrentClusterID(cluster),
+                                                  UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                   UpgradeUtilities.getCurrentFsscTime(cluster));
     log("NameNode version info", NAME_NODE, null, nameNodeVersion);
     for (int i = 0; i < versions.length; i++) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java Mon Sep 20 21:09:39 2010
@@ -175,6 +175,8 @@ public class TestDFSUpgrade extends Test
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentClusterID(cluster),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
       startDataNodeShouldFail(StartupOption.REGULAR);
       cluster.shutdown();
@@ -188,6 +190,8 @@ public class TestDFSUpgrade extends Test
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentClusterID(cluster),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          Long.MAX_VALUE));
       startDataNodeShouldFail(StartupOption.REGULAR);
       cluster.shutdown();
@@ -223,6 +227,8 @@ public class TestDFSUpgrade extends Test
       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
                                          new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentClusterID(null),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -232,6 +238,8 @@ public class TestDFSUpgrade extends Test
       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentClusterID(null),
+                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Mon Sep 20 21:09:39 2010
@@ -68,6 +68,10 @@ public class UpgradeUtilities {
   private static long namenodeStorageChecksum;
   // The namespaceId of the namenodeStorage directory
   private static int namenodeStorageNamespaceID;
+  // The clusterId of the namenodeStorage directory
+  private static String namenodeStorageClusterID;
+  // The blockpoolId of the namenodeStorage directory
+  private static String namenodeStorageBlockPoolID;
   // The fsscTime of the namenodeStorage directory
   private static long namenodeStorageFsscTime;
   // The singleton master storage directory for Datanode
@@ -103,6 +107,8 @@ public class UpgradeUtilities {
       NameNode namenode = cluster.getNameNode();
       namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
       namenodeStorageFsscTime = namenode.versionRequest().getCTime();
+      namenodeStorageClusterID = namenode.versionRequest().getClusterID();
+      namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
       
       FileSystem fs = FileSystem.get(config);
       Path baseDir = new Path("/TestUpgrade");
@@ -378,6 +384,28 @@ public class UpgradeUtilities {
   }
   
   /**
+   * Return the cluster ID inherent in the currently running
+   * Namenode. 
+   */
+  public static String getCurrentClusterID(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getClusterID();
+    }
+    return namenodeStorageClusterID;
+  }
+  
+  /**
+   * Return the blockpool ID inherent in the currently running
+   * Namenode. 
+   */
+  public static String getCurrentBlockPoolID(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getBlockPoolID();
+    }
+    return namenodeStorageBlockPoolID;
+  }
+  
+  /**
    * Return the File System State Creation Timestamp (FSSCTime) inherent
    * in the currently running Namenode.  If no Namenode is running,
    * return the FSSCTime of the master Namenode storage directory.

Added: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java?rev=999122&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java (added)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java Mon Sep 20 21:09:39 2010
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+/**
+ * This is a unit test for {@link StorageInfo} serialization and deserialization
+ * of the IDs used in HDFS, e.g. ClusterID and BlockPoolID.
+ */
+public class TestStorageInfo extends TestCase {
+
+  /**
+   * Test write() / readFields() of StorageInfo.  Write StorageInfo into a buffer,
+   * then read it back; the result should be the same as the original.
+   * @throws IOException 
+   */
+  public void testStorageInfo() throws IOException {
+    
+    int nsID = 123;
+    String cid = "cid-test";
+    String bpid = "bpid-test";
+    int layoutV = 234;
+    long cT = 0L;
+    
+    StorageInfo sinfo = new StorageInfo(layoutV, nsID, cid,  bpid, cT);
+    
+    Assert.assertNotNull(sinfo);
+
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    DataOutput output = new DataOutputStream(bos);
+
+    try {
+        // first wrap a DataOutputStream around the buffer for sinfo to write into
+        sinfo.write(output);
+        // close the stream to make sure all data has been flushed
+        // into the underlying byte array
+        bos.close();
+        
+        // convert the ByteArrayOutputStream into a ByteArrayInputStream
+        ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
+        DataInputStream dataInputStream = new DataInputStream(bis);
+
+        StorageInfo secondsinfo = new StorageInfo();
+        secondsinfo.readFields(dataInputStream);
+        
+        // compare
+        Assert.assertEquals(sinfo.getClusterID(), secondsinfo.getClusterID());
+        Assert.assertEquals(sinfo.getNamespaceID(), secondsinfo.getNamespaceID());
+        Assert.assertEquals(sinfo.getBlockPoolID(), secondsinfo.getBlockPoolID());
+        Assert.assertEquals(sinfo.getLayoutVersion(), secondsinfo.getLayoutVersion());
+        Assert.assertEquals(sinfo.getCTime(), secondsinfo.getCTime());
+    } catch (IOException e) {
+      Assert.fail("StorageInfo write/readFields round trip failed: " + e.getMessage());
+    }
+  }
+}
+

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=999122&r1=999121&r2=999122&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Mon Sep 20 21:09:39 2010
@@ -100,7 +100,8 @@ public class TestBlockRecovery {
     dataDir.mkdirs();
     dirs.add(dataDir);
     DatanodeProtocol namenode = mock(DatanodeProtocol.class);
-    when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1, 1L, 1));
+    when(namenode.versionRequest()).thenReturn(
+        new NamespaceInfo(1, "cid-test", "bpid-test", 1L, 1));
     when(namenode.sendHeartbeat(any(DatanodeRegistration.class), anyLong(), 
         anyLong(), anyLong(), anyInt(), anyInt())).thenReturn(
             new DatanodeCommand[0]);