Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/04/12 00:51:15 UTC

svn commit: r1325052 [2/3] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/contrib/ hadoop-hdfs/src/contrib/fuse-dfs/ hadoop-hdfs/src/contrib/fuse-dfs/src/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java...

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Wed Apr 11 22:51:10 2012
@@ -33,10 +33,14 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -47,8 +51,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -65,7 +71,7 @@ import com.google.common.collect.Sets;
  */
 @InterfaceAudience.Private
 public class BootstrapStandby implements Tool, Configurable {
-  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class); 
+  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
   private String nsId;
   private String nnId;
   private String otherNNId;
@@ -79,7 +85,13 @@ public class BootstrapStandby implements
   
   private boolean force = false;
   private boolean interactive = true;
-  
+
+  // Exit/return codes.
+  static final int ERR_CODE_FAILED_CONNECT = 2;
+  static final int ERR_CODE_INVALID_VERSION = 3;
+  static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;
+  static final int ERR_CODE_ALREADY_FORMATTED = 5;
+  static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
   public int run(String[] args) throws Exception {
     SecurityUtil.initKrb5CipherSuites();
@@ -121,24 +133,43 @@ public class BootstrapStandby implements
     System.err.println("Usage: " + this.getClass().getSimpleName() +
         "[-force] [-nonInteractive]");
   }
+  
+  private NamenodeProtocol createNNProtocolProxy()
+      throws IOException {
+    return NameNodeProxies.createNonHAProxy(getConf(),
+        otherIpcAddr, NamenodeProtocol.class,
+        UserGroupInformation.getLoginUser(), true)
+        .getProxy();
+  }
+  
+  private HAServiceProtocol createHAProtocolProxy()
+      throws IOException {
+    return new NNHAServiceTarget(new HdfsConfiguration(conf),
+        nsId, otherNNId).getProxy(conf, 15000);
+  }
 
   private int doRun() throws IOException {
-    ProxyAndInfo<NamenodeProtocol> proxyAndInfo = NameNodeProxies.createNonHAProxy(getConf(),
-      otherIpcAddr, NamenodeProtocol.class,
-      UserGroupInformation.getLoginUser(), true);
-    NamenodeProtocol proxy = proxyAndInfo.getProxy();
+
+    NamenodeProtocol proxy = createNNProtocolProxy();
     NamespaceInfo nsInfo;
     try {
       nsInfo = proxy.versionRequest();
-      checkLayoutVersion(nsInfo);
     } catch (IOException ioe) {
       LOG.fatal("Unable to fetch namespace information from active NN at " +
           otherIpcAddr + ": " + ioe.getMessage());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Full exception trace", ioe);
       }
-      return 1;
+      return ERR_CODE_FAILED_CONNECT;
     }
+
+    if (!checkLayoutVersion(nsInfo)) {
+      LOG.fatal("Layout version on remote node (" +
+          nsInfo.getLayoutVersion() + ") does not match " +
+          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      return ERR_CODE_INVALID_VERSION;
+    }
+
     
     System.out.println(
         "=====================================================\n" +
@@ -153,12 +184,35 @@ public class BootstrapStandby implements
         "           Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
 
+    // Ensure the other NN is active - we can't force it to roll edit logs
+    // below if it's not active.
+    if (!isOtherNNActive()) {
+      String err = "NameNode " + nsId + "." + nnId + " at " + otherIpcAddr +
+          " is not currently in ACTIVE state.";
+      if (!interactive) {
+        LOG.fatal(err + " Please transition it to " +
+            "active before attempting to bootstrap a standby node.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+      
+      System.err.println(err);
+      if (ToolRunner.confirmPrompt(
+            "Do you want to automatically transition it to active now?")) {
+        transitionOtherNNActive();
+      } else {
+        LOG.fatal("User aborted. Exiting without bootstrapping standby.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+    }
+    
+
+    
     // Check with the user before blowing away data.
     if (!NameNode.confirmFormat(
             Sets.union(Sets.newHashSet(dirsToFormat),
                 Sets.newHashSet(editUrisToFormat)),
             force, interactive)) {
-      return 1;
+      return ERR_CODE_ALREADY_FORMATTED;
     }
 
     // Force the active to roll its log
@@ -180,7 +234,7 @@ public class BootstrapStandby implements
     // Ensure that we have enough edits already in the shared directory to
     // start up from the last checkpoint on the active.
     if (!checkLogsAvailableForRead(image, imageTxId, rollTxId)) {
-      return 1;
+      return ERR_CODE_LOGS_UNAVAILABLE;
     }
     
     image.getStorage().writeTransactionIdFileToStorage(rollTxId);
@@ -193,6 +247,14 @@ public class BootstrapStandby implements
     return 0;
   }
 
+  
+  private void transitionOtherNNActive()
+      throws AccessControlException, ServiceFailedException, IOException {
+    LOG.info("Transitioning the running namenode to active...");
+    createHAProtocolProxy().transitionToActive();    
+    LOG.info("Successful");
+  }
+
   private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
       long rollTxId) {
     
@@ -225,12 +287,14 @@ public class BootstrapStandby implements
     }
   }
 
-  private void checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    if (nsInfo.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
-      throw new IOException("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
-    }
+  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
+    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+  }
+  
+  private boolean isOtherNNActive()
+      throws AccessControlException, IOException {
+    HAServiceStatus status = createHAProtocolProxy().getServiceStatus();
+    return status.getState() == HAServiceState.ACTIVE;
   }
 
   private void parseConfAndFindOtherNN() throws IOException {

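[The doRun() refactoring above replaces the blanket "return 1" with distinct exit codes, so a caller can tell a connection failure apart from a layout-version mismatch, an inactive peer, or an aborted format. A minimal sketch of how a wrapper might act on them; the wrapper class is hypothetical, the constants are package-private in the patch so their literal values are repeated here purely for illustration:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
    import org.apache.hadoop.util.ToolRunner;

    // Hypothetical wrapper, not part of this commit.
    public class BootstrapWrapper {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(new HdfsConfiguration(),
                                new BootstrapStandby(), args);
        switch (rc) {
          case 0: System.out.println("standby bootstrapped");        break;
          case 2: System.err.println("could not connect to active"); break; // ERR_CODE_FAILED_CONNECT
          case 3: System.err.println("layout version mismatch");     break; // ERR_CODE_INVALID_VERSION
          case 4: System.err.println("other NN not ACTIVE");         break; // ERR_CODE_OTHER_NN_NOT_ACTIVE
          case 5: System.err.println("already formatted / aborted"); break; // ERR_CODE_ALREADY_FORMATTED
          case 6: System.err.println("shared edits unavailable");    break; // ERR_CODE_LOGS_UNAVAILABLE
          default: System.err.println("bootstrap failed, rc=" + rc);
        }
        System.exit(rc);
      }
    }
]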
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java Wed Apr 11 22:51:10 2012
@@ -25,14 +25,6 @@ package org.apache.hadoop.hdfs.server.pr
  * each datanode.
  */
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 /**
  * Balancer bandwidth command instructs each datanode to change its value for
  * the max amount of network bandwidth it may use during the block balancing
@@ -71,35 +63,4 @@ public class BalancerBandwidthCommand ex
   public long getBalancerBandwidthValue() {
     return this.bandwidth;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(BalancerBandwidthCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new BalancerBandwidthCommand();
-      }
-    });
-  }
-
-  /**
-   * Writes the bandwidth payload to the Balancer Bandwidth Command packet.
-   * @param out DataOutput stream used for writing commands to the datanode.
-   * @throws IOException
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(this.bandwidth);
-  }
-
-  /**
-   * Reads the bandwidth payload from the Balancer Bandwidth Command packet.
-   * @param in DataInput stream used for reading commands to the datanode.
-   * @throws IOException
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.bandwidth = in.readLong();
-  }
 }

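[This file is the first of many in this commit to shed their Writable plumbing: the static WritableFactories registration block, the no-arg constructor, and the write()/readFields() pair all go away together, since each exists only to serve reflective deserialization (presumably because the wire format for these server protocols now lives elsewhere, e.g. in protobuf translators on trunk). For reference, the idiom being removed throughout looks like this in isolation; a self-contained sketch, not code from the tree:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.io.WritableFactories;
    import org.apache.hadoop.io.WritableFactory;

    // Sketch of the Writable idiom this commit strips from the server
    // protocol classes. The RPC layer instantiates the class reflectively,
    // which is why a registered factory (or public no-arg constructor) is
    // mandatory, then populates the instance via readFields().
    public class WritableCommandSketch implements Writable {
      static {                                      // register a ctor
        WritableFactories.setFactory(WritableCommandSketch.class,
            new WritableFactory() {
              public Writable newInstance() { return new WritableCommandSketch(); }
            });
      }

      private long payload;                         // example field

      public WritableCommandSketch() {}             // required for deserialization

      public void write(DataOutput out) throws IOException {
        out.writeLong(payload);
      }

      public void readFields(DataInput in) throws IOException {
        payload = in.readLong();
      }
    }
]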
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Wed Apr 11 22:51:10 2012
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,11 +24,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 
 /****************************************************
  * A BlockCommand is an instruction to a datanode 
@@ -58,8 +50,6 @@ public class BlockCommand extends Datano
   Block blocks[];
   DatanodeInfo targets[][];
 
-  public BlockCommand() {}
-
   /**
    * Create BlockCommand for transferring blocks to another datanode
    * @param blocktargetlist    blocks to be transferred 
@@ -110,50 +100,4 @@ public class BlockCommand extends Datano
   public DatanodeInfo[][] getTargets() {
     return targets;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Text.writeString(out, poolId);
-    out.writeInt(blocks.length);
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i].write(out);
-    }
-    out.writeInt(targets.length);
-    for (int i = 0; i < targets.length; i++) {
-      out.writeInt(targets[i].length);
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j].write(out);
-      }
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.poolId = Text.readString(in);
-    this.blocks = new Block[in.readInt()];
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i] = new Block();
-      blocks[i].readFields(in);
-    }
-
-    this.targets = new DatanodeInfo[in.readInt()][];
-    for (int i = 0; i < targets.length; i++) {
-      this.targets[i] = new DatanodeInfo[in.readInt()];
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j] = new DatanodeInfo();
-        targets[i][j].readFields(in);
-      }
-    }
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Wed Apr 11 22:51:10 2012
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collection;
 import java.util.ArrayList;
 
@@ -28,9 +25,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 import com.google.common.base.Joiner;
 
@@ -62,14 +56,6 @@ public class BlockRecoveryCommand extend
     private long newGenerationStamp;
 
     /**
-     * Create empty RecoveringBlock.
-     */
-    public RecoveringBlock() {
-      super();
-      newGenerationStamp = -1L;
-    }
-
-    /**
      * Create RecoveringBlock.
      */
     public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
@@ -84,27 +70,6 @@ public class BlockRecoveryCommand extend
     public long getNewGenerationStamp() {
       return newGenerationStamp;
     }
-
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (RecoveringBlock.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new RecoveringBlock(); }
-         });
-    }
-
-    public void write(DataOutput out) throws IOException {
-      super.write(out);
-      out.writeLong(newGenerationStamp);
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      newGenerationStamp = in.readLong();
-    }
   }
 
   /**
@@ -149,34 +114,4 @@ public class BlockRecoveryCommand extend
     sb.append("\n)");
     return sb.toString();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockRecoveryCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockRecoveryCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(recoveringBlocks.size());
-    for(RecoveringBlock block : recoveringBlocks) {
-      block.write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    int numBlocks = in.readInt();
-    recoveringBlocks = new ArrayList<RecoveringBlock>(numBlocks);
-    for(int i = 0; i < numBlocks; i++) {
-      RecoveringBlock b = new RecoveringBlock();
-      b.readFields(in);
-      add(b);
-    }
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Wed Apr 11 22:51:10 2012
@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 /** A class to implement an array of BlockLocations
  *  It provide efficient customized serialization/deserialization methods
@@ -34,23 +27,17 @@ import org.apache.hadoop.io.WritableUtil
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BlocksWithLocations implements Writable {
+public class BlocksWithLocations {
 
   /**
    * A class to keep track of a block and its locations
    */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
-  public static class BlockWithLocations  implements Writable {
+  public static class BlockWithLocations {
     Block block;
     String datanodeIDs[];
     
-    /** default constructor */
-    public BlockWithLocations() {
-      block = new Block();
-      datanodeIDs = null;
-    }
-    
     /** constructor */
     public BlockWithLocations(Block b, String[] datanodes) {
       block = b;
@@ -66,33 +53,10 @@ public class BlocksWithLocations impleme
     public String[] getDatanodes() {
       return datanodeIDs;
     }
-    
-    /** deserialization method */
-    public void readFields(DataInput in) throws IOException {
-      block.readFields(in);
-      int len = WritableUtils.readVInt(in); // variable length integer
-      datanodeIDs = new String[len];
-      for(int i=0; i<len; i++) {
-        datanodeIDs[i] = Text.readString(in);
-      }
-    }
-    
-    /** serialization method */
-    public void write(DataOutput out) throws IOException {
-      block.write(out);
-      WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
-      for(String id:datanodeIDs) {
-        Text.writeString(out, id);
-      }
-    }
   }
 
   private BlockWithLocations[] blocks;
 
-  /** default constructor */
-  BlocksWithLocations() {
-  }
-
   /** Constructor with one parameter */
   public BlocksWithLocations( BlockWithLocations[] blocks ) {
     this.blocks = blocks;
@@ -102,22 +66,4 @@ public class BlocksWithLocations impleme
   public BlockWithLocations[] getBlocks() {
     return blocks;
   }
-
-  /** serialization method */
-  public void write( DataOutput out ) throws IOException {
-    WritableUtils.writeVInt(out, blocks.length);
-    for(int i=0; i<blocks.length; i++) {
-      blocks[i].write(out);
-    }
-  }
-
-  /** deserialization method */
-  public void readFields(DataInput in) throws IOException {
-    int len = WritableUtils.readVInt(in);
-    blocks = new BlockWithLocations[len];
-    for(int i=0; i<len; i++) {
-      blocks[i] = new BlockWithLocations();
-      blocks[i].readFields(in);
-    }
-  }
 }

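[The serialization deleted here prefixed the block-location arrays with WritableUtils vints to keep small counts to one byte on the wire. A standalone round-trip sketch of that encoding, for illustration only:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableUtils;

    // Round trip of the encoding used by the removed write()/readFields():
    // a variable-length int count followed by length-prefixed strings.
    public class VIntRoundTrip {
      public static void main(String[] args) throws IOException {
        String[] datanodeIDs = { "dn1:50010", "dn2:50010" };

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
        for (String id : datanodeIDs) {
          Text.writeString(out, id);
        }

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        int len = WritableUtils.readVInt(in);
        String[] decoded = new String[len];
        for (int i = 0; i < len; i++) {
          decoded[i] = Text.readString(in);
        }
      }
    }
]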
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java Wed Apr 11 22:51:10 2012
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -77,27 +70,4 @@ public class CheckpointCommand extends N
   public boolean needToReturnImage() {
     return needToReturnImage;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {
-    WritableFactories.setFactory(CheckpointCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new CheckpointCommand();}
-        });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    cSig.write(out);
-    out.writeBoolean(needToReturnImage);
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    cSig = new CheckpointSignature();
-    cSig.readFields(in);
-    needToReturnImage = in.readBoolean();
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java Wed Apr 11 22:51:10 2012
@@ -27,10 +27,7 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  public DatanodeCommand() {
-    super();
-  }
-  
+
   DatanodeCommand(int action) {
     super(action);
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Wed Apr 11 22:51:10 2012
@@ -18,20 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /** 
  * DatanodeRegistration class contains all information the name-node needs
@@ -41,23 +33,11 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeRegistration extends DatanodeID
-implements Writable, NodeRegistration {
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DatanodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DatanodeRegistration(); }
-       });
-  }
+    implements NodeRegistration {
 
   private StorageInfo storageInfo;
   private ExportedBlockKeys exportedKeys;
 
-  public DatanodeRegistration() {
-    this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        new StorageInfo(), new ExportedBlockKeys());
-  }
-  
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
       ExportedBlockKeys keys) {
     super(dn);
@@ -118,30 +98,6 @@ implements Writable, NodeRegistration {
       + ")";
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    out.writeShort(ipcPort);
-
-    storageInfo.write(out);
-    exportedKeys.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    this.ipcPort = in.readShort() & 0x0000ffff;
-
-    storageInfo.readFields(in);
-    exportedKeys.readFields(in);
-  }
   @Override
   public boolean equals(Object to) {
     return super.equals(to);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java Wed Apr 11 22:51:10 2012
@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A BlockCommand is an instruction to a datanode to register with the namenode.
@@ -34,17 +26,6 @@ import org.apache.hadoop.io.WritableUtil
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FinalizeCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new FinalizeCommand();
-      }
-    });
-  }
-  
   String blockPoolId;
   private FinalizeCommand() {
     super(DatanodeProtocol.DNA_FINALIZE);
@@ -58,11 +39,4 @@ public class FinalizeCommand extends Dat
   public String getBlockPoolId() {
     return blockPoolId;
   }
-  
-  public void readFields(DataInput in) throws IOException {
-    blockPoolId = WritableUtils.readString(in);
-  }
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeString(out, blockPoolId);
-  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java Wed Apr 11 22:51:10 2012
@@ -17,31 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 /**
  * Response to {@link DatanodeProtocol#sendHeartbeat}
  */
-public class HeartbeatResponse implements Writable {
+public class HeartbeatResponse {
   /** Commands returned from the namenode to the datanode */
   private DatanodeCommand[] commands;
   
   /** Information about the current HA-related state of the NN */
   private NNHAStatusHeartbeat haStatus;
   
-  public HeartbeatResponse() {
-    // Empty constructor required for Writable
-  }
-  
   public HeartbeatResponse(DatanodeCommand[] cmds,
       NNHAStatusHeartbeat haStatus) {
     commands = cmds;
@@ -55,31 +45,4 @@ public class HeartbeatResponse implement
   public NNHAStatusHeartbeat getNameNodeHaState() {
     return haStatus;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    int length = commands == null ? 0 : commands.length;
-    out.writeInt(length);
-    for (int i = 0; i < length; i++) {
-      ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
-                                 null, true);
-    }
-    haStatus.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int length = in.readInt();
-    commands = new DatanodeCommand[length];
-    ObjectWritable objectWritable = new ObjectWritable();
-    for (int i = 0; i < length; i++) {
-      commands[i] = (DatanodeCommand) ObjectWritable.readObject(in,
-          objectWritable, null);
-    }
-    haStatus = new NNHAStatusHeartbeat();
-    haStatus.readFields(in);
-  }
 }

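[HeartbeatResponse carried an array of polymorphic DatanodeCommand subclasses, so the deleted methods had to route each element through ObjectWritable, which records the concrete class name alongside the payload so the reader can reinstantiate the right subtype. A self-contained sketch of that mechanism, using a Text as the stand-in value since the command classes are no longer Writable after this commit:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.ObjectWritable;
    import org.apache.hadoop.io.Text;

    // ObjectWritable writes the value's concrete class before the value
    // itself, so the reader can reconstruct the proper subtype -- the
    // dispatch the removed HeartbeatResponse methods relied on.
    public class ObjectWritableRoundTrip {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        Text value = new Text("example");
        ObjectWritable.writeObject(out, value, value.getClass(), conf, true);

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        Text decoded =
            (Text) ObjectWritable.readObject(in, new ObjectWritable(), conf);
      }
    }
]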
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Wed Apr 11 22:51:10 2012
@@ -42,9 +42,6 @@ public interface InterDatanodeProtocol {
    * the interface to the DN AND the RPC protocol used to communicate with the 
    * DN.
    * 
-   * Post version 6L (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
-   * 
    * This class is used by both the DN to insulate from the protocol 
    * serialization.
    * 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java Wed Apr 11 22:51:10 2012
@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -45,29 +38,4 @@ public class KeyUpdateCommand extends Da
   public ExportedBlockKeys getExportedKeys() {
     return this.keys;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(KeyUpdateCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new KeyUpdateCommand();
-      }
-    });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    keys.write(out);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    keys.readFields(in);
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java Wed Apr 11 22:51:10 2012
@@ -17,26 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class NNHAStatusHeartbeat implements Writable {
+public class NNHAStatusHeartbeat {
 
   private State state;
   private long txid = HdfsConstants.INVALID_TXID;
   
-  public NNHAStatusHeartbeat() {
-  }
-  
   public NNHAStatusHeartbeat(State state, long txid) {
     this.state = state;
     this.txid = txid;
@@ -50,21 +41,6 @@ public class NNHAStatusHeartbeat impleme
     return txid;
   }
   
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeEnum(out, state);
-    out.writeLong(txid);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    state = WritableUtils.readEnum(in, State.class);
-    txid = in.readLong();
-  }
-
   @InterfaceAudience.Private
   public enum State {
     ACTIVE,

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java Wed Apr 11 22:51:10 2012
@@ -19,9 +19,6 @@ package org.apache.hadoop.hdfs.server.pr
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Base class for name-node command.
@@ -30,17 +27,6 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NamenodeCommand extends ServerCommand {
-  static {
-    WritableFactories.setFactory(NamenodeCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new NamenodeCommand();}
-        });
-  }
-
-  public NamenodeCommand() {
-    super();
-  }
-
   public NamenodeCommand(int action) {
     super(action);
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Wed Apr 11 22:51:10 2012
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
@@ -42,9 +41,6 @@ public interface NamenodeProtocol {
    * the client interface to the NN AND the RPC protocol used to 
    * communicate with the NN.
    * 
-   * Post version 70 (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
-   * 
    * This class is used by both the DFSClient and the 
    * NN server side to insulate from the protocol serialization.
    * 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java Wed Apr 11 22:51:10 2012
@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -44,10 +36,6 @@ implements NodeRegistration {
   String httpAddress;         // HTTP address of the node
   NamenodeRole role;          // node role
 
-  public NamenodeRegistration() {
-    super();
-  }
-
   public NamenodeRegistration(String address,
                               String httpAddress,
                               StorageInfo storageInfo,
@@ -95,31 +83,4 @@ implements NodeRegistration {
   public boolean isRole(NamenodeRole that) {
     return role.equals(that);
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {
-    WritableFactories.setFactory
-      (NamenodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamenodeRegistration(); }
-       });
-  }
-
-  @Override // Writable
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, rpcAddress);
-    Text.writeString(out, httpAddress);
-    Text.writeString(out, role.name());
-    super.write(out);
-  }
-
-  @Override // Writable
-  public void readFields(DataInput in) throws IOException {
-    rpcAddress = Text.readString(in);
-    httpAddress = Text.readString(in);
-    role = NamenodeRole.valueOf(Text.readString(in));
-    super.readFields(in);
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Wed Apr 11 22:51:10 2012
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -28,11 +26,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * NamespaceInfo is returned by the name-node in reply 
@@ -76,31 +69,6 @@ public class NamespaceInfo extends Stora
     return blockPoolID;
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (NamespaceInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamespaceInfo(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, getBuildVersion());
-    super.write(out);
-    out.writeInt(getDistributedUpgradeVersion());
-    WritableUtils.writeString(out, blockPoolID);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    buildVersion = DeprecatedUTF8.readString(in);
-    super.readFields(in);
-    distributedUpgradeVersion = in.readInt();
-    blockPoolID = WritableUtils.readString(in);
-  }
-  
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java Wed Apr 11 22:51:10 2012
@@ -18,19 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A data structure to store the blocks in an incremental block report. 
  */
-public class ReceivedDeletedBlockInfo implements Writable {
+public class ReceivedDeletedBlockInfo {
   Block block;
   BlockStatus status;
   String delHints;
@@ -113,25 +106,6 @@ public class ReceivedDeletedBlockInfo im
     return status == BlockStatus.DELETED_BLOCK;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    this.block.write(out);
-    WritableUtils.writeVInt(out, this.status.code);
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      Text.writeString(out, this.delHints);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.block = new Block();
-    this.block.readFields(in);
-    this.status = BlockStatus.fromCode(WritableUtils.readVInt(in));
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      this.delHints = Text.readString(in);
-    }
-  }
-
   public String toString() {
     return block.toString() + ", status: " + status +
       ", delHint: " + delHints;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java Wed Apr 11 22:51:10 2012
@@ -17,14 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * A BlockCommand is an instruction to a datanode to register with the namenode.
@@ -32,26 +26,10 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class RegisterCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new RegisterCommand();
-      }
-    });
-  }
   
   public static final DatanodeCommand REGISTER = new RegisterCommand();
 
   public RegisterCommand() {
     super(DatanodeProtocol.DNA_REGISTER);
   }
-
-  @Override
-  public void readFields(DataInput in) { }
- 
-  @Override
-  public void write(DataOutput out) { }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java Wed Apr 11 22:51:10 2012
@@ -18,17 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Replica recovery information.
@@ -38,9 +31,6 @@ import org.apache.hadoop.io.WritableFact
 public class ReplicaRecoveryInfo extends Block {
   private ReplicaState originalState;
 
-  public ReplicaRecoveryInfo() {
-  }
-
   public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) {
     set(blockId, diskLen, gs);
     originalState = rState;
@@ -59,27 +49,4 @@ public class ReplicaRecoveryInfo extends
   public int hashCode() {
     return super.hashCode();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (ReplicaRecoveryInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new ReplicaRecoveryInfo(); }
-       });
-  }
-
- @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    originalState = ReplicaState.read(in); 
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    originalState.write(out);
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java Wed Apr 11 22:51:10 2012
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.*;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
 
 /**
  * Base class for a server command.
@@ -33,21 +30,10 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public abstract class ServerCommand implements Writable {
+public abstract class ServerCommand {
   private int action;
 
   /**
-   * Unknown server command constructor.
-   * Creates a command with action 0.
-   * 
-   * @see NamenodeProtocol#ACT_UNKNOWN
-   * @see DatanodeProtocol#DNA_UNKNOWN
-   */
-  public ServerCommand() {
-    this(0);
-  }
-
-  /**
    * Create a command for the specified action.
    * Actions are protocol specific.
    * 
@@ -66,15 +52,4 @@ public abstract class ServerCommand impl
   public int getAction() {
     return this.action;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(this.action);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    this.action = in.readInt();
-  }
 }

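[With the Writable machinery gone, ServerCommand reduces to an action code plus whatever payload a subclass adds, and the no-arg "unknown action" constructor loses its reason to exist. A minimal sketch of the post-patch shape of a command; the class is hypothetical, the real subclasses are the ones touched by this commit:

    package org.apache.hadoop.hdfs.server.protocol;

    // Hypothetical subclass illustrating a command after this patch: a
    // plain data holder keyed by a protocol action code, with no
    // serialization methods. Kept in the same package because the
    // DatanodeCommand(int) constructor is package-private.
    public class ExampleActionCommand extends DatanodeCommand {
      private final String detail;

      ExampleActionCommand(String detail) {
        super(DatanodeProtocol.DNA_REGISTER); // any protocol action code
        this.detail = detail;
      }

      public String getDetail() {
        return detail;
      }
    }
]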
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java Wed Apr 11 22:51:10 2012
@@ -17,15 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * This is a generic distributed upgrade command.
@@ -68,31 +61,4 @@ public class UpgradeCommand extends Data
   public short getCurrentStatus() {
     return this.upgradeStatus;
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (UpgradeCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new UpgradeCommand(); }
-       });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(this.version);
-    out.writeShort(this.upgradeStatus);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.version = in.readInt();
-    this.upgradeStatus = in.readShort();
-  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Wed Apr 11 22:51:10 2012
@@ -248,7 +248,6 @@ public class GetConf extends Configured 
     @Override
     int doWorkInternal(GetConf tool, String[] args) throws Exception {
       this.key = args[0];
-      System.err.println("key: " + key);
       return super.doWorkInternal(tool, args);
     }
   }

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1311518-1325051

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1311518-1325051

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1311518-1325051

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1311518-1325051

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1311518-1325051

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Apr 11 22:51:10 2012
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileSystem.S
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -705,4 +706,14 @@ public class DFSTestUtil {
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
         .join(nameservices));
   }
+  
+  public static DatanodeDescriptor getLocalDatanodeDescriptor() {
+    return new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo() {
+    return new DatanodeInfo(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
 }
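
These helpers replace the no-arg DatanodeInfo/DatanodeDescriptor constructors that the tests below stop using. A sketch of a caller (the wrapper class is hypothetical; the calls mirror TestGetBlocks in this commit):

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    public class LocalDatanodeSketch {
      public static void main(String[] args) {
        // Fully initialized local stand-ins on the default datanode port.
        DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
        DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
        info.setIpAddr("1.2.3.4"); // repoint at a non-existent datanode
        System.out.println(dd + " / " + info);
      }
    }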

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Wed Apr 11 22:51:10 2012
@@ -62,7 +62,7 @@ public class TestDFSUtil {
    */
   @Test
   public void testLocatedBlocks2Locations() {
-    DatanodeInfo d = new DatanodeInfo();
+    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
     DatanodeInfo[] ds = new DatanodeInfo[1];
     ds[0] = d;
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Apr 11 22:51:10 2012
@@ -121,7 +121,9 @@ public class TestGetBlocks extends TestC
       getBlocksWithException(namenode, dataNodes[0], -1);
 
       // get blocks of size BlockSize from a non-existent datanode
-      getBlocksWithException(namenode, new DatanodeInfo(), 2);
+      DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
+      info.setIpAddr("1.2.3.4");
+      getBlocksWithException(namenode, info, 2);
     } finally {
       cluster.shutdown();
     }
@@ -132,7 +134,7 @@ public class TestGetBlocks extends TestC
                                       long size) throws IOException {
     boolean getException = false;
     try {
-        namenode.getBlocks(new DatanodeInfo(), 2);
+        namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
     } catch(RemoteException e) {
       getException = true;
       assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Wed Apr 11 22:51:10 2012
@@ -179,7 +179,7 @@ public class TestParallelReadUtil {
    */
   static class ReadWorker extends Thread {
 
-    static public final int N_ITERATIONS = 1024 * 4;
+    static public final int N_ITERATIONS = 1024;
 
     private static final double PROPORTION_NON_POSITIONAL_READ = 0.10;
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Wed Apr 11 22:51:10 2012
@@ -25,6 +25,7 @@ import org.apache.commons.logging.impl.L
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -234,4 +235,56 @@ public class TestReplaceDatanodeOnFailur
       Assert.assertEquals(REPLICATION, dfsout.getNumCurrentReplicas());
     }        
   }
+
+  @Test
+  public void testAppend() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final short REPLICATION = (short)3;
+    
+    Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT, ReplaceDatanodeOnFailure.get(conf));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
+        ).numDataNodes(1).build();
+
+    try {
+      final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();
+      final Path f = new Path(DIR, "testAppend");
+      
+      {
+        LOG.info("create an empty file " + f);
+        fs.create(f, REPLICATION).close();
+        final FileStatus status = fs.getFileStatus(f);
+        Assert.assertEquals(REPLICATION, status.getReplication());
+        Assert.assertEquals(0L, status.getLen());
+      }
+
+      final byte[] bytes = new byte[1000];
+      {
+        LOG.info("append " + bytes.length + " bytes to " + f);
+        final FSDataOutputStream out = fs.append(f);
+        out.write(bytes);
+        out.close();
+
+        final FileStatus status = fs.getFileStatus(f);
+        Assert.assertEquals(REPLICATION, status.getReplication());
+        Assert.assertEquals(bytes.length, status.getLen());
+      }
+
+      {
+        LOG.info("append another " + bytes.length + " bytes to " + f);
+        try {
+          final FSDataOutputStream out = fs.append(f);
+          out.write(bytes);
+          out.close();
+
+          Assert.fail();
+        } catch(IOException ioe) {
+          LOG.info("This exception is expected", ioe);
+        }
+      }
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
 }
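
The second append in testAppend is expected to fail: the file has replication 3 but the cluster has a single datanode, and the default ReplaceDatanodeOnFailure policy requires a replacement datanode when appending to such a pipeline. For reference, a sketch of the client-side switch that relaxes this behavior (the key name is assumed from the replace-datanode-on-failure feature and is not part of this diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RelaxReplacePolicySketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Assumed key: turn off datanode replacement on pipeline failure so
        // appends proceed even when no replacement node is available.
        conf.setBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.enable",
            false);
      }
    }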

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Apr 11 22:51:10 2012
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -441,9 +442,9 @@ public class TestPBHelper {
     Block[] blocks = new Block[] { new Block(21), new Block(22) };
     DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
         new DatanodeInfo[2] };
-    dnInfos[0][0] = new DatanodeInfo();
-    dnInfos[1][0] = new DatanodeInfo();
-    dnInfos[1][1] = new DatanodeInfo();
+    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
     BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
         blocks, dnInfos);
     BlockCommandProto bcProto = PBHelper.convert(bc);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Wed Apr 11 22:51:10 2012
@@ -26,6 +26,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -47,7 +48,7 @@ public class TestBlockInfo {
 
     final int MAX_BLOCKS = 10;
 
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
     int headIndex;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java Wed Apr 11 22:51:10 2012
@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 
@@ -80,8 +81,8 @@ public class TestCorruptReplicaInfo exte
         block_ids.add((long)i);
       }
       
-      DatanodeDescriptor dn1 = new DatanodeDescriptor();
-      DatanodeDescriptor dn2 = new DatanodeDescriptor();
+      DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
+      DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
       
       crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
       assertEquals("Number of corrupt blocks not returning correctly",

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java Wed Apr 11 22:51:10 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 
 import java.util.ArrayList;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 
@@ -36,7 +37,7 @@ public class TestDatanodeDescriptor exte
     final int REMAINING_BLOCKS = 2;
     final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
     
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
       blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
@@ -49,7 +50,7 @@ public class TestDatanodeDescriptor exte
   }
   
   public void testBlocksCounter() throws Exception {
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     assertEquals(0, dd.numBlocks());
     BlockInfo blk = new BlockInfo(new Block(1L), 1);
     BlockInfo blk1 = new BlockInfo(new Block(2L), 2);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Wed Apr 11 22:51:10 2012
@@ -18,12 +18,19 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintStream;
 import java.net.URI;
-import java.util.ArrayList;
+import java.security.Permission;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -40,11 +47,11 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-
 public class TestClusterId {
   private static final Log LOG = LogFactory.getLog(TestClusterId.class);
   File hdfsDir;
-  
+  Configuration config;
+
   private String getClusterId(Configuration config) throws IOException {
     // check that the cluster id is not empty.
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
@@ -59,33 +66,41 @@ public class TestClusterId {
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
   }
-  
+
   @Before
   public void setUp() throws IOException {
+    System.setSecurityManager(new NoExitSecurityManager());
+
     String baseDir = System.getProperty("test.build.data", "build/test/data");
 
-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete test directory '" + 
-          hdfsDir + "'");
+    hdfsDir = new File(baseDir, "dfs/name");
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not delete test directory '" + hdfsDir + "'");
     }
     LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+    // as some tests might change these values we reset them to defaults before
+    // every test
+    StartupOption.FORMAT.setForceFormat(false);
+    StartupOption.FORMAT.setInteractiveFormat(true);
+    
+    config = new Configuration();
+    config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
   }
-  
+
   @After
   public void tearDown() throws IOException {
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not tearDown test directory '" +
-          hdfsDir + "'");
+    System.setSecurityManager(null);
+
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not tearDown test directory '" + hdfsDir
+          + "'");
     }
   }
-  
+
   @Test
   public void testFormatClusterIdOption() throws IOException {
-    Configuration config = new Configuration();
     
-    config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
-
     // 1. should format without cluster id
     //StartupOption.FORMAT.setClusterId("");
     NameNode.format(config);
@@ -107,4 +122,356 @@ public class TestClusterId {
     String newCid = getClusterId(config);
     assertFalse("ClusterId should not be the same", newCid.equals(cid));
   }
-}
+
+  /**
+   * Test namenode format with -format option. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormat() throws IOException {
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when an empty name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyDir() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force options when name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force -clusterid option when name
+   * directory exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForceAndClusterId() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String myId = "testFormatWithForceAndClusterId";
+    String[] argv = { "-format", "-force", "-clusterid", myId };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cId = getClusterId(config);
+    assertEquals("ClusterIds do not match", myId, cId);
+  }
+
+  /**
+   * Test namenode format with -format -clusterid -force options, where
+   * -clusterid is not followed by an id. Format should fail and print usage.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithInvalidClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "-force" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Version file should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid options. Format should fail
+   * as no cluster id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNoClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Version file should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid and an empty cluster id.
+   * Format should fail as no valid id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "" };
+
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Version file should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when a non-empty
+   * name directory exists. Format should not succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractive() throws IOException {
+
+    // we check for a non-empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have been aborted with exit code 1", 1,
+          e.status);
+    }
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Version file should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when name
+   * directory does not exist. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveNameDirDoesNotExist()
+      throws IOException {
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive -force options. Format
+   * should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveAndForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non-empty name directory
+   * exists. Enter Y when prompted and the format should succeed.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterYes() throws IOException,
+      InterruptedException {
+
+    // we check for a non-empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    System.setIn(origIn);
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non-empty name directory
+   * exists. Enter N when prompted and format should be aborted.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterNo() throws IOException,
+      InterruptedException {
+
+    // we check for a non-empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should not have succeeded", 1, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Version file should not exist", version.exists());
+  }
+
+  private static class ExitException extends SecurityException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status) {
+      super("There is no escape!");
+      this.status = status;
+    }
+  }
+
+  private static class NoExitSecurityManager extends SecurityManager {
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new ExitException(status);
+    }
+  }
+}
\ No newline at end of file
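
The ExitException/NoExitSecurityManager pair above is the usual device for testing code that calls System.exit(): checkExit() throws instead of terminating the JVM, and the test asserts on the trapped status code. A standalone sketch of the same pattern (all names here are illustrative, not part of the commit):

    import java.security.Permission;

    public class TrapExitSketch {
      static class ExitTrappedException extends SecurityException {
        final int status;
        ExitTrappedException(int status) { this.status = status; }
      }

      static int runTrapped(Runnable body) {
        SecurityManager old = System.getSecurityManager();
        System.setSecurityManager(new SecurityManager() {
          @Override public void checkPermission(Permission perm) { /* allow */ }
          @Override public void checkPermission(Permission perm, Object ctx) { }
          @Override public void checkExit(int status) {
            throw new ExitTrappedException(status);
          }
        });
        try {
          body.run();
          return -1; // body never called System.exit()
        } catch (ExitTrappedException e) {
          return e.status; // the exit code the body attempted to use
        } finally {
          System.setSecurityManager(old);
        }
      }

      public static void main(String[] args) {
        int status = runTrapped(new Runnable() {
          public void run() { System.exit(3); }
        });
        System.out.println(status); // prints 3
      }
    }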