Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/11 07:47:46 UTC

svn commit: r1324567 [3/4] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/dev-support/ hadoop-hdfs/src/contrib/ hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ hadoop-hdfs/sr...

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Wed Apr 11 05:47:40 2012
@@ -33,10 +33,14 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -47,8 +51,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -65,7 +71,7 @@ import com.google.common.collect.Sets;
  */
 @InterfaceAudience.Private
 public class BootstrapStandby implements Tool, Configurable {
-  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class); 
+  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
   private String nsId;
   private String nnId;
   private String otherNNId;
@@ -79,7 +85,13 @@ public class BootstrapStandby implements
   
   private boolean force = false;
   private boolean interactive = true;
-  
+
+  // Exit/return codes.
+  static final int ERR_CODE_FAILED_CONNECT = 2;
+  static final int ERR_CODE_INVALID_VERSION = 3;
+  static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;
+  static final int ERR_CODE_ALREADY_FORMATTED = 5;
+  static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
   public int run(String[] args) throws Exception {
     SecurityUtil.initKrb5CipherSuites();
@@ -121,24 +133,43 @@ public class BootstrapStandby implements
     System.err.println("Usage: " + this.getClass().getSimpleName() +
         "[-force] [-nonInteractive]");
   }
+  
+  private NamenodeProtocol createNNProtocolProxy()
+      throws IOException {
+    return NameNodeProxies.createNonHAProxy(getConf(),
+        otherIpcAddr, NamenodeProtocol.class,
+        UserGroupInformation.getLoginUser(), true)
+        .getProxy();
+  }
+  
+  private HAServiceProtocol createHAProtocolProxy()
+      throws IOException {
+    return new NNHAServiceTarget(new HdfsConfiguration(conf),
+        nsId, otherNNId).getProxy(conf, 15000);
+  }
 
   private int doRun() throws IOException {
-    ProxyAndInfo<NamenodeProtocol> proxyAndInfo = NameNodeProxies.createNonHAProxy(getConf(),
-      otherIpcAddr, NamenodeProtocol.class,
-      UserGroupInformation.getLoginUser(), true);
-    NamenodeProtocol proxy = proxyAndInfo.getProxy();
+
+    NamenodeProtocol proxy = createNNProtocolProxy();
     NamespaceInfo nsInfo;
     try {
       nsInfo = proxy.versionRequest();
-      checkLayoutVersion(nsInfo);
     } catch (IOException ioe) {
       LOG.fatal("Unable to fetch namespace information from active NN at " +
           otherIpcAddr + ": " + ioe.getMessage());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Full exception trace", ioe);
       }
-      return 1;
+      return ERR_CODE_FAILED_CONNECT;
     }
+
+    if (!checkLayoutVersion(nsInfo)) {
+      LOG.fatal("Layout version on remote node (" +
+          nsInfo.getLayoutVersion() + ") does not match " +
+          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      return ERR_CODE_INVALID_VERSION;
+    }
+
     
     System.out.println(
         "=====================================================\n" +
@@ -153,12 +184,35 @@ public class BootstrapStandby implements
         "           Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
 
+    // Ensure the other NN is active - we can't force it to roll edit logs
+    // below if it's not active.
+    if (!isOtherNNActive()) {
+      String err = "NameNode " + nsId + "." + nnId + " at " + otherIpcAddr +
+          " is not currently in ACTIVE state.";
+      if (!interactive) {
+        LOG.fatal(err + " Please transition it to " +
+            "active before attempting to bootstrap a standby node.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+      
+      System.err.println(err);
+      if (ToolRunner.confirmPrompt(
+            "Do you want to automatically transition it to active now?")) {
+        transitionOtherNNActive();
+      } else {
+        LOG.fatal("User aborted. Exiting without bootstrapping standby.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+    }
+    
+
+    
     // Check with the user before blowing away data.
     if (!NameNode.confirmFormat(
             Sets.union(Sets.newHashSet(dirsToFormat),
                 Sets.newHashSet(editUrisToFormat)),
             force, interactive)) {
-      return 1;
+      return ERR_CODE_ALREADY_FORMATTED;
     }
 
     // Force the active to roll its log
@@ -180,7 +234,7 @@ public class BootstrapStandby implements
     // Ensure that we have enough edits already in the shared directory to
     // start up from the last checkpoint on the active.
     if (!checkLogsAvailableForRead(image, imageTxId, rollTxId)) {
-      return 1;
+      return ERR_CODE_LOGS_UNAVAILABLE;
     }
     
     image.getStorage().writeTransactionIdFileToStorage(rollTxId);
@@ -193,6 +247,14 @@ public class BootstrapStandby implements
     return 0;
   }
 
+  
+  private void transitionOtherNNActive()
+      throws AccessControlException, ServiceFailedException, IOException {
+    LOG.info("Transitioning the running namenode to active...");
+    createHAProtocolProxy().transitionToActive();    
+    LOG.info("Successful");
+  }
+
   private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
       long rollTxId) {
     
@@ -225,12 +287,14 @@ public class BootstrapStandby implements
     }
   }
 
-  private void checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    if (nsInfo.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
-      throw new IOException("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
-    }
+  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
+    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+  }
+  
+  private boolean isOtherNNActive()
+      throws AccessControlException, IOException {
+    HAServiceStatus status = createHAProtocolProxy().getServiceStatus();
+    return status.getState() == HAServiceState.ACTIVE;
   }
 
   private void parseConfAndFindOtherNN() throws IOException {
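
A minimal sketch (not part of this commit) of driving the refactored tool and branching on the new return codes. It assumes a hypothetical caller placed in the same package, since the ERR_CODE_* constants are package-private; in normal operation the tool is invoked through "hdfs namenode -bootstrapStandby".

    package org.apache.hadoop.hdfs.server.namenode.ha;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class BootstrapStandbyDriver { // hypothetical helper, for illustration
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // ToolRunner injects the configuration and calls run(args).
        int rc = ToolRunner.run(conf, new BootstrapStandby(),
            new String[] { "-nonInteractive" });
        if (rc == BootstrapStandby.ERR_CODE_OTHER_NN_NOT_ACTIVE) {
          // In non-interactive mode the tool no longer prompts, so the
          // remote NN must be transitioned to active before retrying.
          System.err.println("Transition the other NN to active first.");
        }
        System.exit(rc);
      }
    }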

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java Wed Apr 11 05:47:40 2012
@@ -219,7 +219,7 @@ public class EditLogTailer {
       // disk are ignored.
       long editsLoaded = 0;
       try {
-        editsLoaded = image.loadEdits(streams, namesystem);
+        editsLoaded = image.loadEdits(streams, namesystem, null);
       } catch (EditLogInputException elie) {
         editsLoaded = elie.getNumEditsLoaded();
         throw elie;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java Wed Apr 11 05:47:40 2012
@@ -25,14 +25,6 @@ package org.apache.hadoop.hdfs.server.pr
  * each datanode.
  */
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 /**
  * Balancer bandwidth command instructs each datanode to change its value for
  * the max amount of network bandwidth it may use during the block balancing
@@ -71,35 +63,4 @@ public class BalancerBandwidthCommand ex
   public long getBalancerBandwidthValue() {
     return this.bandwidth;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(BalancerBandwidthCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new BalancerBandwidthCommand();
-      }
-    });
-  }
-
-  /**
-   * Writes the bandwidth payload to the Balancer Bandwidth Command packet.
-   * @param out DataOutput stream used for writing commands to the datanode.
-   * @throws IOException
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(this.bandwidth);
-  }
-
-  /**
-   * Reads the bandwidth payload from the Balancer Bandwidth Command packet.
-   * @param in DataInput stream used for reading commands to the datanode.
-   * @throws IOException
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.bandwidth = in.readLong();
-  }
 }
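
This file and the ones that follow all shed the same three-part Writable machinery: the static WritableFactories registration, the no-arg constructor it required, and the write/readFields pair. For context, a sketch of how a command round-tripped over that mechanism before this change (everything it calls is deleted above; wire translation now happens in the protobuf-based protocol layer instead):

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.io.WritableFactories;

    // Pre-commit behavior only: BalancerBandwidthCommand no longer
    // implements Writable after this change.
    static BalancerBandwidthCommand roundTrip(BalancerBandwidthCommand cmd)
        throws IOException {
      DataOutputBuffer out = new DataOutputBuffer();
      cmd.write(out);                      // serializer removed by this commit
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), out.getLength());
      // The registered factory stood in for the private no-arg constructor.
      Writable copy =
          WritableFactories.newInstance(BalancerBandwidthCommand.class);
      copy.readFields(in);                 // deserializer removed by this commit
      return (BalancerBandwidthCommand) copy;
    }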

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Wed Apr 11 05:47:40 2012
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,11 +24,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 
 /****************************************************
  * A BlockCommand is an instruction to a datanode 
@@ -58,8 +50,6 @@ public class BlockCommand extends Datano
   Block blocks[];
   DatanodeInfo targets[][];
 
-  public BlockCommand() {}
-
   /**
    * Create BlockCommand for transferring blocks to another datanode
    * @param blocktargetlist    blocks to be transferred 
@@ -110,50 +100,4 @@ public class BlockCommand extends Datano
   public DatanodeInfo[][] getTargets() {
     return targets;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Text.writeString(out, poolId);
-    out.writeInt(blocks.length);
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i].write(out);
-    }
-    out.writeInt(targets.length);
-    for (int i = 0; i < targets.length; i++) {
-      out.writeInt(targets[i].length);
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j].write(out);
-      }
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.poolId = Text.readString(in);
-    this.blocks = new Block[in.readInt()];
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i] = new Block();
-      blocks[i].readFields(in);
-    }
-
-    this.targets = new DatanodeInfo[in.readInt()][];
-    for (int i = 0; i < targets.length; i++) {
-      this.targets[i] = new DatanodeInfo[in.readInt()];
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j] = new DatanodeInfo();
-        targets[i][j].readFields(in);
-      }
-    }
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Wed Apr 11 05:47:40 2012
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collection;
 import java.util.ArrayList;
 
@@ -28,9 +25,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 import com.google.common.base.Joiner;
 
@@ -62,14 +56,6 @@ public class BlockRecoveryCommand extend
     private long newGenerationStamp;
 
     /**
-     * Create empty RecoveringBlock.
-     */
-    public RecoveringBlock() {
-      super();
-      newGenerationStamp = -1L;
-    }
-
-    /**
      * Create RecoveringBlock.
      */
     public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
@@ -84,27 +70,6 @@ public class BlockRecoveryCommand extend
     public long getNewGenerationStamp() {
       return newGenerationStamp;
     }
-
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (RecoveringBlock.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new RecoveringBlock(); }
-         });
-    }
-
-    public void write(DataOutput out) throws IOException {
-      super.write(out);
-      out.writeLong(newGenerationStamp);
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      newGenerationStamp = in.readLong();
-    }
   }
 
   /**
@@ -149,34 +114,4 @@ public class BlockRecoveryCommand extend
     sb.append("\n)");
     return sb.toString();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockRecoveryCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockRecoveryCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(recoveringBlocks.size());
-    for(RecoveringBlock block : recoveringBlocks) {
-      block.write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    int numBlocks = in.readInt();
-    recoveringBlocks = new ArrayList<RecoveringBlock>(numBlocks);
-    for(int i = 0; i < numBlocks; i++) {
-      RecoveringBlock b = new RecoveringBlock();
-      b.readFields(in);
-      add(b);
-    }
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Wed Apr 11 05:47:40 2012
@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 /** A class to implement an array of BlockLocations
 *  It provides efficient customized serialization/deserialization methods
@@ -34,23 +27,17 @@ import org.apache.hadoop.io.WritableUtil
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BlocksWithLocations implements Writable {
+public class BlocksWithLocations {
 
   /**
    * A class to keep track of a block and its locations
    */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
-  public static class BlockWithLocations  implements Writable {
+  public static class BlockWithLocations {
     Block block;
     String datanodeIDs[];
     
-    /** default constructor */
-    public BlockWithLocations() {
-      block = new Block();
-      datanodeIDs = null;
-    }
-    
     /** constructor */
     public BlockWithLocations(Block b, String[] datanodes) {
       block = b;
@@ -66,33 +53,10 @@ public class BlocksWithLocations impleme
     public String[] getDatanodes() {
       return datanodeIDs;
     }
-    
-    /** deserialization method */
-    public void readFields(DataInput in) throws IOException {
-      block.readFields(in);
-      int len = WritableUtils.readVInt(in); // variable length integer
-      datanodeIDs = new String[len];
-      for(int i=0; i<len; i++) {
-        datanodeIDs[i] = Text.readString(in);
-      }
-    }
-    
-    /** serialization method */
-    public void write(DataOutput out) throws IOException {
-      block.write(out);
-      WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
-      for(String id:datanodeIDs) {
-        Text.writeString(out, id);
-      }
-    }
   }
 
   private BlockWithLocations[] blocks;
 
-  /** default constructor */
-  BlocksWithLocations() {
-  }
-
   /** Constructor with one parameter */
   public BlocksWithLocations( BlockWithLocations[] blocks ) {
     this.blocks = blocks;
@@ -102,22 +66,4 @@ public class BlocksWithLocations impleme
   public BlockWithLocations[] getBlocks() {
     return blocks;
   }
-
-  /** serialization method */
-  public void write( DataOutput out ) throws IOException {
-    WritableUtils.writeVInt(out, blocks.length);
-    for(int i=0; i<blocks.length; i++) {
-      blocks[i].write(out);
-    }
-  }
-
-  /** deserialization method */
-  public void readFields(DataInput in) throws IOException {
-    int len = WritableUtils.readVInt(in);
-    blocks = new BlockWithLocations[len];
-    for(int i=0; i<len; i++) {
-      blocks[i] = new BlockWithLocations();
-      blocks[i].readFields(in);
-    }
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java Wed Apr 11 05:47:40 2012
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -77,27 +70,4 @@ public class CheckpointCommand extends N
   public boolean needToReturnImage() {
     return needToReturnImage;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {
-    WritableFactories.setFactory(CheckpointCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new CheckpointCommand();}
-        });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    cSig.write(out);
-    out.writeBoolean(needToReturnImage);
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    cSig = new CheckpointSignature();
-    cSig.readFields(in);
-    needToReturnImage = in.readBoolean();
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java Wed Apr 11 05:47:40 2012
@@ -27,10 +27,7 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  public DatanodeCommand() {
-    super();
-  }
-  
+
   DatanodeCommand(int action) {
     super(action);
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Wed Apr 11 05:47:40 2012
@@ -18,20 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /** 
  * DatanodeRegistration class contains all information the name-node needs
@@ -41,23 +33,11 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeRegistration extends DatanodeID
-implements Writable, NodeRegistration {
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DatanodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DatanodeRegistration(); }
-       });
-  }
+    implements NodeRegistration {
 
   private StorageInfo storageInfo;
   private ExportedBlockKeys exportedKeys;
 
-  public DatanodeRegistration() {
-    this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        new StorageInfo(), new ExportedBlockKeys());
-  }
-  
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
       ExportedBlockKeys keys) {
     super(dn);
@@ -118,30 +98,6 @@ implements Writable, NodeRegistration {
       + ")";
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    out.writeShort(ipcPort);
-
-    storageInfo.write(out);
-    exportedKeys.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    this.ipcPort = in.readShort() & 0x0000ffff;
-
-    storageInfo.readFields(in);
-    exportedKeys.readFields(in);
-  }
   @Override
   public boolean equals(Object to) {
     return super.equals(to);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java Wed Apr 11 05:47:40 2012
@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A FinalizeCommand is an instruction to a datanode to finalize the upgrade of a block pool.
@@ -34,17 +26,6 @@ import org.apache.hadoop.io.WritableUtil
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FinalizeCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new FinalizeCommand();
-      }
-    });
-  }
-  
   String blockPoolId;
   private FinalizeCommand() {
     super(DatanodeProtocol.DNA_FINALIZE);
@@ -58,11 +39,4 @@ public class FinalizeCommand extends Dat
   public String getBlockPoolId() {
     return blockPoolId;
   }
-  
-  public void readFields(DataInput in) throws IOException {
-    blockPoolId = WritableUtils.readString(in);
-  }
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeString(out, blockPoolId);
-  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java Wed Apr 11 05:47:40 2012
@@ -17,31 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 /**
  * Response to {@link DatanodeProtocol#sendHeartbeat}
  */
-public class HeartbeatResponse implements Writable {
+public class HeartbeatResponse {
   /** Commands returned from the namenode to the datanode */
   private DatanodeCommand[] commands;
   
   /** Information about the current HA-related state of the NN */
   private NNHAStatusHeartbeat haStatus;
   
-  public HeartbeatResponse() {
-    // Empty constructor required for Writable
-  }
-  
   public HeartbeatResponse(DatanodeCommand[] cmds,
       NNHAStatusHeartbeat haStatus) {
     commands = cmds;
@@ -55,31 +45,4 @@ public class HeartbeatResponse implement
   public NNHAStatusHeartbeat getNameNodeHaState() {
     return haStatus;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    int length = commands == null ? 0 : commands.length;
-    out.writeInt(length);
-    for (int i = 0; i < length; i++) {
-      ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
-                                 null, true);
-    }
-    haStatus.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int length = in.readInt();
-    commands = new DatanodeCommand[length];
-    ObjectWritable objectWritable = new ObjectWritable();
-    for (int i = 0; i < length; i++) {
-      commands[i] = (DatanodeCommand) ObjectWritable.readObject(in,
-          objectWritable, null);
-    }
-    haStatus = new NNHAStatusHeartbeat();
-    haStatus.readFields(in);
-  }
 }
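
With the Writable plumbing gone, HeartbeatResponse (and the NNHAStatusHeartbeat it carries, below) reduces to a plain value object built through its remaining constructor. A sketch, with an illustrative transaction id:

    static HeartbeatResponse makeActiveResponse() {
      // 42L is an arbitrary txid chosen for illustration.
      NNHAStatusHeartbeat haStatus =
          new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, 42L);
      return new HeartbeatResponse(new DatanodeCommand[0], haStatus);
    }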

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Wed Apr 11 05:47:40 2012
@@ -42,9 +42,6 @@ public interface InterDatanodeProtocol {
    * the interface to the DN AND the RPC protocol used to communicate with the 
    * DN.
    * 
-   * Post version 6L (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
-   * 
   * This class is used by the DN to insulate itself from the protocol
    * serialization.
    * 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java Wed Apr 11 05:47:40 2012
@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -45,29 +38,4 @@ public class KeyUpdateCommand extends Da
   public ExportedBlockKeys getExportedKeys() {
     return this.keys;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(KeyUpdateCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new KeyUpdateCommand();
-      }
-    });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    keys.write(out);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    keys.readFields(in);
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java Wed Apr 11 05:47:40 2012
@@ -17,26 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class NNHAStatusHeartbeat implements Writable {
+public class NNHAStatusHeartbeat {
 
   private State state;
   private long txid = HdfsConstants.INVALID_TXID;
   
-  public NNHAStatusHeartbeat() {
-  }
-  
   public NNHAStatusHeartbeat(State state, long txid) {
     this.state = state;
     this.txid = txid;
@@ -50,21 +41,6 @@ public class NNHAStatusHeartbeat impleme
     return txid;
   }
   
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeEnum(out, state);
-    out.writeLong(txid);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    state = WritableUtils.readEnum(in, State.class);
-    txid = in.readLong();
-  }
-
   @InterfaceAudience.Private
   public enum State {
     ACTIVE,

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java Wed Apr 11 05:47:40 2012
@@ -19,9 +19,6 @@ package org.apache.hadoop.hdfs.server.pr
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Base class for name-node command.
@@ -30,17 +27,6 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NamenodeCommand extends ServerCommand {
-  static {
-    WritableFactories.setFactory(NamenodeCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new NamenodeCommand();}
-        });
-  }
-
-  public NamenodeCommand() {
-    super();
-  }
-
   public NamenodeCommand(int action) {
     super(action);
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Wed Apr 11 05:47:40 2012
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
@@ -42,9 +41,6 @@ public interface NamenodeProtocol {
    * the client interface to the NN AND the RPC protocol used to 
    * communicate with the NN.
    * 
-   * Post version 70 (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
-   * 
    * This class is used by both the DFSClient and the 
    * NN server side to insulate from the protocol serialization.
    * 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java Wed Apr 11 05:47:40 2012
@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -44,10 +36,6 @@ implements NodeRegistration {
   String httpAddress;         // HTTP address of the node
   NamenodeRole role;          // node role
 
-  public NamenodeRegistration() {
-    super();
-  }
-
   public NamenodeRegistration(String address,
                               String httpAddress,
                               StorageInfo storageInfo,
@@ -95,31 +83,4 @@ implements NodeRegistration {
   public boolean isRole(NamenodeRole that) {
     return role.equals(that);
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {
-    WritableFactories.setFactory
-      (NamenodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamenodeRegistration(); }
-       });
-  }
-
-  @Override // Writable
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, rpcAddress);
-    Text.writeString(out, httpAddress);
-    Text.writeString(out, role.name());
-    super.write(out);
-  }
-
-  @Override // Writable
-  public void readFields(DataInput in) throws IOException {
-    rpcAddress = Text.readString(in);
-    httpAddress = Text.readString(in);
-    role = NamenodeRole.valueOf(Text.readString(in));
-    super.readFields(in);
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Wed Apr 11 05:47:40 2012
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -28,11 +26,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * NamespaceInfo is returned by the name-node in reply 
@@ -76,31 +69,6 @@ public class NamespaceInfo extends Stora
     return blockPoolID;
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (NamespaceInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamespaceInfo(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, getBuildVersion());
-    super.write(out);
-    out.writeInt(getDistributedUpgradeVersion());
-    WritableUtils.writeString(out, blockPoolID);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    buildVersion = DeprecatedUTF8.readString(in);
-    super.readFields(in);
-    distributedUpgradeVersion = in.readInt();
-    blockPoolID = WritableUtils.readString(in);
-  }
-  
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java Wed Apr 11 05:47:40 2012
@@ -18,19 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A data structure to store the blocks in an incremental block report. 
  */
-public class ReceivedDeletedBlockInfo implements Writable {
+public class ReceivedDeletedBlockInfo {
   Block block;
   BlockStatus status;
   String delHints;
@@ -113,25 +106,6 @@ public class ReceivedDeletedBlockInfo im
     return status == BlockStatus.DELETED_BLOCK;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    this.block.write(out);
-    WritableUtils.writeVInt(out, this.status.code);
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      Text.writeString(out, this.delHints);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.block = new Block();
-    this.block.readFields(in);
-    this.status = BlockStatus.fromCode(WritableUtils.readVInt(in));
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      this.delHints = Text.readString(in);
-    }
-  }
-
   public String toString() {
     return block.toString() + ", status: " + status +
       ", delHint: " + delHints;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java Wed Apr 11 05:47:40 2012
@@ -17,14 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * A RegisterCommand is an instruction to a datanode to register with the namenode.
@@ -32,26 +26,10 @@ import org.apache.hadoop.io.WritableFact
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class RegisterCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new RegisterCommand();
-      }
-    });
-  }
   
   public static final DatanodeCommand REGISTER = new RegisterCommand();
 
   public RegisterCommand() {
     super(DatanodeProtocol.DNA_REGISTER);
   }
-
-  @Override
-  public void readFields(DataInput in) { }
- 
-  @Override
-  public void write(DataOutput out) { }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java Wed Apr 11 05:47:40 2012
@@ -18,17 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Replica recovery information.
@@ -38,9 +31,6 @@ import org.apache.hadoop.io.WritableFact
 public class ReplicaRecoveryInfo extends Block {
   private ReplicaState originalState;
 
-  public ReplicaRecoveryInfo() {
-  }
-
   public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) {
     set(blockId, diskLen, gs);
     originalState = rState;
@@ -59,27 +49,4 @@ public class ReplicaRecoveryInfo extends
   public int hashCode() {
     return super.hashCode();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (ReplicaRecoveryInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new ReplicaRecoveryInfo(); }
-       });
-  }
-
- @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    originalState = ReplicaState.read(in); 
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    originalState.write(out);
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java Wed Apr 11 05:47:40 2012
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.*;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
 
 /**
  * Base class for a server command.
@@ -33,21 +30,10 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public abstract class ServerCommand implements Writable {
+public abstract class ServerCommand {
   private int action;
 
   /**
-   * Unknown server command constructor.
-   * Creates a command with action 0.
-   * 
-   * @see NamenodeProtocol#ACT_UNKNOWN
-   * @see DatanodeProtocol#DNA_UNKNOWN
-   */
-  public ServerCommand() {
-    this(0);
-  }
-
-  /**
    * Create a command for the specified action.
    * Actions are protocol specific.
    * 
@@ -66,15 +52,4 @@ public abstract class ServerCommand impl
   public int getAction() {
     return this.action;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(this.action);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    this.action = in.readInt();
-  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java Wed Apr 11 05:47:40 2012
@@ -17,15 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * This is a generic distributed upgrade command.
@@ -68,31 +61,4 @@ public class UpgradeCommand extends Data
   public short getCurrentStatus() {
     return this.upgradeStatus;
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (UpgradeCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new UpgradeCommand(); }
-       });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(this.version);
-    out.writeShort(this.upgradeStatus);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.version = in.readInt();
-    this.upgradeStatus = in.readShort();
-  }
 }
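
The removed write()/readFields() pair serialized version and upgradeStatus
after the superclass's action code. Under protobuf the same data travels in
a message instead; a hedged sketch of the encode side, where the
UpgradeCommandProto message and its setters are assumptions chosen to
mirror the removed fields rather than the exact generated API:

    // Sketch only: field names assumed to mirror the removed write() calls.
    static UpgradeCommandProto toProto(UpgradeCommand cmd) {
      return UpgradeCommandProto.newBuilder()
          .setAction(cmd.getAction())               // from ServerCommand
          .setVersion(cmd.getVersion())             // was out.writeInt(version)
          .setUpgradeStatus(cmd.getCurrentStatus()) // was out.writeShort(status)
          .build();
    }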

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Wed Apr 11 05:47:40 2012
@@ -248,7 +248,6 @@ public class GetConf extends Configured 
     @Override
     int doWorkInternal(GetConf tool, String[] args) throws Exception {
       this.key = args[0];
-      System.err.println("key: " + key);
       return super.doWorkInternal(tool, args);
     }
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java Wed Apr 11 05:47:40 2012
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.xml.sax.Attributes;
@@ -54,6 +55,7 @@ class OfflineEditsXmlLoader 
   private FSEditLogOpCodes opCode;
   private StringBuffer cbuf;
   private long nextTxId;
+  private final OpInstanceCache opCache = new OpInstanceCache();
   
   static enum ParseState {
     EXPECT_EDITS_TAG,
@@ -207,7 +209,7 @@ class OfflineEditsXmlLoader 
           throw new InvalidXmlException("expected </DATA>");
         }
         state = ParseState.EXPECT_RECORD;
-        FSEditLogOp op = FSEditLogOp.getOpInstance(opCode);
+        FSEditLogOp op = opCache.get(opCode);
         opCode = null;
         try {
           op.decodeXml(stanza);
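
The new OpInstanceCache gives each loader one reusable FSEditLogOp per
opcode, so a tight decode loop no longer allocates a fresh op object for
every record. A self-contained sketch of the idea (illustrative, not the
actual FSEditLogOp.OpInstanceCache):

    import java.util.EnumMap;

    enum OpCode { OP_ADD, OP_DELETE, OP_RENAME }   // stand-in opcodes

    class OpInstanceCacheSketch {
      private final EnumMap<OpCode, Object> cache =
          new EnumMap<OpCode, Object>(OpCode.class);

      Object get(OpCode code) {
        Object op = cache.get(code);
        if (op == null) {
          op = new Object();    // stand-in for the per-opcode op class
          cache.put(code, op);
        }
        return op;              // the same instance on every later lookup
      }
    }

Because each loader owns its cache, the reuse needs no synchronization,
unlike a single static instance shared across threads.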

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1310902-1324566

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto Wed Apr 11 05:47:40 2012
@@ -38,8 +38,11 @@ message InitReplicaRecoveryRequestProto 
  * Replica recovery information
  */
 message InitReplicaRecoveryResponseProto {
-  required ReplicaStateProto state = 1; // State of the replica
-  required BlockProto block = 2;   // block information
+  required bool replicaFound = 1;
+
+  // The following fields are not set if no replica was found.
+  optional ReplicaStateProto state = 2; // State of the replica
+  optional BlockProto block = 3;   // block information
 }
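
On the consumer side replicaFound acts as a presence flag: decoders must
check it before reading the optional fields, and a false value maps
naturally to a null ReplicaRecoveryInfo, which is exactly what the new
TestInterDatanodeProtocol assertion below expects. A hedged sketch of that
decode path; the PBHelper conversion for the replica state is an
assumption, and the BlockProto getters follow generated naming:

    static ReplicaRecoveryInfo fromProto(InitReplicaRecoveryResponseProto resp) {
      if (!resp.getReplicaFound()) {
        return null;                     // no replica on this datanode
      }
      // state and block are only set when replicaFound is true
      return new ReplicaRecoveryInfo(
          resp.getBlock().getBlockId(),
          resp.getBlock().getNumBytes(),
          resp.getBlock().getGenStamp(),
          PBHelper.convert(resp.getState()));
    }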
 
 /**

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1310902-1324566

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1310902-1324566

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1310902-1324566

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1310902-1324566

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Apr 11 05:47:40 2012
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileSystem.S
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -705,4 +706,14 @@ public class DFSTestUtil {
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
         .join(nameservices));
   }
+  
+  public static DatanodeDescriptor getLocalDatanodeDescriptor() {
+    return new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo() {
+    return new DatanodeInfo(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Apr 11 05:47:40 2012
@@ -583,6 +583,10 @@ public class MiniDFSCluster {
       }
     }
     
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
+
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
         hosts, simulatedCapacities, setupHostsFile);
@@ -783,6 +787,9 @@ public class MiniDFSCluster {
                      operation == StartupOption.REGULAR) ?
       new String[] {} : new String[] {operation.getName()};
     NameNode nn =  NameNode.createNameNode(args, conf);
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     
     // After the NN has started, set back the bound ports into
     // the conf
@@ -958,6 +965,9 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile,
                              boolean checkDataNodeAddrConfig) throws IOException {
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
 
     int curDatanodesNum = dataNodes.size();
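
All three guards above short-circuit startup when the cluster is launched
with StartupOption.RECOVER: offline metadata recovery runs inside
NameNode.createNameNode() and returns without serving RPC, so there are no
bound ports to copy back into the conf and no DataNodes to start. A hedged
usage sketch from a test's point of view, where the builder method names
are assumptions:

    // Sketch only: drive offline recovery through the mini cluster.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .format(false)                          // reuse existing metadata
        .startupOption(StartupOption.RECOVER)
        .build();                               // returns after recovery; no DNs run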

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Wed Apr 11 05:47:40 2012
@@ -62,7 +62,7 @@ public class TestDFSUtil {
    */
   @Test
   public void testLocatedBlocks2Locations() {
-    DatanodeInfo d = new DatanodeInfo();
+    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
     DatanodeInfo[] ds = new DatanodeInfo[1];
     ds[0] = d;
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Apr 11 05:47:40 2012
@@ -121,7 +121,9 @@ public class TestGetBlocks extends TestC
       getBlocksWithException(namenode, dataNodes[0], -1);
 
       // get blocks of size BlockSize from a non-existent datanode
-      getBlocksWithException(namenode, new DatanodeInfo(), 2);
+      DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
+      info.setIpAddr("1.2.3.4");
+      getBlocksWithException(namenode, info, 2);
     } finally {
       cluster.shutdown();
     }
@@ -132,7 +134,7 @@ public class TestGetBlocks extends TestC
                                       long size) throws IOException {
     boolean getException = false;
     try {
-        namenode.getBlocks(new DatanodeInfo(), 2);
+        namenode.getBlocks(datanode, size);
     } catch(RemoteException e) {
       getException = true;
       assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Wed Apr 11 05:47:40 2012
@@ -179,7 +179,7 @@ public class TestParallelReadUtil {
    */
   static class ReadWorker extends Thread {
 
-    static public final int N_ITERATIONS = 1024 * 4;
+    static public final int N_ITERATIONS = 1024;
 
     private static final double PROPORTION_NON_POSITIONAL_READ = 0.10;
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Apr 11 05:47:40 2012
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -441,9 +442,9 @@ public class TestPBHelper {
     Block[] blocks = new Block[] { new Block(21), new Block(22) };
     DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
         new DatanodeInfo[2] };
-    dnInfos[0][0] = new DatanodeInfo();
-    dnInfos[1][0] = new DatanodeInfo();
-    dnInfos[1][1] = new DatanodeInfo();
+    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
     BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
         blocks, dnInfos);
     BlockCommandProto bcProto = PBHelper.convert(bc);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Wed Apr 11 05:47:40 2012
@@ -26,6 +26,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -47,7 +48,7 @@ public class TestBlockInfo {
 
     final int MAX_BLOCKS = 10;
 
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
     int headIndex;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java Wed Apr 11 05:47:40 2012
@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 
@@ -80,8 +81,8 @@ public class TestCorruptReplicaInfo exte
         block_ids.add((long)i);
       }
       
-      DatanodeDescriptor dn1 = new DatanodeDescriptor();
-      DatanodeDescriptor dn2 = new DatanodeDescriptor();
+      DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
+      DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
       
       crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
       assertEquals("Number of corrupt blocks not returning correctly",

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java Wed Apr 11 05:47:40 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 
 import java.util.ArrayList;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 
@@ -36,7 +37,7 @@ public class TestDatanodeDescriptor exte
     final int REMAINING_BLOCKS = 2;
     final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
     
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
       blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
@@ -49,7 +50,7 @@ public class TestDatanodeDescriptor exte
   }
   
   public void testBlocksCounter() throws Exception {
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     assertEquals(0, dd.numBlocks());
     BlockInfo blk = new BlockInfo(new Block(1L), 1);
     BlockInfo blk1 = new BlockInfo(new Block(2L), 2);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Apr 11 05:47:40 2012
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,13 +37,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestOverReplicatedBlocks extends TestCase {
+public class TestOverReplicatedBlocks {
   /** Test that processOverReplicatedBlock can handle corrupt replicas.
    * It makes sure that corrupt replicas are not treated as valid ones,
    * which would otherwise let the NN delete valid replicas while
    * keeping the corrupt ones.
    */
+  @Test
   public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@@ -113,4 +116,30 @@ public class TestOverReplicatedBlocks ex
       cluster.shutdown();
     }
   }
+  /**
+   * Test that an over-replicated block gets invalidated when the
+   * replication factor is decreased on a partially written block.
+   */
+  @Test
+  public void testInvalidateOverReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
+      FSDataOutputStream out = fs.create(p, (short) 2);
+      out.writeBytes("HDFS-3119: " + p);
+      out.hsync();
+      fs.setReplication(p, (short) 1);
+      out.close();
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
+      assertEquals("Expected only one live replica for the block", 1, bm
+          .countNodes(block.getLocalBlock()).liveReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Wed Apr 11 05:47:40 2012
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -172,6 +171,13 @@ public class TestInterDatanodeProtocol {
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
+      
+      // Verify that initReplicaRecovery returns null for a block with no replica
+      ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
+          b.getBlockId(), 0, 0);
+      assertNull(idp.initReplicaRecovery(
+          new RecoveringBlock(badBlock,
+              locatedblock.getLocations(), recoveryId)));
     }
     finally {
       if (cluster != null) {cluster.shutdown();}