Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/06/12 08:34:39 UTC

svn commit: r1134869 [1/2] - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ src/java/org/apache/hadoop/hdfs/server/balancer/ src/java/org/apach...

Author: szetszwo
Date: Sun Jun 12 06:34:37 2011
New Revision: 1134869

URL: http://svn.apache.org/viewvc?rev=1134869&view=rev
Log:
HDFS-2066. Create a package and individual class files for DataTransferProtocol.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
      - copied, changed from r1134724, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtoUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
      - copied, changed from r1134724, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
Removed:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtoUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
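
The diffs that follow are largely mechanical: the nested types of DataTransferProtocol (Op, Sender, Receiver, PacketHeader, PipelineAck, BlockConstructionStage, ReplaceDatanodeOnFailure) become top-level classes in the new org.apache.hadoop.hdfs.protocol.datatransfer package, so call sites swap an import and drop the outer-class prefix. A condensed before/after, paraphrasing the BlockReader.java hunk below (variable names are illustrative, not the exact code):

    // before: nested class reference
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

    DataTransferProtocol.Sender.opReadBlock(out, block, startOffset, len,
        clientName, blockToken);

    // after: top-level class in the datatransfer package
    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;

    Sender.opReadBlock(out, block, startOffset, len, clientName, blockToken);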

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sun Jun 12 06:34:37 2011
@@ -500,6 +500,9 @@ Trunk (unreleased changes)
     HDFS-2003. Separate FSEditLog reading logic from edit log memory state
     building logic. (Ivan Kelly via todd)
 
+    HDFS-2066. Create a package and individual class files for
+    DataTransferProtocol.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java Sun Jun 12 06:34:37 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -30,10 +32,9 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
@@ -45,8 +46,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
-
 
 /** This is a wrapper around connection to datanode
  * and understands checksum, offset etc.
@@ -405,7 +404,7 @@ public class BlockReader extends FSInput
                                      String clientName)
                                      throws IOException {
     // in and out will be closed when sock is closed (by the caller)
-    DataTransferProtocol.Sender.opReadBlock(
+    Sender.opReadBlock(
         new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT))),
         block, startOffset, len, clientName, blockToken);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Sun Jun 12 06:34:37 2011
@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.BLOCK_CHECKSUM;
-
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -63,18 +61,20 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
@@ -133,7 +133,7 @@ public class DFSClient implements FSCons
   SocketFactory socketFactory;
   int socketTimeout;
   final int writePacketSize;
-  final DataTransferProtocol.ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
+  final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
   final FileSystem.Statistics stats;
   final int hdfsTimeout;    // timeout value for a DFS operation.
   final LeaseRenewer leaserenewer;
@@ -267,7 +267,7 @@ public class DFSClient implements FSCons
     this.writePacketSize = 
       conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
                   DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-    this.dtpReplaceDatanodeOnFailure = DataTransferProtocol.ReplaceDatanodeOnFailure.get(conf);
+    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
 
     // The hdfsTimeout is currently the same as the ipc timeout 
     this.hdfsTimeout = Client.getTimeout(conf);
@@ -1114,11 +1114,10 @@ public class DFSClient implements FSCons
 
           if (LOG.isDebugEnabled()) {
             LOG.debug("write to " + datanodes[j].getName() + ": "
-                + BLOCK_CHECKSUM + ", block=" + block);
+                + Op.BLOCK_CHECKSUM + ", block=" + block);
           }
           // get block MD5
-          DataTransferProtocol.Sender.opBlockChecksum(out, block,
-              lb.getBlockToken());
+          Sender.opBlockChecksum(out, block, lb.getBlockToken());
 
           final BlockOpResponseProto reply =
             BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Sun Jun 12 06:34:37 2011
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
-
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -48,17 +46,18 @@ import org.apache.hadoop.fs.UnresolvedLi
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -68,7 +67,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -849,7 +847,7 @@ class DFSOutputStream extends FSOutputSu
             DataNode.SMALL_BUFFER_SIZE));
 
         //send the TRANSFER_BLOCK request
-        DataTransferProtocol.Sender.opTransferBlock(out, block,
+        Sender.opTransferBlock(out, block,
             dfsClient.clientName, targets, blockToken);
 
         //ack
@@ -1023,7 +1021,7 @@ class DFSOutputStream extends FSOutputSu
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
-        DataTransferProtocol.Sender.opWriteBlock(out, block,
+        Sender.opWriteBlock(out, block,
             nodes.length, recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
             block.getNumBytes(), bytesSent, dfsClient.clientName, null, nodes,
             accessToken);

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/** Block Construction Stage */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public enum BlockConstructionStage {
+  /** The enumerates are always listed as regular stage followed by the
+   * recovery stage. 
+   * Changing this order will make getRecoveryStage not working.
+   */
+  // pipeline set up for block append
+  PIPELINE_SETUP_APPEND,
+  // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
+  PIPELINE_SETUP_APPEND_RECOVERY,
+  // data streaming
+  DATA_STREAMING,
+  // pipeline setup for failed data streaming recovery
+  PIPELINE_SETUP_STREAMING_RECOVERY,
+  // close the block and pipeline
+  PIPELINE_CLOSE,
+  // Recover a failed PIPELINE_CLOSE
+  PIPELINE_CLOSE_RECOVERY,
+  // pipeline set up for block creation
+  PIPELINE_SETUP_CREATE,
+  // transfer RBW for adding datanodes
+  TRANSFER_RBW,
+  // transfer Finalized for adding datanodes
+  TRANSFER_FINALIZED;
+  
+  final static private byte RECOVERY_BIT = (byte)1;
+  
+  /**
+   * get the recovery stage of this stage
+   */
+  public BlockConstructionStage getRecoveryStage() {
+    if (this == PIPELINE_SETUP_CREATE) {
+      throw new IllegalArgumentException( "Unexpected blockStage " + this);
+    } else {
+      return values()[ordinal()|RECOVERY_BIT];
+    }
+  }
+}    
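
A note on getRecoveryStage() above: regular stages are listed at even ordinals with their recovery stages immediately after at odd ordinals, so OR-ing the ordinal with RECOVERY_BIT (1) jumps from a regular stage to its recovery stage and is a no-op on a stage that is already a recovery stage. PIPELINE_SETUP_CREATE (ordinal 6) is the one regular stage without a recovery counterpart; ordinal 7 is TRANSFER_RBW, hence the IllegalArgumentException. A minimal standalone sketch of the same trick (illustrative names, not the HDFS enum):

    public class StagePairingDemo {
      enum Stage {
        SETUP_APPEND,            // ordinal 0
        SETUP_APPEND_RECOVERY,   // ordinal 1
        DATA_STREAMING,          // ordinal 2
        DATA_STREAMING_RECOVERY; // ordinal 3

        private static final byte RECOVERY_BIT = 1;

        Stage recoveryStage() {
          // even ordinal -> the next (recovery) constant; odd ordinal -> itself
          return values()[ordinal() | RECOVERY_BIT];
        }
      }

      public static void main(String[] args) {
        System.out.println(Stage.SETUP_APPEND.recoveryStage());            // SETUP_APPEND_RECOVERY
        System.out.println(Stage.DATA_STREAMING.recoveryStage());          // DATA_STREAMING_RECOVERY
        System.out.println(Stage.DATA_STREAMING_RECOVERY.recoveryStage()); // stays the same
      }
    }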

Copied: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java (from r1134724, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtoUtil.java)
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java?p2=hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java&p1=hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtoUtil.java&r1=1134724&r2=1134869&rev=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtoUtil.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java Sun Jun 12 06:34:37 2011
@@ -15,10 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.protocol;
+package org.apache.hadoop.hdfs.protocol.datatransfer;
 
 
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
@@ -30,9 +33,11 @@ import org.apache.hadoop.security.token.
  * Static utilities for dealing with the protocol buffers used by the
  * Data Transfer Protocol.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 abstract class DataTransferProtoUtil {
 
-  static DataTransferProtocol.BlockConstructionStage fromProto(
+  static BlockConstructionStage fromProto(
       OpWriteBlockProto.BlockConstructionStage stage) {
     return BlockConstructionStage.valueOf(BlockConstructionStage.class,
         stage.name());

Copied: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java (from r1134724, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java)
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java?p2=hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java&p1=hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java&r1=1134724&r2=1134869&rev=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java Sun Jun 12 06:34:37 2011
@@ -15,49 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
+package org.apache.hadoop.hdfs.protocol.datatransfer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
-import org.apache.hadoop.security.token.Token;
-
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtoUtil.fromProto;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtoUtil.toProto;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProto;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProtos;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProto;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProtos;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
-
-import com.google.protobuf.Message;
 
 /**
  * Transfer data to/from datanode using a streaming protocol.
@@ -72,645 +35,8 @@ public interface DataTransferProtocol {
    * when protocol changes. It is not very obvious. 
    */
   /*
-   * Version 25:
-   *    Encapsulate individual operation headers.
+   * Version 27:
+   *    Move DataTransferProtocol and the inner classes to a package.
    */
-  public static final int DATA_TRANSFER_VERSION = 25;
-
-  /** Operation */
-  public enum Op {
-    WRITE_BLOCK((byte)80),
-    READ_BLOCK((byte)81),
-    READ_METADATA((byte)82),
-    REPLACE_BLOCK((byte)83),
-    COPY_BLOCK((byte)84),
-    BLOCK_CHECKSUM((byte)85),
-    TRANSFER_BLOCK((byte)86);
-
-    /** The code for this operation. */
-    public final byte code;
-    
-    private Op(byte code) {
-      this.code = code;
-    }
-    
-    private static final int FIRST_CODE = values()[0].code;
-    /** Return the object represented by the code. */
-    private static Op valueOf(byte code) {
-      final int i = (code & 0xff) - FIRST_CODE;
-      return i < 0 || i >= values().length? null: values()[i];
-    }
-
-    /** Read from in */
-    public static Op read(DataInput in) throws IOException {
-      return valueOf(in.readByte());
-    }
-
-    /** Write to out */
-    public void write(DataOutput out) throws IOException {
-      out.write(code);
-    }
-  }
-    
-  public enum BlockConstructionStage {
-    /** The enumerates are always listed as regular stage followed by the
-     * recovery stage. 
-     * Changing this order will make getRecoveryStage not working.
-     */
-    // pipeline set up for block append
-    PIPELINE_SETUP_APPEND,
-    // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
-    PIPELINE_SETUP_APPEND_RECOVERY,
-    // data streaming
-    DATA_STREAMING,
-    // pipeline setup for failed data streaming recovery
-    PIPELINE_SETUP_STREAMING_RECOVERY,
-    // close the block and pipeline
-    PIPELINE_CLOSE,
-    // Recover a failed PIPELINE_CLOSE
-    PIPELINE_CLOSE_RECOVERY,
-    // pipeline set up for block creation
-    PIPELINE_SETUP_CREATE,
-    // transfer RBW for adding datanodes
-    TRANSFER_RBW,
-    // transfer Finalized for adding datanodes
-    TRANSFER_FINALIZED;
-    
-    final static private byte RECOVERY_BIT = (byte)1;
-    
-    /**
-     * get the recovery stage of this stage
-     */
-    public BlockConstructionStage getRecoveryStage() {
-      if (this == PIPELINE_SETUP_CREATE) {
-        throw new IllegalArgumentException( "Unexpected blockStage " + this);
-      } else {
-        return values()[ordinal()|RECOVERY_BIT];
-      }
-    }
-  }    
-
-  
-  /** Sender */
-  @InterfaceAudience.Private
-  @InterfaceStability.Evolving
-  public static class Sender {
-    /** Initialize a operation. */
-    private static void op(final DataOutput out, final Op op
-        ) throws IOException {
-      out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
-      op.write(out);
-    }
-
-    private static void send(final DataOutputStream out, final Op opcode,
-        final Message proto) throws IOException {
-      op(out, opcode);
-      proto.writeDelimitedTo(out);
-      out.flush();
-    }
-
-    /** Send OP_READ_BLOCK */
-    public static void opReadBlock(DataOutputStream out, ExtendedBlock blk,
-        long blockOffset, long blockLen, String clientName,
-        Token<BlockTokenIdentifier> blockToken)
-        throws IOException {
-
-      OpReadBlockProto proto = OpReadBlockProto.newBuilder()
-        .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
-        .setOffset(blockOffset)
-        .setLen(blockLen)
-        .build();
-
-      send(out, Op.READ_BLOCK, proto);
-    }
-    
-
-    /** Send OP_WRITE_BLOCK */
-    public static void opWriteBlock(DataOutputStream out, ExtendedBlock blk,
-        int pipelineSize, BlockConstructionStage stage, long newGs,
-        long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
-        DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
-        throws IOException {
-      ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(blk, client,
-          blockToken);
-      
-      OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
-        .setHeader(header)
-        .addAllTargets(
-            toProtos(targets, 1))
-        .setStage(toProto(stage))
-        .setPipelineSize(pipelineSize)
-        .setMinBytesRcvd(minBytesRcvd)
-        .setMaxBytesRcvd(maxBytesRcvd)
-        .setLatestGenerationStamp(newGs);
-      
-      if (src != null) {
-        proto.setSource(toProto(src));
-      }
-
-      send(out, Op.WRITE_BLOCK, proto.build());
-    }
-
-    /** Send {@link Op#TRANSFER_BLOCK} */
-    public static void opTransferBlock(DataOutputStream out, ExtendedBlock blk,
-        String client, DatanodeInfo[] targets,
-        Token<BlockTokenIdentifier> blockToken) throws IOException {
-      
-      OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
-        .setHeader(DataTransferProtoUtil.buildClientHeader(
-            blk, client, blockToken))
-        .addAllTargets(toProtos(targets, 0))
-        .build();
-
-      send(out, Op.TRANSFER_BLOCK, proto);
-    }
-
-    /** Send OP_REPLACE_BLOCK */
-    public static void opReplaceBlock(DataOutputStream out,
-        ExtendedBlock blk, String delHint, DatanodeInfo src,
-        Token<BlockTokenIdentifier> blockToken) throws IOException {
-      OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
-        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-        .setDelHint(delHint)
-        .setSource(toProto(src))
-        .build();
-      
-      send(out, Op.REPLACE_BLOCK, proto);
-    }
-
-    /** Send OP_COPY_BLOCK */
-    public static void opCopyBlock(DataOutputStream out, ExtendedBlock blk,
-        Token<BlockTokenIdentifier> blockToken)
-        throws IOException {
-      OpCopyBlockProto proto = OpCopyBlockProto.newBuilder()
-        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-        .build();
-      
-      send(out, Op.COPY_BLOCK, proto);
-    }
-
-    /** Send OP_BLOCK_CHECKSUM */
-    public static void opBlockChecksum(DataOutputStream out, ExtendedBlock blk,
-        Token<BlockTokenIdentifier> blockToken)
-        throws IOException {
-      OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
-        .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
-        .build();
-      
-      send(out, Op.BLOCK_CHECKSUM, proto);
-    }
-  }
-
-  /** Receiver */
-  public static abstract class Receiver {
-    /** Read an Op.  It also checks protocol version. */
-    protected final Op readOp(DataInputStream in) throws IOException {
-      final short version = in.readShort();
-      if (version != DATA_TRANSFER_VERSION) {
-        throw new IOException( "Version Mismatch (Expected: " +
-            DataTransferProtocol.DATA_TRANSFER_VERSION  +
-            ", Received: " +  version + " )");
-      }
-      return Op.read(in);
-    }
-
-    /** Process op by the corresponding method. */
-    protected final void processOp(Op op, DataInputStream in
-        ) throws IOException {
-      switch(op) {
-      case READ_BLOCK:
-        opReadBlock(in);
-        break;
-      case WRITE_BLOCK:
-        opWriteBlock(in);
-        break;
-      case REPLACE_BLOCK:
-        opReplaceBlock(in);
-        break;
-      case COPY_BLOCK:
-        opCopyBlock(in);
-        break;
-      case BLOCK_CHECKSUM:
-        opBlockChecksum(in);
-        break;
-      case TRANSFER_BLOCK:
-        opTransferBlock(in);
-        break;
-      default:
-        throw new IOException("Unknown op " + op + " in data stream");
-      }
-    }
-
-    /** Receive OP_READ_BLOCK */
-    private void opReadBlock(DataInputStream in) throws IOException {
-      OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in));
-      
-      ExtendedBlock b = fromProto(
-          proto.getHeader().getBaseHeader().getBlock());
-      Token<BlockTokenIdentifier> token = fromProto(
-          proto.getHeader().getBaseHeader().getToken());
-
-      opReadBlock(in, b, proto.getOffset(), proto.getLen(),
-          proto.getHeader().getClientName(), token);
-    }
-    /**
-     * Abstract OP_READ_BLOCK method. Read a block.
-     */
-    protected abstract void opReadBlock(DataInputStream in, ExtendedBlock blk,
-        long offset, long length, String client,
-        Token<BlockTokenIdentifier> blockToken) throws IOException;
-    
-    /** Receive OP_WRITE_BLOCK */
-    private void opWriteBlock(DataInputStream in) throws IOException {
-      final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
-      opWriteBlock(in,
-          fromProto(proto.getHeader().getBaseHeader().getBlock()),
-          proto.getPipelineSize(),
-          fromProto(proto.getStage()),
-          proto.getLatestGenerationStamp(),
-          proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
-          proto.getHeader().getClientName(),
-          fromProto(proto.getSource()),
-          fromProtos(proto.getTargetsList()),
-          fromProto(proto.getHeader().getBaseHeader().getToken()));
-    }
-
-    /**
-     * Abstract OP_WRITE_BLOCK method. 
-     * Write a block.
-     */
-    protected abstract void opWriteBlock(DataInputStream in, ExtendedBlock blk,
-        int pipelineSize, BlockConstructionStage stage, long newGs,
-        long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
-        DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
-        throws IOException;
-
-    /** Receive {@link Op#TRANSFER_BLOCK} */
-    private void opTransferBlock(DataInputStream in) throws IOException {
-      final OpTransferBlockProto proto =
-        OpTransferBlockProto.parseFrom(vintPrefixed(in));
-
-      opTransferBlock(in,
-          fromProto(proto.getHeader().getBaseHeader().getBlock()),
-          proto.getHeader().getClientName(),
-          fromProtos(proto.getTargetsList()),
-          fromProto(proto.getHeader().getBaseHeader().getToken()));
-    }
-
-    /**
-     * Abstract {@link Op#TRANSFER_BLOCK} method.
-     * For {@link BlockConstructionStage#TRANSFER_RBW}
-     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
-     */
-    protected abstract void opTransferBlock(DataInputStream in, ExtendedBlock blk,
-        String client, DatanodeInfo[] targets,
-        Token<BlockTokenIdentifier> blockToken)
-        throws IOException;
-
-    /** Receive OP_REPLACE_BLOCK */
-    private void opReplaceBlock(DataInputStream in) throws IOException {
-      OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
-
-      opReplaceBlock(in,
-          fromProto(proto.getHeader().getBlock()),
-          proto.getDelHint(),
-          fromProto(proto.getSource()),
-          fromProto(proto.getHeader().getToken()));
-    }
-
-    /**
-     * Abstract OP_REPLACE_BLOCK method.
-     * It is used for balancing purpose; send to a destination
-     */
-    protected abstract void opReplaceBlock(DataInputStream in,
-        ExtendedBlock blk, String delHint, DatanodeInfo src,
-        Token<BlockTokenIdentifier> blockToken) throws IOException;
-
-    /** Receive OP_COPY_BLOCK */
-    private void opCopyBlock(DataInputStream in) throws IOException {
-      OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
-      
-      opCopyBlock(in,
-          fromProto(proto.getHeader().getBlock()),
-          fromProto(proto.getHeader().getToken()));
-    }
-
-    /**
-     * Abstract OP_COPY_BLOCK method. It is used for balancing purpose; send to
-     * a proxy source.
-     */
-    protected abstract void opCopyBlock(DataInputStream in, ExtendedBlock blk,
-        Token<BlockTokenIdentifier> blockToken)
-        throws IOException;
-
-    /** Receive OP_BLOCK_CHECKSUM */
-    private void opBlockChecksum(DataInputStream in) throws IOException {
-      OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in));
-      
-      opBlockChecksum(in,
-          fromProto(proto.getHeader().getBlock()),
-          fromProto(proto.getHeader().getToken()));
-    }
-
-    /**
-     * Abstract OP_BLOCK_CHECKSUM method.
-     * Get the checksum of a block 
-     */
-    protected abstract void opBlockChecksum(DataInputStream in,
-        ExtendedBlock blk, Token<BlockTokenIdentifier> blockToken)
-        throws IOException;
-  }
-  
-  /** reply **/
-  @InterfaceAudience.Private
-  @InterfaceStability.Evolving
-  public static class PipelineAck {
-    PipelineAckProto proto;
-    public final static long UNKOWN_SEQNO = -2;
-
-    /** default constructor **/
-    public PipelineAck() {
-    }
-    
-    /**
-     * Constructor
-     * @param seqno sequence number
-     * @param replies an array of replies
-     */
-    public PipelineAck(long seqno, Status[] replies) {
-      proto = PipelineAckProto.newBuilder()
-        .setSeqno(seqno)
-        .addAllStatus(Arrays.asList(replies))
-        .build();
-    }
-    
-    /**
-     * Get the sequence number
-     * @return the sequence number
-     */
-    public long getSeqno() {
-      return proto.getSeqno();
-    }
-    
-    /**
-     * Get the number of replies
-     * @return the number of replies
-     */
-    public short getNumOfReplies() {
-      return (short)proto.getStatusCount();
-    }
-    
-    /**
-     * get the ith reply
-     * @return the the ith reply
-     */
-    public Status getReply(int i) {
-      return proto.getStatus(i);
-    }
-    
-    /**
-     * Check if this ack contains error status
-     * @return true if all statuses are SUCCESS
-     */
-    public boolean isSuccess() {
-      for (DataTransferProtos.Status reply : proto.getStatusList()) {
-        if (reply != DataTransferProtos.Status.SUCCESS) {
-          return false;
-        }
-      }
-      return true;
-    }
-    
-    /**** Writable interface ****/
-    public void readFields(InputStream in) throws IOException {
-      proto = PipelineAckProto.parseFrom(vintPrefixed(in));
-    }
-
-    public void write(OutputStream out) throws IOException {
-      proto.writeDelimitedTo(out);
-    }
-    
-    @Override //Object
-    public String toString() {
-      return proto.toString();
-    }
-  }
-
-  /**
-   * Header data for each packet that goes through the read/write pipelines.
-   */
-  public static class PacketHeader {
-    /** Header size for a packet */
-    private static final int PROTO_SIZE = 
-      PacketHeaderProto.newBuilder()
-        .setOffsetInBlock(0)
-        .setSeqno(0)
-        .setLastPacketInBlock(false)
-        .setDataLen(0)
-        .build().getSerializedSize();
-    public static final int PKT_HEADER_LEN =
-      6 + PROTO_SIZE;
-
-    private int packetLen;
-    private PacketHeaderProto proto;
-
-    public PacketHeader() {
-    }
-
-    public PacketHeader(int packetLen, long offsetInBlock, long seqno,
-                        boolean lastPacketInBlock, int dataLen) {
-      this.packetLen = packetLen;
-      proto = PacketHeaderProto.newBuilder()
-        .setOffsetInBlock(offsetInBlock)
-        .setSeqno(seqno)
-        .setLastPacketInBlock(lastPacketInBlock)
-        .setDataLen(dataLen)
-        .build();
-    }
-
-    public int getDataLen() {
-      return proto.getDataLen();
-    }
-
-    public boolean isLastPacketInBlock() {
-      return proto.getLastPacketInBlock();
-    }
-
-    public long getSeqno() {
-      return proto.getSeqno();
-    }
-
-    public long getOffsetInBlock() {
-      return proto.getOffsetInBlock();
-    }
-
-    public int getPacketLen() {
-      return packetLen;
-    }
-
-    @Override
-    public String toString() {
-      return "PacketHeader with packetLen=" + packetLen +
-        "Header data: " + 
-        proto.toString();
-    }
-    
-    public void readFields(ByteBuffer buf) throws IOException {
-      packetLen = buf.getInt();
-      short protoLen = buf.getShort();
-      byte[] data = new byte[protoLen];
-      buf.get(data);
-      proto = PacketHeaderProto.parseFrom(data);
-    }
-    
-    public void readFields(DataInputStream in) throws IOException {
-      this.packetLen = in.readInt();
-      short protoLen = in.readShort();
-      byte[] data = new byte[protoLen];
-      in.readFully(data);
-      proto = PacketHeaderProto.parseFrom(data);
-    }
-
-
-    /**
-     * Write the header into the buffer.
-     * This requires that PKT_HEADER_LEN bytes are available.
-     */
-    public void putInBuffer(final ByteBuffer buf) {
-      assert proto.getSerializedSize() == PROTO_SIZE
-        : "Expected " + (PROTO_SIZE) + " got: " + proto.getSerializedSize();
-      try {
-        buf.putInt(packetLen);
-        buf.putShort((short) proto.getSerializedSize());
-        proto.writeTo(new ByteBufferOutputStream(buf));
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    
-    public void write(DataOutputStream out) throws IOException {
-      assert proto.getSerializedSize() == PROTO_SIZE
-      : "Expected " + (PROTO_SIZE) + " got: " + proto.getSerializedSize();
-      out.writeInt(packetLen);
-      out.writeShort(proto.getSerializedSize());
-      proto.writeTo(out);
-    }
-
-    /**
-     * Perform a sanity check on the packet, returning true if it is sane.
-     * @param lastSeqNo the previous sequence number received - we expect the current
-     * sequence number to be larger by 1.
-     */
-    public boolean sanityCheck(long lastSeqNo) {
-      // We should only have a non-positive data length for the last packet
-      if (proto.getDataLen() <= 0 && proto.getLastPacketInBlock()) return false;
-      // The last packet should not contain data
-      if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false;
-      // Seqnos should always increase by 1 with each packet received
-      if (proto.getSeqno() != lastSeqNo + 1) return false;
-      return true;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (!(o instanceof PacketHeader)) return false;
-      PacketHeader other = (PacketHeader)o;
-      return this.proto.equals(other.proto);
-    }
-
-    @Override
-    public int hashCode() {
-      return (int)proto.getSeqno();
-    }
-  }
-
-  /**
-   * The setting of replace-datanode-on-failure feature.
-   */
-  public enum ReplaceDatanodeOnFailure {
-    /** The feature is disabled in the entire site. */
-    DISABLE,
-    /** Never add a new datanode. */
-    NEVER,
-    /**
-     * DEFAULT policy:
-     *   Let r be the replication number.
-     *   Let n be the number of existing datanodes.
-     *   Add a new datanode only if r >= 3 and either
-     *   (1) floor(r/2) >= n; or
-     *   (2) r > n and the block is hflushed/appended.
-     */
-    DEFAULT,
-    /** Always add a new datanode when an existing datanode is removed. */
-    ALWAYS;
-
-    /** Check if the feature is enabled. */
-    public void checkEnabled() {
-      if (this == DISABLE) {
-        throw new UnsupportedOperationException(
-            "This feature is disabled.  Please refer to "
-            + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY
-            + " configuration property.");
-      }
-    }
-
-    /** Is the policy satisfied? */
-    public boolean satisfy(
-        final short replication, final DatanodeInfo[] existings,
-        final boolean isAppend, final boolean isHflushed) {
-      final int n = existings == null? 0: existings.length;
-      if (n == 0 || n >= replication) {
-        //don't need to add datanode for any policy.
-        return false;
-      } else if (this == DISABLE || this == NEVER) {
-        return false;
-      } else if (this == ALWAYS) {
-        return true;
-      } else {
-        //DEFAULT
-        if (replication < 3) {
-          return false;
-        } else {
-          if (n <= (replication/2)) {
-            return true;
-          } else {
-            return isAppend || isHflushed;
-          }
-        }
-      }
-    }
-
-    /** Get the setting from configuration. */
-    public static ReplaceDatanodeOnFailure get(final Configuration conf) {
-      final boolean enabled = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
-      if (!enabled) {
-        return DISABLE;
-      }
-
-      final String policy = conf.get(
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
-      for(int i = 1; i < values().length; i++) {
-        final ReplaceDatanodeOnFailure rdof = values()[i];
-        if (rdof.name().equalsIgnoreCase(policy)) {
-          return rdof;
-        }
-      }
-      throw new HadoopIllegalArgumentException("Illegal configuration value for "
-          + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
-          + ": " + policy);
-    }
-
-    /** Write the setting to configuration. */
-    public void write(final Configuration conf) {
-      conf.setBoolean(
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
-          this != DISABLE);
-      conf.set(
-          DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
-          name());
-    }
-  }
+  public static final int DATA_TRANSFER_VERSION = 27;
 }
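
The DEFAULT policy documented in the removed ReplaceDatanodeOnFailure enum above (the enum moves to its own ReplaceDatanodeOnFailure.java in this commit) reads: with replication r and n remaining datanodes, add a replacement only if r >= 3 and either floor(r/2) >= n, or r > n and the block was hflushed/appended. A self-contained sketch of just that branch of satisfy(), with illustrative names and a few worked cases:

    public class ReplacePolicyDemo {
      // Mirrors only the DEFAULT branch of ReplaceDatanodeOnFailure.satisfy().
      static boolean shouldAddDatanode(short replication, int existing,
          boolean isAppend, boolean isHflushed) {
        if (existing == 0 || existing >= replication) return false; // pipeline intact or empty
        if (replication < 3) return false;                          // low replication: never replace
        if (existing <= replication / 2) return true;               // lost half or more of the pipeline
        return isAppend || isHflushed;                              // visible data would be at risk
      }

      public static void main(String[] args) {
        // r=3, one datanode left: floor(3/2)=1 >= 1, so replace.
        System.out.println(shouldAddDatanode((short) 3, 1, false, false)); // true
        // r=3, two left, plain write in progress: keep going with two.
        System.out.println(shouldAddDatanode((short) 3, 2, false, false)); // false
        // r=3, two left, but the block was hflushed: replace.
        System.out.println(shouldAddDatanode((short) 3, 2, false, true));  // true
      }
    }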

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/** Operation */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public enum Op {
+  WRITE_BLOCK((byte)80),
+  READ_BLOCK((byte)81),
+  READ_METADATA((byte)82),
+  REPLACE_BLOCK((byte)83),
+  COPY_BLOCK((byte)84),
+  BLOCK_CHECKSUM((byte)85),
+  TRANSFER_BLOCK((byte)86);
+
+  /** The code for this operation. */
+  public final byte code;
+  
+  private Op(byte code) {
+    this.code = code;
+  }
+  
+  private static final int FIRST_CODE = values()[0].code;
+  /** Return the object represented by the code. */
+  private static Op valueOf(byte code) {
+    final int i = (code & 0xff) - FIRST_CODE;
+    return i < 0 || i >= values().length? null: values()[i];
+  }
+
+  /** Read from in */
+  public static Op read(DataInput in) throws IOException {
+    return valueOf(in.readByte());
+  }
+
+  /** Write to out */
+  public void write(DataOutput out) throws IOException {
+    out.write(code);
+  }
+}
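
On the wire, every operation starts with the 2-byte DATA_TRANSFER_VERSION followed by the 1-byte code from the Op enum above; the removed Sender.op() and the new Receiver.readOp() are the two ends of that handshake. A self-contained round-trip sketch of just that framing (not the HDFS classes, and with no protobuf payload after the op byte):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class OpFramingDemo {
      static final int DATA_TRANSFER_VERSION = 27; // value after this commit
      static final byte READ_BLOCK = (byte) 81;    // Op.READ_BLOCK.code

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeShort(DATA_TRANSFER_VERSION); // as in Sender.op(): version first
        out.write(READ_BLOCK);                 // then the single op-code byte
        out.flush();

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        short version = in.readShort();        // Receiver.readOp() checks this value
        byte code = in.readByte();             // then dispatches on the op code
        System.out.println("version=" + version + ", op code=" + code);
      }
    }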

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;
+import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
+
+/**
+ * Header data for each packet that goes through the read/write pipelines.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PacketHeader {
+  /** Header size for a packet */
+  private static final int PROTO_SIZE = 
+    PacketHeaderProto.newBuilder()
+      .setOffsetInBlock(0)
+      .setSeqno(0)
+      .setLastPacketInBlock(false)
+      .setDataLen(0)
+      .build().getSerializedSize();
+  public static final int PKT_HEADER_LEN =
+    6 + PROTO_SIZE;
+
+  private int packetLen;
+  private PacketHeaderProto proto;
+
+  public PacketHeader() {
+  }
+
+  public PacketHeader(int packetLen, long offsetInBlock, long seqno,
+                      boolean lastPacketInBlock, int dataLen) {
+    this.packetLen = packetLen;
+    proto = PacketHeaderProto.newBuilder()
+      .setOffsetInBlock(offsetInBlock)
+      .setSeqno(seqno)
+      .setLastPacketInBlock(lastPacketInBlock)
+      .setDataLen(dataLen)
+      .build();
+  }
+
+  public int getDataLen() {
+    return proto.getDataLen();
+  }
+
+  public boolean isLastPacketInBlock() {
+    return proto.getLastPacketInBlock();
+  }
+
+  public long getSeqno() {
+    return proto.getSeqno();
+  }
+
+  public long getOffsetInBlock() {
+    return proto.getOffsetInBlock();
+  }
+
+  public int getPacketLen() {
+    return packetLen;
+  }
+
+  @Override
+  public String toString() {
+    return "PacketHeader with packetLen=" + packetLen +
+      "Header data: " + 
+      proto.toString();
+  }
+  
+  public void readFields(ByteBuffer buf) throws IOException {
+    packetLen = buf.getInt();
+    short protoLen = buf.getShort();
+    byte[] data = new byte[protoLen];
+    buf.get(data);
+    proto = PacketHeaderProto.parseFrom(data);
+  }
+  
+  public void readFields(DataInputStream in) throws IOException {
+    this.packetLen = in.readInt();
+    short protoLen = in.readShort();
+    byte[] data = new byte[protoLen];
+    in.readFully(data);
+    proto = PacketHeaderProto.parseFrom(data);
+  }
+
+
+  /**
+   * Write the header into the buffer.
+   * This requires that PKT_HEADER_LEN bytes are available.
+   */
+  public void putInBuffer(final ByteBuffer buf) {
+    assert proto.getSerializedSize() == PROTO_SIZE
+      : "Expected " + (PROTO_SIZE) + " got: " + proto.getSerializedSize();
+    try {
+      buf.putInt(packetLen);
+      buf.putShort((short) proto.getSerializedSize());
+      proto.writeTo(new ByteBufferOutputStream(buf));
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  public void write(DataOutputStream out) throws IOException {
+    assert proto.getSerializedSize() == PROTO_SIZE
+    : "Expected " + (PROTO_SIZE) + " got: " + proto.getSerializedSize();
+    out.writeInt(packetLen);
+    out.writeShort(proto.getSerializedSize());
+    proto.writeTo(out);
+  }
+
+  /**
+   * Perform a sanity check on the packet, returning true if it is sane.
+   * @param lastSeqNo the previous sequence number received - we expect the current
+   * sequence number to be larger by 1.
+   */
+  public boolean sanityCheck(long lastSeqNo) {
+    // We should only have a non-positive data length for the last packet
+    if (proto.getDataLen() <= 0 && proto.getLastPacketInBlock()) return false;
+    // The last packet should not contain data
+    if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false;
+    // Seqnos should always increase by 1 with each packet received
+    if (proto.getSeqno() != lastSeqNo + 1) return false;
+    return true;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof PacketHeader)) return false;
+    PacketHeader other = (PacketHeader)o;
+    return this.proto.equals(other.proto);
+  }
+
+  @Override
+  public int hashCode() {
+    return (int)proto.getSeqno();
+  }
+}
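
PKT_HEADER_LEN above is 6 + PROTO_SIZE because write() and putInBuffer() emit a 4-byte packet length, a 2-byte header-proto length, and then the serialized PacketHeaderProto; readFields() reverses the same three steps. A self-contained sketch of that framing, with a placeholder byte array standing in for the proto and an illustrative packetLen value:

    import java.nio.ByteBuffer;

    public class PacketFramingDemo {
      public static void main(String[] args) {
        byte[] fakeProto = {1, 2, 3, 4, 5};      // stands in for the PacketHeaderProto bytes
        int packetLen = 123;                     // illustrative value only

        ByteBuffer buf = ByteBuffer.allocate(4 + 2 + fakeProto.length);
        buf.putInt(packetLen);                   // 4 bytes, as in putInBuffer()/write()
        buf.putShort((short) fakeProto.length);  // 2 bytes: serialized header length
        buf.put(fakeProto);                      // the header proto itself
        buf.flip();

        int readPacketLen = buf.getInt();
        byte[] headerBytes = new byte[buf.getShort()];
        buf.get(headerBytes);
        System.out.println("packetLen=" + readPacketLen
            + ", headerProtoBytes=" + headerBytes.length);
      }
    }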

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+
+/** Pipeline Acknowledgment **/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PipelineAck {
+  PipelineAckProto proto;
+  public final static long UNKOWN_SEQNO = -2;
+
+  /** default constructor **/
+  public PipelineAck() {
+  }
+  
+  /**
+   * Constructor
+   * @param seqno sequence number
+   * @param replies an array of replies
+   */
+  public PipelineAck(long seqno, Status[] replies) {
+    proto = PipelineAckProto.newBuilder()
+      .setSeqno(seqno)
+      .addAllStatus(Arrays.asList(replies))
+      .build();
+  }
+  
+  /**
+   * Get the sequence number
+   * @return the sequence number
+   */
+  public long getSeqno() {
+    return proto.getSeqno();
+  }
+  
+  /**
+   * Get the number of replies
+   * @return the number of replies
+   */
+  public short getNumOfReplies() {
+    return (short)proto.getStatusCount();
+  }
+  
+  /**
+   * get the ith reply
+   * @return the the ith reply
+   */
+  public Status getReply(int i) {
+    return proto.getStatus(i);
+  }
+  
+  /**
+   * Check if this ack contains error status
+   * @return true if all statuses are SUCCESS
+   */
+  public boolean isSuccess() {
+    for (DataTransferProtos.Status reply : proto.getStatusList()) {
+      if (reply != DataTransferProtos.Status.SUCCESS) {
+        return false;
+      }
+    }
+    return true;
+  }
+  
+  /**** Writable interface ****/
+  public void readFields(InputStream in) throws IOException {
+    proto = PipelineAckProto.parseFrom(vintPrefixed(in));
+  }
+
+  public void write(OutputStream out) throws IOException {
+    proto.writeDelimitedTo(out);
+  }
+  
+  @Override //Object
+  public String toString() {
+    return proto.toString();
+  }
+}

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProto;
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProtos;
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
+/** Receiver */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class Receiver {
+  /** Read an Op.  It also checks protocol version. */
+  protected final Op readOp(DataInputStream in) throws IOException {
+    final short version = in.readShort();
+    if (version != DataTransferProtocol.DATA_TRANSFER_VERSION) {
+      throw new IOException( "Version Mismatch (Expected: " +
+          DataTransferProtocol.DATA_TRANSFER_VERSION  +
+          ", Received: " +  version + " )");
+    }
+    return Op.read(in);
+  }
+
+  /** Dispatch op to the corresponding processing method. */
+  protected final void processOp(Op op, DataInputStream in
+      ) throws IOException {
+    switch(op) {
+    case READ_BLOCK:
+      opReadBlock(in);
+      break;
+    case WRITE_BLOCK:
+      opWriteBlock(in);
+      break;
+    case REPLACE_BLOCK:
+      opReplaceBlock(in);
+      break;
+    case COPY_BLOCK:
+      opCopyBlock(in);
+      break;
+    case BLOCK_CHECKSUM:
+      opBlockChecksum(in);
+      break;
+    case TRANSFER_BLOCK:
+      opTransferBlock(in);
+      break;
+    default:
+      throw new IOException("Unknown op " + op + " in data stream");
+    }
+  }
+
+  /** Receive OP_READ_BLOCK */
+  private void opReadBlock(DataInputStream in) throws IOException {
+    OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in));
+    
+    ExtendedBlock b = fromProto(
+        proto.getHeader().getBaseHeader().getBlock());
+    Token<BlockTokenIdentifier> token = fromProto(
+        proto.getHeader().getBaseHeader().getToken());
+
+    opReadBlock(in, b, proto.getOffset(), proto.getLen(),
+        proto.getHeader().getClientName(), token);
+  }
+  /**
+   * Abstract OP_READ_BLOCK method. Read a block.
+   */
+  protected abstract void opReadBlock(DataInputStream in, ExtendedBlock blk,
+      long offset, long length, String client,
+      Token<BlockTokenIdentifier> blockToken) throws IOException;
+  
+  /** Receive OP_WRITE_BLOCK */
+  private void opWriteBlock(DataInputStream in) throws IOException {
+    final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
+    opWriteBlock(in,
+        fromProto(proto.getHeader().getBaseHeader().getBlock()),
+        proto.getPipelineSize(),
+        fromProto(proto.getStage()),
+        proto.getLatestGenerationStamp(),
+        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
+        proto.getHeader().getClientName(),
+        fromProto(proto.getSource()),
+        fromProtos(proto.getTargetsList()),
+        fromProto(proto.getHeader().getBaseHeader().getToken()));
+  }
+
+  /**
+   * Abstract OP_WRITE_BLOCK method. 
+   * Write a block.
+   */
+  protected abstract void opWriteBlock(DataInputStream in, ExtendedBlock blk,
+      int pipelineSize, BlockConstructionStage stage, long newGs,
+      long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
+      DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
+      throws IOException;
+
+  /** Receive {@link Op#TRANSFER_BLOCK} */
+  private void opTransferBlock(DataInputStream in) throws IOException {
+    final OpTransferBlockProto proto =
+      OpTransferBlockProto.parseFrom(vintPrefixed(in));
+
+    opTransferBlock(in,
+        fromProto(proto.getHeader().getBaseHeader().getBlock()),
+        proto.getHeader().getClientName(),
+        fromProtos(proto.getTargetsList()),
+        fromProto(proto.getHeader().getBaseHeader().getToken()));
+  }
+
+  /**
+   * Abstract {@link Op#TRANSFER_BLOCK} method.
+   * For {@link BlockConstructionStage#TRANSFER_RBW}
+   * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
+   */
+  protected abstract void opTransferBlock(DataInputStream in, ExtendedBlock blk,
+      String client, DatanodeInfo[] targets,
+      Token<BlockTokenIdentifier> blockToken)
+      throws IOException;
+
+  /** Receive OP_REPLACE_BLOCK */
+  private void opReplaceBlock(DataInputStream in) throws IOException {
+    OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
+
+    opReplaceBlock(in,
+        fromProto(proto.getHeader().getBlock()),
+        proto.getDelHint(),
+        fromProto(proto.getSource()),
+        fromProto(proto.getHeader().getToken()));
+  }
+
+  /**
+   * Abstract OP_REPLACE_BLOCK method.
+   * It is used for balancing purposes; the request is sent to the destination datanode.
+   */
+  protected abstract void opReplaceBlock(DataInputStream in,
+      ExtendedBlock blk, String delHint, DatanodeInfo src,
+      Token<BlockTokenIdentifier> blockToken) throws IOException;
+
+  /** Receive OP_COPY_BLOCK */
+  private void opCopyBlock(DataInputStream in) throws IOException {
+    OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
+    
+    opCopyBlock(in,
+        fromProto(proto.getHeader().getBlock()),
+        fromProto(proto.getHeader().getToken()));
+  }
+
+  /**
+   * Abstract OP_COPY_BLOCK method. It is used for balancing purposes; the
+   * request is sent to the proxy source.
+   */
+  protected abstract void opCopyBlock(DataInputStream in, ExtendedBlock blk,
+      Token<BlockTokenIdentifier> blockToken)
+      throws IOException;
+
+  /** Receive OP_BLOCK_CHECKSUM */
+  private void opBlockChecksum(DataInputStream in) throws IOException {
+    OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in));
+    
+    opBlockChecksum(in,
+        fromProto(proto.getHeader().getBlock()),
+        fromProto(proto.getHeader().getToken()));
+  }
+
+  /**
+   * Abstract OP_BLOCK_CHECKSUM method.
+   * Get the checksum of a block.
+   */
+  protected abstract void opBlockChecksum(DataInputStream in,
+      ExtendedBlock blk, Token<BlockTokenIdentifier> blockToken)
+      throws IOException;
+}

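As a usage sketch (not part of this commit), a server-side loop built on the new Receiver reads one op per request and lets processOp() dispatch it. Only the read handler is shown; the class is kept abstract so the remaining op* methods can stay unimplemented in the sketch.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

abstract class LoggingReceiver extends Receiver {
  /** Read a single op from the stream and dispatch it. */
  void serveOneOp(DataInputStream in) throws IOException {
    final Op op = readOp(in);   // also verifies DATA_TRANSFER_VERSION
    processOp(op, in);          // calls the matching op* handler below
  }

  @Override
  protected void opReadBlock(DataInputStream in, ExtendedBlock blk,
      long offset, long length, String client,
      Token<BlockTokenIdentifier> blockToken) throws IOException {
    // A real implementation would stream the block back; this only logs it.
    System.out.println("READ_BLOCK " + blk + " offset=" + offset
        + " len=" + length + " client=" + client);
  }

  // opWriteBlock, opTransferBlock, opReplaceBlock, opCopyBlock and
  // opBlockChecksum remain abstract and would be overridden the same way.
}
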
Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+/**
+ * The setting of replace-datanode-on-failure feature.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public enum ReplaceDatanodeOnFailure {
+  /** The feature is disabled in the entire site. */
+  DISABLE,
+  /** Never add a new datanode. */
+  NEVER,
+  /**
+   * DEFAULT policy:
+   *   Let r be the replication factor.
+   *   Let n be the number of existing datanodes.
+   *   Add a new datanode only if r >= 3 and either
+   *   (1) floor(r/2) >= n; or
+   *   (2) r > n and the block is hflushed/appended.
+   */
+  DEFAULT,
+  /** Always add a new datanode when an existing datanode is removed. */
+  ALWAYS;
+
+  /** Check if the feature is enabled. */
+  public void checkEnabled() {
+    if (this == DISABLE) {
+      throw new UnsupportedOperationException(
+          "This feature is disabled.  Please refer to "
+          + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY
+          + " configuration property.");
+    }
+  }
+
+  /** Check if the policy is satisfied, i.e. a replacement datanode should be added. */
+  public boolean satisfy(
+      final short replication, final DatanodeInfo[] existings,
+      final boolean isAppend, final boolean isHflushed) {
+    final int n = existings == null? 0: existings.length;
+    if (n == 0 || n >= replication) {
+      //don't need to add datanode for any policy.
+      return false;
+    } else if (this == DISABLE || this == NEVER) {
+      return false;
+    } else if (this == ALWAYS) {
+      return true;
+    } else {
+      //DEFAULT
+      if (replication < 3) {
+        return false;
+      } else {
+        if (n <= (replication/2)) {
+          return true;
+        } else {
+          return isAppend || isHflushed;
+        }
+      }
+    }
+  }
+
+  /** Get the setting from configuration. */
+  public static ReplaceDatanodeOnFailure get(final Configuration conf) {
+    final boolean enabled = conf.getBoolean(
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
+    if (!enabled) {
+      return DISABLE;
+    }
+
+    final String policy = conf.get(
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
+    for(int i = 1; i < values().length; i++) {
+      final ReplaceDatanodeOnFailure rdof = values()[i];
+      if (rdof.name().equalsIgnoreCase(policy)) {
+        return rdof;
+      }
+    }
+    throw new HadoopIllegalArgumentException("Illegal configuration value for "
+        + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
+        + ": " + policy);
+  }
+
+  /** Write the setting to configuration. */
+  public void write(final Configuration conf) {
+    conf.setBoolean(
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
+        this != DISABLE);
+    conf.set(
+        DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
+        name());
+  }
+}
\ No newline at end of file

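To make the DEFAULT policy above concrete, here is a small worked sketch (not part of this commit) for a replication-3 pipeline that has dropped to two datanodes: a replacement is only requested once the block has been hflushed or appended.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

public class ReplacePolicyExample {
  public static void main(String[] args) {
    final ReplaceDatanodeOnFailure policy = ReplaceDatanodeOnFailure.DEFAULT;

    // r = 3, n = 2 remaining datanodes; satisfy() only looks at the length.
    final DatanodeInfo[] existings = new DatanodeInfo[2];

    // Plain write, not hflushed: floor(3/2) = 1 < 2, so no replacement.
    System.out.println(policy.satisfy((short) 3, existings, false, false)); // false

    // Same pipeline after an hflush: r (3) > n (2), so a datanode is added.
    System.out.println(policy.satisfy((short) 3, existings, false, true));  // true
  }
}

In a deployed client the policy would normally come from ReplaceDatanodeOnFailure.get(conf) rather than being hard-coded.
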
Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java?rev=1134869&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java Sun Jun 12 06:34:37 2011
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProto;
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProtos;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.toProto;
+
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
+import com.google.protobuf.Message;
+
+/** Sender */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class Sender {
+  /** Initialize an operation. */
+  private static void op(final DataOutput out, final Op op
+      ) throws IOException {
+    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
+    op.write(out);
+  }
+
+  private static void send(final DataOutputStream out, final Op opcode,
+      final Message proto) throws IOException {
+    op(out, opcode);
+    proto.writeDelimitedTo(out);
+    out.flush();
+  }
+
+  /** Send OP_READ_BLOCK */
+  public static void opReadBlock(DataOutputStream out, ExtendedBlock blk,
+      long blockOffset, long blockLen, String clientName,
+      Token<BlockTokenIdentifier> blockToken)
+      throws IOException {
+
+    OpReadBlockProto proto = OpReadBlockProto.newBuilder()
+      .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
+      .setOffset(blockOffset)
+      .setLen(blockLen)
+      .build();
+
+    send(out, Op.READ_BLOCK, proto);
+  }
+  
+
+  /** Send OP_WRITE_BLOCK */
+  public static void opWriteBlock(DataOutputStream out, ExtendedBlock blk,
+      int pipelineSize, BlockConstructionStage stage, long newGs,
+      long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
+      DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
+      throws IOException {
+    ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(blk, client,
+        blockToken);
+    
+    OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
+      .setHeader(header)
+      .addAllTargets(
+          toProtos(targets, 1))
+      .setStage(toProto(stage))
+      .setPipelineSize(pipelineSize)
+      .setMinBytesRcvd(minBytesRcvd)
+      .setMaxBytesRcvd(maxBytesRcvd)
+      .setLatestGenerationStamp(newGs);
+    
+    if (src != null) {
+      proto.setSource(toProto(src));
+    }
+
+    send(out, Op.WRITE_BLOCK, proto.build());
+  }
+
+  /** Send {@link Op#TRANSFER_BLOCK} */
+  public static void opTransferBlock(DataOutputStream out, ExtendedBlock blk,
+      String client, DatanodeInfo[] targets,
+      Token<BlockTokenIdentifier> blockToken) throws IOException {
+    
+    OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
+      .setHeader(DataTransferProtoUtil.buildClientHeader(
+          blk, client, blockToken))
+      .addAllTargets(toProtos(targets, 0))
+      .build();
+
+    send(out, Op.TRANSFER_BLOCK, proto);
+  }
+
+  /** Send OP_REPLACE_BLOCK */
+  public static void opReplaceBlock(DataOutputStream out,
+      ExtendedBlock blk, String delHint, DatanodeInfo src,
+      Token<BlockTokenIdentifier> blockToken) throws IOException {
+    OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
+      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+      .setDelHint(delHint)
+      .setSource(toProto(src))
+      .build();
+    
+    send(out, Op.REPLACE_BLOCK, proto);
+  }
+
+  /** Send OP_COPY_BLOCK */
+  public static void opCopyBlock(DataOutputStream out, ExtendedBlock blk,
+      Token<BlockTokenIdentifier> blockToken)
+      throws IOException {
+    OpCopyBlockProto proto = OpCopyBlockProto.newBuilder()
+      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+      .build();
+    
+    send(out, Op.COPY_BLOCK, proto);
+  }
+
+  /** Send OP_BLOCK_CHECKSUM */
+  public static void opBlockChecksum(DataOutputStream out, ExtendedBlock blk,
+      Token<BlockTokenIdentifier> blockToken)
+      throws IOException {
+    OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder()
+      .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+      .build();
+    
+    send(out, Op.BLOCK_CHECKSUM, proto);
+  }
+}

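For completeness, a client-side sketch (not part of this commit) of issuing OP_READ_BLOCK through the new Sender. The hostname, port, block id and empty block token are placeholders; a real client obtains located blocks and access tokens from the namenode.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class SenderExample {
  public static void main(String[] args) throws Exception {
    final Socket s = new Socket();
    s.connect(new InetSocketAddress("datanode.example.com", 50010), 5000);
    final DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(s.getOutputStream()));

    // Placeholder block and an empty token (as used on insecure clusters).
    final ExtendedBlock blk = new ExtendedBlock("BP-example", 1234L);
    final Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>();

    // Writes DATA_TRANSFER_VERSION, the READ_BLOCK opcode and the
    // vint-prefixed OpReadBlockProto, then flushes the stream.
    Sender.opReadBlock(out, blk, 0L, 4096L, "example-client", token);

    // A real reader would now parse the BlockOpResponseProto and the data
    // packets from s.getInputStream(); the sketch just closes the connection.
    s.close();
  }
}
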
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Sun Jun 12 06:34:37 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
+import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -51,12 +53,11 @@ import org.apache.hadoop.conf.Configured
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -74,8 +75,6 @@ import org.apache.hadoop.util.StringUtil
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
-
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
  * The tool is deployed as an application program that can be run by the 
@@ -349,7 +348,7 @@ public class Balancer {
     private void sendRequest(DataOutputStream out) throws IOException {
       final ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
       final Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
-      DataTransferProtocol.Sender.opReplaceBlock(out, eb, source.getStorageID(), 
+      Sender.opReplaceBlock(out, eb, source.getStorageID(), 
           proxySource.getDatanode(), accessToken);
     }
     

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Sun Jun 12 06:34:37 2011
@@ -34,13 +34,13 @@ import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSOutputSummer;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Sun Jun 12 06:34:37 2011
@@ -33,7 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.SocketOutputStream;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1134869&r1=1134868&r2=1134869&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sun Jun 12 06:34:37 2011
@@ -18,7 +18,32 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 
 import java.io.BufferedOutputStream;
@@ -66,16 +91,17 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
@@ -1947,7 +1973,7 @@ public class DataNode extends Configured
               EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
         }
 
-        DataTransferProtocol.Sender.opWriteBlock(out,
+        Sender.opWriteBlock(out,
             b, 0, stage, 0, 0, 0, clientname, srcNode, targets, accessToken);
 
         // send data & checksum