Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2009/08/12 18:37:15 UTC

svn commit: r803588 - in /hadoop/hdfs/branches/HDFS-265: ./ ivy/ lib/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/docs/src/documentation/resources/images/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hd...

Author: hairong
Date: Wed Aug 12 16:37:13 2009
New Revision: 803588

URL: http://svn.apache.org/viewvc?rev=803588&view=rev
Log:
Merge -r 800618:803337 from hdfs trunk to move the changes into the append branch

Added:
    hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/faultinject_framework.xml
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/docs/src/documentation/content/xdocs/faultinject_framework.xml
    hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/resources/images/FI-framework.gif
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/docs/src/documentation/resources/images/FI-framework.gif
    hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/resources/images/FI-framework.odg
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/docs/src/documentation/resources/images/FI-framework.odg
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/protocol/
      - copied from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/protocol/
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/protocol/ClientProtocolAspects.aj
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/protocol/ClientProtocolAspects.aj
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestThreadLocalDateFormat.java
      - copied unchanged from r803337, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestThreadLocalDateFormat.java
Modified:
    hadoop/hdfs/branches/HDFS-265/   (props changed)
    hadoop/hdfs/branches/HDFS-265/CHANGES.txt
    hadoop/hdfs/branches/HDFS-265/build.xml   (props changed)
    hadoop/hdfs/branches/HDFS-265/ivy.xml
    hadoop/hdfs/branches/HDFS-265/ivy/ivysettings.xml
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml
    hadoop/hdfs/branches/HDFS-265/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/hdfs/branches/HDFS-265/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-265/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-265/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,2 +1,2 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
-/hadoop/hdfs/trunk:796829-800617
+/hadoop/hdfs/trunk:796829-800617,800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Wed Aug 12 16:37:13 2009
@@ -13,13 +13,14 @@
 
     HDFS-461. Tool to analyze file size distribution in HDFS. (shv)
 
-    HDFS-446. Improvements to Offline Image Viewer. (Jakob Homan via shv)
-
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file
     is deleted. (Suresh Srinivas via rangadi)
 
+    HDFS-377. Separate codes which implement DataTransferProtocol.
+    (szetszwo)
+
     HDFS-396. NameNode image and edits directories are specified as URIs.
     (Luca Telloli via rangadi)
 
@@ -49,9 +50,14 @@
     only by the run-test-*-faul-inject targets.  (Konstantin Boudnik via
     szetszwo)
 
+    HDFS-446. Improvements to Offline Image Viewer. (Jakob Homan via shv)
+
     HADOOP-6160. Fix releaseaudit target to run on specific directories.
     (gkesavan)
 
+    HDFS-501. Use enum to define the constants in DataTransferProtocol.
+    (szetszwo)
+
     HDFS-508. Factor out BlockInfo from BlocksMap. (shv)
 
     HDFS-510. Rename DatanodeBlockInfo to be ReplicaInfo.
@@ -70,7 +76,24 @@
     HDFS-504. Update the modification time of a file when the file 
     is closed. (Chun Zhang via dhruba)
 
+    HDFS-498. Add development guide and documentation for the fault injection
+    framework.  (Konstantin Boudnik via szetszwo)
+
+    HDFS-524. Further DataTransferProtocol code refactoring.  (szetszwo)
+
+    HDFS-527. Remove/deprecate unnecessary DFSClient constructors.  (szetszwo)
+
+    HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches
+    in BlockManager. (shv)
+
+    HDFS-530. Refactor TestFileAppend* to remove code duplication.
+    (Konstantin Boudnik via szetszwo)
+
+    HDFS-451. Add fault injection tests, Pipeline_Fi_06,07,14,15, for
+    DataTransferProtocol.  (szetszwo)
+
   BUG FIXES
+
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than
     current consumption. (Boris Shkolnik via rangadi)
@@ -78,9 +101,6 @@
     HADOOP-4687. HDFS is split from Hadoop Core. It is a subproject under 
     Hadoop (Owen O'Malley)
 
-    HDFS-377. Separate codes which implement DataTransferProtocol.
-    (szetszwo)
-
     HADOOP-6096. Fix Eclipse project and classpath files following project
     split. (tomwhite)
 
@@ -117,9 +137,6 @@
     HDFS-484. Fix bin-package and package target to package jar files.
     (gkesavan)
 
-    HDFS-501. Use enum to define the constants in DataTransferProtocol.
-    (szetszwo)
-
     HDFS-490. Eliminate the deprecated warnings introduced by H-5438.
     (He Yongqiang via szetszwo)
 
@@ -129,9 +146,15 @@
     HDFS-167. Fix a bug in DFSClient that caused infinite retries on write.
     (Bill Zeller via szetszwo)
 
+    HDFS-534. Include avro in ivy.  (szetszwo)
+
 Release 0.20.1 - Unreleased
 
   IMPROVEMENTS
 
     HDFS-438. Improve help message for space quota command. (Raghu Angadi)
 
+  BUG FIXES
+
+    HDFS-525. The SimpleDateFormat object in ListPathsServlet is not thread
+    safe.  (Suresh Srinivas via szetszwo)

Propchange: hadoop/hdfs/branches/HDFS-265/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/build.xml:713112
 /hadoop/core/trunk/build.xml:779102
-/hadoop/hdfs/trunk/build.xml:796829-800617
+/hadoop/hdfs/trunk/build.xml:796829-800617,800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/ivy.xml?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/ivy.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/ivy.xml Wed Aug 12 16:37:13 2009
@@ -252,6 +252,10 @@
       name="slf4j-api"
       rev="${slf4j-api.version}"
       conf="common->master"/>
+    <dependency org="org.apache.hadoop"
+      name="avro"
+      rev="1.0.0"
+      conf="common->default"/>
     <dependency org="org.eclipse.jdt"
       name="core"
       rev="${core.version}"

Modified: hadoop/hdfs/branches/HDFS-265/ivy/ivysettings.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/ivy/ivysettings.xml?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/ivy/ivysettings.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/ivy/ivysettings.xml Wed Aug 12 16:37:13 2009
@@ -74,7 +74,7 @@
     rather than look for them online.
 
     -->
-    <module organisation="org.apache.hadoop" name=".*" resolver="internal"/>
+    <module organisation="org.apache.hadoop" name="Hadoop.*" resolver="internal"/>
     <!--until commons cli is external, we need to pull it in from the snapshot repository -if present -->
     <module organisation="org.apache.commons" name=".*" resolver="external-and-snapshots"/>
   </modules>

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
Binary files - no diff available.

Propchange: hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/hdfsproxy:713112
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:796829-800617
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:796829-800617,800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml Wed Aug 12 16:37:13 2009
@@ -60,6 +60,9 @@
 		<hdfs_SLG        			label="Synthetic Load Generator Guide"  href="SLG_user_guide.html" />
 		<hdfs_imageviewer						label="Offline Image Viewer Guide"	href="hdfs_imageviewer.html" />
 		<hdfs_libhdfs   				label="C API libhdfs"         						href="libhdfs.html" /> 
+                <docs label="Testing">
+                    <faultinject_framework              label="Fault Injection"                                                     href="faultinject_framework.html" />
+                </docs>
    </docs> 
    
    <docs label="HOD">

Propchange: hadoop/hdfs/branches/HDFS-265/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
-/hadoop/hdfs/trunk/src/java:796829-800617
+/hadoop/hdfs/trunk/src/java:796829-800617,800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Wed Aug 12 16:37:13 2009
@@ -127,8 +127,8 @@
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
-  private ClientProtocol namenode;
-  private ClientProtocol rpcNamenode;
+  private final ClientProtocol namenode;
+  private final ClientProtocol rpcNamenode;
   final UnixUserGroupInformation ugi;
   volatile boolean clientRunning = true;
   Random r = new Random();
@@ -206,41 +206,41 @@
         ClientDatanodeProtocol.versionID, addr, conf);
   }
         
-  /** 
-   * Create a new DFSClient connected to the default namenode.
+  /**
+   * Same as this(NameNode.getAddress(conf), conf);
+   * @see #DFSClient(InetSocketAddress, Configuration)
+   * @deprecated Deprecated at 0.21
    */
+  @Deprecated
   public DFSClient(Configuration conf) throws IOException {
-    this(NameNode.getAddress(conf), conf, null);
+    this(NameNode.getAddress(conf), conf);
   }
 
-  /** 
-   * Create a new DFSClient connected to the given namenode server.
+  /**
+   * Same as this(nameNodeAddr, conf, null);
+   * @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
+   */
+  public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf
+      ) throws IOException {
+    this(nameNodeAddr, conf, null);
+  }
+
+  /**
+   * Same as this(nameNodeAddr, null, conf, stats);
+   * @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) 
    */
   public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf,
                    FileSystem.Statistics stats)
     throws IOException {
-    this(conf, stats);
-    this.rpcNamenode = createRPCNamenode(nameNodeAddr, conf, ugi);
-    this.namenode = createNamenode(this.rpcNamenode);
+    this(nameNodeAddr, null, conf, stats);
   }
 
   /** 
-   * Create a new DFSClient connected to the given namenode
-   * and rpcNamenode objects.
-   * 
-   * This constructor was written to allow easy testing of the DFSClient class.
-   * End users will most likely want to use one of the other constructors.
+   * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
+   * Exactly one of nameNodeAddr or rpcNamenode must be null.
    */
-  public DFSClient(ClientProtocol namenode, ClientProtocol rpcNamenode,
-                   Configuration conf, FileSystem.Statistics stats)
-    throws IOException {
-      this(conf, stats);
-      this.namenode = namenode;
-      this.rpcNamenode = rpcNamenode;
-  }
-
-  
-  private DFSClient(Configuration conf, FileSystem.Statistics stats)
+  DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
+      Configuration conf, FileSystem.Statistics stats)
     throws IOException {
     this.conf = conf;
     this.stats = stats;
@@ -271,11 +271,18 @@
     }
     defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     defaultReplication = (short) conf.getInt("dfs.replication", 3);
-  }
 
-  public DFSClient(InetSocketAddress nameNodeAddr, 
-                   Configuration conf) throws IOException {
-    this(nameNodeAddr, conf, null);
+    if (nameNodeAddr != null && rpcNamenode == null) {
+      this.rpcNamenode = createRPCNamenode(nameNodeAddr, conf, ugi);
+      this.namenode = createNamenode(this.rpcNamenode);
+    } else if (nameNodeAddr == null && rpcNamenode != null) {
+      //This case is used for testing.
+      this.namenode = this.rpcNamenode = rpcNamenode;
+    } else {
+      throw new IllegalArgumentException(
+          "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
+          + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
+    }
   }
 
   private void checkOpen() throws IOException {
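
For illustration only (the class name DFSClientConstructionSketch and its helper methods are hypothetical, not part of this commit): a minimal sketch of how callers pick among the refactored constructors. Production code resolves the namenode address from the configuration and lets DFSClient build its own RPC proxy, while tests (see the TestDFSClientRetries diff below) pass a stub ClientProtocol with a null address, satisfying the new "exactly one of nameNodeAddr and rpcNamenode is null" check.

  package org.apache.hadoop.hdfs;

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;

  class DFSClientConstructionSketch {
    // Production path: resolve the namenode address from the configuration
    // and let DFSClient create its own RPC proxy internally.
    static DFSClient forCluster(Configuration conf) throws IOException {
      InetSocketAddress nnAddr = NameNode.getAddress(conf);
      return new DFSClient(nnAddr, conf);
    }

    // Test path: supply a ready-made ClientProtocol stub and a null address,
    // using the new package-private constructor (hence the package above).
    static DFSClient forTest(ClientProtocol stubNamenode, Configuration conf)
        throws IOException {
      return new DFSClient(null, stubNamenode, conf, null);
    }
  }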

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Wed Aug 12 16:37:13 2009
@@ -27,7 +27,6 @@
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.text.ParseException;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.Random;
@@ -45,6 +44,7 @@
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.common.ThreadLocalDateFormat;
 import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -74,7 +74,7 @@
   protected UserGroupInformation ugi; 
   protected final Random ran = new Random();
 
-  protected static final SimpleDateFormat df = ListPathsServlet.df;
+  protected static final ThreadLocalDateFormat df = ListPathsServlet.df;
 
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Wed Aug 12 16:37:13 2009
@@ -249,8 +249,8 @@
 
   /** Receiver */
   public static abstract class Receiver {
-    /** Initialize a operation. */
-    public final Op op(DataInputStream in) throws IOException {
+    /** Read an Op.  It also checks protocol version. */
+    protected final Op readOp(DataInputStream in) throws IOException {
       final short version = in.readShort();
       if (version != DATA_TRANSFER_VERSION) {
         throw new IOException( "Version Mismatch" );
@@ -258,8 +258,32 @@
       return Op.read(in);
     }
 
+    /** Process op by the corresponding method. */
+    protected final void processOp(Op op, DataInputStream in
+        ) throws IOException {
+      switch(op) {
+      case READ_BLOCK:
+        opReadBlock(in);
+        break;
+      case WRITE_BLOCK:
+        opWriteBlock(in);
+        break;
+      case REPLACE_BLOCK:
+        opReplaceBlock(in);
+        break;
+      case COPY_BLOCK:
+        opCopyBlock(in);
+        break;
+      case BLOCK_CHECKSUM:
+        opBlockChecksum(in);
+        break;
+      default:
+        throw new IOException("Unknown op " + op + " in data stream");
+      }
+    }
+
     /** Receive OP_READ_BLOCK */
-    public final void opReadBlock(DataInputStream in) throws IOException {
+    private void opReadBlock(DataInputStream in) throws IOException {
       final long blockId = in.readLong();          
       final long blockGs = in.readLong();
       final long offset = in.readLong();
@@ -270,13 +294,16 @@
       opReadBlock(in, blockId, blockGs, offset, length, client, accesstoken);
     }
 
-    /** Abstract OP_READ_BLOCK method. */
-    public abstract void opReadBlock(DataInputStream in,
+    /**
+     * Abstract OP_READ_BLOCK method.
+     * Read a block.
+     */
+    protected abstract void opReadBlock(DataInputStream in,
         long blockId, long blockGs, long offset, long length,
         String client, AccessToken accesstoken) throws IOException;
     
     /** Receive OP_WRITE_BLOCK */
-    public final void opWriteBlock(DataInputStream in) throws IOException {
+    private void opWriteBlock(DataInputStream in) throws IOException {
       final long blockId = in.readLong();          
       final long blockGs = in.readLong();
       final int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
@@ -298,14 +325,17 @@
           client, src, targets, accesstoken);
     }
 
-    /** Abstract OP_WRITE_BLOCK method. */
-    public abstract void opWriteBlock(DataInputStream in,
+    /**
+     * Abstract OP_WRITE_BLOCK method. 
+     * Write a block.
+     */
+    protected abstract void opWriteBlock(DataInputStream in,
         long blockId, long blockGs, int pipelineSize, boolean isRecovery,
         String client, DatanodeInfo src, DatanodeInfo[] targets,
         AccessToken accesstoken) throws IOException;
 
     /** Receive OP_REPLACE_BLOCK */
-    public final void opReplaceBlock(DataInputStream in) throws IOException {
+    private void opReplaceBlock(DataInputStream in) throws IOException {
       final long blockId = in.readLong();          
       final long blockGs = in.readLong();
       final String sourceId = Text.readString(in); // read del hint
@@ -315,13 +345,16 @@
       opReplaceBlock(in, blockId, blockGs, sourceId, src, accesstoken);
     }
 
-    /** Abstract OP_REPLACE_BLOCK method. */
-    public abstract void opReplaceBlock(DataInputStream in,
+    /**
+     * Abstract OP_REPLACE_BLOCK method.
+     * It is used for balancing purpose; send to a destination
+     */
+    protected abstract void opReplaceBlock(DataInputStream in,
         long blockId, long blockGs, String sourceId, DatanodeInfo src,
         AccessToken accesstoken) throws IOException;
 
     /** Receive OP_COPY_BLOCK */
-    public final void opCopyBlock(DataInputStream in) throws IOException {
+    private void opCopyBlock(DataInputStream in) throws IOException {
       final long blockId = in.readLong();          
       final long blockGs = in.readLong();
       final AccessToken accesstoken = readAccessToken(in);
@@ -329,12 +362,15 @@
       opCopyBlock(in, blockId, blockGs, accesstoken);
     }
 
-    /** Abstract OP_COPY_BLOCK method. */
-    public abstract void opCopyBlock(DataInputStream in,
+    /**
+     * Abstract OP_COPY_BLOCK method.
+     * It is used for balancing purpose; send to a proxy source.
+     */
+    protected abstract void opCopyBlock(DataInputStream in,
         long blockId, long blockGs, AccessToken accesstoken) throws IOException;
 
     /** Receive OP_BLOCK_CHECKSUM */
-    public final void opBlockChecksum(DataInputStream in) throws IOException {
+    private void opBlockChecksum(DataInputStream in) throws IOException {
       final long blockId = in.readLong();          
       final long blockGs = in.readLong();
       final AccessToken accesstoken = readAccessToken(in);
@@ -342,8 +378,11 @@
       opBlockChecksum(in, blockId, blockGs, accesstoken);
     }
 
-    /** Abstract OP_BLOCK_CHECKSUM method. */
-    public abstract void opBlockChecksum(DataInputStream in,
+    /**
+     * Abstract OP_BLOCK_CHECKSUM method.
+     * Get the checksum of a block 
+     */
+    protected abstract void opBlockChecksum(DataInputStream in,
         long blockId, long blockGs, AccessToken accesstoken) throws IOException;
 
     /** Read an AccessToken */
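
For illustration only (LoggingReceiverSketch and its stub op* bodies are hypothetical, not part of this commit): a minimal Receiver subclass showing the refactored template. readOp() checks DATA_TRANSFER_VERSION and returns the Op; processOp() dispatches to the now-protected abstract op* callbacks, so concrete receivers such as DataXceiver (diff below) no longer carry their own switch over opcodes.

  package org.apache.hadoop.hdfs.protocol;

  import java.io.DataInputStream;
  import java.io.IOException;

  import org.apache.hadoop.security.AccessToken;

  public class LoggingReceiverSketch extends DataTransferProtocol.Receiver {

    /** Read one operation from the stream and dispatch it. */
    public void serve(DataInputStream in) throws IOException {
      // readOp() verifies the protocol version and returns the Op enum;
      // processOp() routes it to the matching protected op* callback.
      DataTransferProtocol.Op op = readOp(in);
      processOp(op, in);
    }

    @Override
    protected void opReadBlock(DataInputStream in, long blockId, long blockGs,
        long offset, long length, String client, AccessToken accesstoken)
        throws IOException {
      System.out.println("READ_BLOCK " + blockId);
    }

    @Override
    protected void opWriteBlock(DataInputStream in, long blockId, long blockGs,
        int pipelineSize, boolean isRecovery, String client, DatanodeInfo src,
        DatanodeInfo[] targets, AccessToken accesstoken) throws IOException {
      System.out.println("WRITE_BLOCK " + blockId);
    }

    @Override
    protected void opReplaceBlock(DataInputStream in, long blockId, long blockGs,
        String sourceId, DatanodeInfo src, AccessToken accesstoken)
        throws IOException {
      System.out.println("REPLACE_BLOCK " + blockId);
    }

    @Override
    protected void opCopyBlock(DataInputStream in, long blockId, long blockGs,
        AccessToken accesstoken) throws IOException {
      System.out.println("COPY_BLOCK " + blockId);
    }

    @Override
    protected void opBlockChecksum(DataInputStream in, long blockId, long blockGs,
        AccessToken accesstoken) throws IOException {
      System.out.println("BLOCK_CHECKSUM " + blockId);
    }
  }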

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Aug 12 16:37:13 2009
@@ -77,7 +77,7 @@
   private String clientName;
   DatanodeInfo srcDataNode = null;
   private Checksum partialCrc = null;
-  private DataNode datanode = null;
+  private final DataNode datanode;
 
   BlockReceiver(Block block, DataInputStream in, String inAddr,
                 String myAddr, boolean isRecovery, String clientName, 
@@ -128,6 +128,9 @@
     }
   }
 
+  /** Return the datanode object. */
+  DataNode getDataNode() {return datanode;}
+
   /**
    * close files.
    */

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug 12 16:37:13 2009
@@ -490,6 +490,11 @@
     return myMetrics;
   }
   
+  /** Return DatanodeRegistration */
+  public DatanodeRegistration getDatanodeRegistration() {
+    return dnRegistration;
+  }
+
   /**
    * Return the namenode's identifier
    */

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed Aug 12 16:37:13 2009
@@ -43,6 +43,8 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
+import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.security.AccessTokenHandler;
@@ -57,24 +59,34 @@
   public static final Log LOG = DataNode.LOG;
   static final Log ClientTraceLog = DataNode.ClientTraceLog;
   
-  Socket s;
-  final String remoteAddress; // address of remote side
-  final String localAddress;  // local address of this daemon
-  DataNode datanode;
-  DataXceiverServer dataXceiverServer;
+  private final Socket s;
+  private final boolean isLocal; //is a local connection?
+  private final String remoteAddress; // address of remote side
+  private final String localAddress;  // local address of this daemon
+  private final DataNode datanode;
+  private final DataXceiverServer dataXceiverServer;
+
+  private long opStartTime; //the start time of receiving an Op
   
   public DataXceiver(Socket s, DataNode datanode, 
       DataXceiverServer dataXceiverServer) {
-    
     this.s = s;
+    this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
     this.datanode = datanode;
     this.dataXceiverServer = dataXceiverServer;
     dataXceiverServer.childSockets.put(s, s);
     remoteAddress = s.getRemoteSocketAddress().toString();
     localAddress = s.getLocalSocketAddress().toString();
-    LOG.debug("Number of active connections is: " + datanode.getXceiverCount());
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Number of active connections is: "
+          + datanode.getXceiverCount());
+    }
   }
 
+  /** Return the datanode object. */
+  DataNode getDataNode() {return datanode;}
+
   /**
    * Read/write data from/to the DataXceiveServer.
    */
@@ -84,8 +96,8 @@
       in = new DataInputStream(
           new BufferedInputStream(NetUtils.getInputStream(s), 
                                   SMALL_BUFFER_SIZE));
-      final DataTransferProtocol.Op op = op(in);
-      boolean local = s.getInetAddress().equals(s.getLocalAddress());
+      final DataTransferProtocol.Op op = readOp(in);
+
       // Make sure the xciver count is not exceeded
       int curXceiverCount = datanode.getXceiverCount();
       if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
@@ -93,45 +105,16 @@
                               + " exceeds the limit of concurrent xcievers "
                               + dataXceiverServer.maxXceiverCount);
       }
-      long startTime = DataNode.now();
-      switch ( op ) {
-      case READ_BLOCK:
-        opReadBlock(in);
-        datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
-        if (local)
-          datanode.myMetrics.readsFromLocalClient.inc();
-        else
-          datanode.myMetrics.readsFromRemoteClient.inc();
-        break;
-      case WRITE_BLOCK:
-        opWriteBlock(in);
-        datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
-        if (local)
-          datanode.myMetrics.writesFromLocalClient.inc();
-        else
-          datanode.myMetrics.writesFromRemoteClient.inc();
-        break;
-      case REPLACE_BLOCK: // for balancing purpose; send to a destination
-        opReplaceBlock(in);
-        datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
-        break;
-      case COPY_BLOCK:
-            // for balancing purpose; send to a proxy source
-        opCopyBlock(in);
-        datanode.myMetrics.copyBlockOp.inc(DataNode.now() - startTime);
-        break;
-      case BLOCK_CHECKSUM: //get the checksum of a block
-        opBlockChecksum(in);
-        datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime);
-        break;
-      default:
-        throw new IOException("Unknown opcode " + op + " in data stream");
-      }
+
+      opStartTime = DataNode.now();
+      processOp(op, in);
     } catch (Throwable t) {
       LOG.error(datanode.dnRegistration + ":DataXceiver",t);
     } finally {
-      LOG.debug(datanode.dnRegistration + ":Number of active connections is: "
-                               + datanode.getXceiverCount());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(datanode.dnRegistration + ":Number of active connections is: "
+            + datanode.getXceiverCount());
+      }
       IOUtils.closeStream(in);
       IOUtils.closeSocket(s);
       dataXceiverServer.childSockets.remove(s);
@@ -142,7 +125,7 @@
    * Read a block from the disk.
    */
   @Override
-  public void opReadBlock(DataInputStream in,
+  protected void opReadBlock(DataInputStream in,
       long blockId, long blockGs, long startOffset, long length,
       String clientName, AccessToken accessToken) throws IOException {
     final Block block = new Block(blockId, 0 , blockGs);
@@ -213,13 +196,18 @@
       IOUtils.closeStream(out);
       IOUtils.closeStream(blockSender);
     }
+
+    //update metrics
+    updateDuration(datanode.myMetrics.readBlockOp);
+    updateCounter(datanode.myMetrics.readsFromLocalClient,
+                  datanode.myMetrics.readsFromRemoteClient);
   }
 
   /**
    * Write a block to disk.
    */
   @Override
-  public void opWriteBlock(DataInputStream in, long blockId, long blockGs,
+  protected void opWriteBlock(DataInputStream in, long blockId, long blockGs,
       int pipelineSize, boolean isRecovery,
       String client, DatanodeInfo srcDataNode, DatanodeInfo[] targets,
       AccessToken accessToken) throws IOException {
@@ -377,13 +365,18 @@
       IOUtils.closeSocket(mirrorSock);
       IOUtils.closeStream(blockReceiver);
     }
+
+    //update metrics
+    updateDuration(datanode.myMetrics.writeBlockOp);
+    updateCounter(datanode.myMetrics.writesFromLocalClient,
+                  datanode.myMetrics.writesFromRemoteClient);
   }
 
   /**
    * Get block checksum (MD5 of CRC32).
    */
   @Override
-  public void opBlockChecksum(DataInputStream in,
+  protected void opBlockChecksum(DataInputStream in,
       long blockId, long blockGs, AccessToken accessToken) throws IOException {
     final Block block = new Block(blockId, 0 , blockGs);
     DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
@@ -433,13 +426,16 @@
       IOUtils.closeStream(checksumIn);
       IOUtils.closeStream(metadataIn);
     }
+
+    //update metrics
+    updateDuration(datanode.myMetrics.blockChecksumOp);
   }
 
   /**
    * Read a block from the disk and then sends it to a destination.
    */
   @Override
-  public void opCopyBlock(DataInputStream in,
+  protected void opCopyBlock(DataInputStream in,
       long blockId, long blockGs, AccessToken accessToken) throws IOException {
     // Read in the header
     Block block = new Block(blockId, 0, blockGs);
@@ -499,6 +495,9 @@
       IOUtils.closeStream(reply);
       IOUtils.closeStream(blockSender);
     }
+
+    //update metrics    
+    updateDuration(datanode.myMetrics.copyBlockOp);
   }
 
   /**
@@ -506,7 +505,7 @@
    * remove the copy from the source.
    */
   @Override
-  public void opReplaceBlock(DataInputStream in,
+  protected void opReplaceBlock(DataInputStream in,
       long blockId, long blockGs, String sourceID, DatanodeInfo proxySource,
       AccessToken accessToken) throws IOException {
     /* read header */
@@ -606,8 +605,20 @@
       IOUtils.closeStream(blockReceiver);
       IOUtils.closeStream(proxyReply);
     }
+
+    //update metrics
+    updateDuration(datanode.myMetrics.replaceBlockOp);
   }
-  
+
+  private void updateDuration(MetricsTimeVaryingRate mtvr) {
+    mtvr.inc(DataNode.now() - opStartTime);
+  }
+
+  private void updateCounter(MetricsTimeVaryingInt localCounter,
+      MetricsTimeVaryingInt remoteCounter) {
+    (isLocal? localCounter: remoteCounter).inc();
+  }
+
   /**
    * Utility function for sending a response.
    * @param s socket to write to

Propchange: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java:713112
 /hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java:776175-785643,785929-786278
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Wed Aug 12 16:37:13 2009
@@ -893,11 +893,11 @@
             } else {
               // new replica is larger in size than existing block.
               // Mark pre-existing replicas as corrupt.
-              int numNodes = blocksMap.numNodes(block);
+              int numNodes = storedBlock.numNodes();
               int count = 0;
               DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
-              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
-              for (; it != null && it.hasNext(); ) {
+              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(storedBlock);
+              while (it.hasNext()) {
                 DatanodeDescriptor dd = it.next();
                 if (!dd.equals(node)) {
                   nodes[count++] = dd;
@@ -1262,9 +1262,9 @@
     return blocksMap.size() - (int)pendingDeletionBlocksCount;
   }
 
-  DatanodeDescriptor[] getNodes(Block block) {
+  DatanodeDescriptor[] getNodes(BlockInfo block) {
     DatanodeDescriptor[] nodes =
-      new DatanodeDescriptor[blocksMap.numNodes(block)];
+      new DatanodeDescriptor[block.numNodes()];
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
     for (int i = 0; it != null && it.hasNext(); i++) {
       nodes[i] = it.next();

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Wed Aug 12 16:37:13 2009
@@ -108,9 +108,20 @@
     return map.get(b);
   }
 
-  /** Returned Iterator does not support. */
+  /**
+   * Searches for the block in the BlocksMap and 
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
   Iterator<DatanodeDescriptor> nodeIterator(Block b) {
-    return new NodeIterator(map.get(b));
+    return nodeIterator(map.get(b));
+  }
+
+  /**
+   * For a block that has already been retrieved from the BlocksMap
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
+  Iterator<DatanodeDescriptor> nodeIterator(BlockInfo storedBlock) {
+    return new NodeIterator(storedBlock);
   }
 
   /** counts number of containing nodes. Better than using iterator. */
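
For illustration only (NodeIterationSketch and collectNodes are hypothetical): once a BlockInfo has already been looked up, the new BlockInfo-based nodeIterator together with BlockInfo.numNodes() avoids the repeated blocksMap searches that the Block-based overloads perform, which is how the BlockManager changes above use it (HDFS-529).

  package org.apache.hadoop.hdfs.server.namenode;

  import java.util.Iterator;

  class NodeIterationSketch {
    // Collect the datanodes holding a block that was already retrieved from
    // the BlocksMap, without searching the map again.
    static DatanodeDescriptor[] collectNodes(BlocksMap blocksMap,
        BlockInfo storedBlock) {
      DatanodeDescriptor[] nodes = new DatanodeDescriptor[storedBlock.numNodes()];
      Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(storedBlock);
      for (int i = 0; it.hasNext(); i++) {
        nodes[i] = it.next();
      }
      return nodes;
    }
  }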

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Aug 12 16:37:13 2009
@@ -1030,13 +1030,16 @@
     synchronized (this) {
       INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
 
-      Block[] blocks = file.getBlocks();
+      BlockInfo[] blocks = file.getBlocks();
       if (blocks != null && blocks.length > 0) {
-        Block last = blocks[blocks.length-1];
+        BlockInfo last = blocks[blocks.length-1];
+        // this is a redundant search in blocksMap
+        // should be resolved by the new implementation of append
         BlockInfo storedBlock = blockManager.getStoredBlock(last);
+        assert last == storedBlock : "last block should be in the blocksMap";
         if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
           long fileLength = file.computeContentSummary().getLength();
-          DatanodeDescriptor[] targets = blockManager.getNodes(last);
+          DatanodeDescriptor[] targets = blockManager.getNodes(storedBlock);
           // remove the replica locations of this block from the node
           for (int i = 0; i < targets.length; i++) {
             targets[i].removeBlock(storedBlock);
@@ -1578,8 +1581,8 @@
       }
       // setup the Inode.targets for the last block from the blockManager
       //
-      Block[] blocks = pendingFile.getBlocks();
-      Block last = blocks[blocks.length-1];
+      BlockInfo[] blocks = pendingFile.getBlocks();
+      BlockInfo last = blocks[blocks.length-1];
       DatanodeDescriptor[] targets = blockManager.getNodes(last);
       pendingFile.setTargets(targets);
     }

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Wed Aug 12 16:37:13 2009
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.common.ThreadLocalDateFormat;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
@@ -27,14 +28,12 @@
 
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Stack;
 import java.util.TimeZone;
 import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -46,9 +45,9 @@
 public class ListPathsServlet extends DfsServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
+  public static final ThreadLocalDateFormat df = 
+    new ThreadLocalDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
 
-  public static final SimpleDateFormat df =
-    new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
   static {
     df.setTimeZone(TimeZone.getTimeZone("UTC"));
   }
@@ -163,13 +162,10 @@
         }
         catch(RemoteException re) {re.writeXml(p, doc);}
       }
-    } catch (PatternSyntaxException e) {
-      out.println(e.toString());
-    } finally {
       if (doc != null) {
         doc.endDocument();
       }
-
+    } finally {
       if (out != null) {
         out.close();
       }
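
The new ThreadLocalDateFormat class itself is copied unchanged from trunk, so its body does not appear in this diff. For illustration only, a rough sketch of the usual ThreadLocal-per-thread pattern such a wrapper follows (the real class may differ in detail); the point of HDFS-525 is that SimpleDateFormat is not thread safe, so each servlet thread must get its own instance instead of sharing the static field above.

  package org.apache.hadoop.hdfs.server.common;

  import java.text.DateFormat;
  import java.text.SimpleDateFormat;
  import java.util.Date;
  import java.util.TimeZone;

  public class ThreadLocalDateFormatSketch {
    private final String pattern;
    private volatile TimeZone timeZone = TimeZone.getDefault();

    // Every thread lazily creates its own SimpleDateFormat, so the
    // non-thread-safe formatter is never shared between request threads.
    private final ThreadLocal<DateFormat> format = new ThreadLocal<DateFormat>() {
      @Override
      protected DateFormat initialValue() {
        SimpleDateFormat f = new SimpleDateFormat(pattern);
        f.setTimeZone(timeZone);
        return f;
      }
    };

    public ThreadLocalDateFormatSketch(String pattern) {
      this.pattern = pattern;
    }

    // Intended to be called once, before any formatting (as in the static
    // initializer of ListPathsServlet above).
    public void setTimeZone(TimeZone tz) {
      this.timeZone = tz;
    }

    public String format(Date date) {
      return format.get().format(date);
    }
  }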

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Wed Aug 12 16:37:13 2009
@@ -324,7 +324,7 @@
   
   private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
     throws IOException {
-    DFSClient dfs = new DFSClient(conf);
+    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
     try {
     if (!lfInited) {
       lostFoundInit(dfs);

Modified: hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj Wed Aug 12 16:37:13 2009
@@ -17,15 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.IOException;
+import java.io.OutputStream;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fi.DataTransferTestUtil;
 import org.apache.hadoop.fi.ProbabilityModel;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.util.DiskChecker.*;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.DataOutputStream;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
 /**
  * This aspect takes care about faults injected into datanode.BlockReceiver 
@@ -34,14 +33,20 @@
 public aspect BlockReceiverAspects {
   public static final Log LOG = LogFactory.getLog(BlockReceiverAspects.class);
 
-  pointcut callReceivePacket() :
+  pointcut callReceivePacket(BlockReceiver blockreceiver) :
     call (* OutputStream.write(..))
       && withincode (* BlockReceiver.receivePacket(..))
 // to further limit the application of this aspect a very narrow 'target' can be used as follows
 //  && target(DataOutputStream)
-      && !within(BlockReceiverAspects +);
+      && !within(BlockReceiverAspects +)
+      && this(blockreceiver);
 	
-  before () throws IOException : callReceivePacket () {
+  before(BlockReceiver blockreceiver
+      ) throws IOException : callReceivePacket(blockreceiver) {
+    LOG.info("FI: callReceivePacket");
+    DataTransferTestUtil.getPipelineTest().fiCallReceivePacket.run(
+        blockreceiver.getDataNode());
+
     if (ProbabilityModel.injectCriteria(BlockReceiver.class.getSimpleName())) {
       LOG.info("Before the injection point");
       Thread.dumpStack();

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
-/hadoop/hdfs/trunk/src/test/hdfs:796829-800617
+/hadoop/hdfs/trunk/src/test/hdfs:796829-800617,800619-803337

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr:776175-784663
-/hadoop/hdfs/trunk/src/test/hdfs-with-mr:796829-800617
+/hadoop/hdfs/trunk/src/test/hdfs-with-mr:796829-800617,800619-803337

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java Wed Aug 12 16:37:13 2009
@@ -23,13 +23,12 @@
 import java.util.Random;
 
 import junit.framework.TestCase;
+import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -61,7 +60,11 @@
       return r;
     }
   };
-  
+  static final int BLOCK_SIZE = 1024;
+  static final int NUM_BLOCKS = 10;
+  static final int FILE_SIZE = NUM_BLOCKS * BLOCK_SIZE + 1;
+  static long seed = -1;
+
   static int nextInt() {return RANDOM.get().nextInt();}
   static int nextInt(int n) {return RANDOM.get().nextInt(n);}
   static int nextLong() {return RANDOM.get().nextInt();}
@@ -116,4 +119,49 @@
       throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
     }
   }
+
+  /**
+   *  create a buffer that contains the entire test file data.
+   */
+  static byte[] initBuffer(int size) {
+    if (seed == -1)
+      seed = nextLong();
+    return randomBytes(seed, size);
+  }
+
+  /**
+   *  Creates a file but does not close it
+   *  Make sure to call close() on the returned stream
+   *  @throws IOException an exception might be thrown
+   */
+  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+      throws IOException {
+    return fileSys.create(name, true,
+        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        (short) repl, (long) BLOCK_SIZE);
+  }
+
+  /**
+   *  Compare the content of a file created from FileSystem and Path with
+   *  the specified byte[] buffer's content
+   *  @throws IOException an exception might be thrown
+   */
+  static void checkFullFile(FileSystem fs, Path name, int len,
+                            final byte[] compareContent, String message) throws IOException {
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[len];
+    stm.readFully(0, actual);
+    checkData(actual, 0, compareContent, message);
+    stm.close();
+  }
+
+  private static void checkData(final byte[] actual, int from,
+                                final byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                   expected[from+idx]+" actual "+actual[idx],
+                   expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
 }
\ No newline at end of file
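
For illustration only (writeAndVerify and its caller-supplied FileSystem and Path are hypothetical): how the helpers that HDFS-530 consolidates into AppendTestUtil fit together. TestFileAppend, whose diff follows, uses them the same way.

  package org.apache.hadoop.hdfs;

  import java.io.IOException;

  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  class AppendTestUtilUsageSketch {
    static void writeAndVerify(FileSystem fs, Path p) throws IOException {
      // One shared buffer for the whole test file, seeded once per JVM.
      byte[] contents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);

      // createFile() returns an open stream; the caller must close it.
      FSDataOutputStream out = AppendTestUtil.createFile(fs, p, 3);
      out.write(contents);
      out.close();

      // Re-read the file and compare it byte-for-byte against the buffer.
      AppendTestUtil.checkFullFile(fs, p, contents.length, contents, "sanity check");
    }
  }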

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Wed Aug 12 16:37:13 2009
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;
 
 public class TestAbandonBlock extends junit.framework.TestCase {
@@ -49,7 +50,7 @@
       fout.sync();
   
       //try reading the block by someone
-      DFSClient dfsclient = new DFSClient(CONF);
+      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
       LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
       LocatedBlock b = blocks.get(0); 
       try {

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Aug 12 16:37:13 2009
@@ -223,7 +223,7 @@
     conf.setInt("dfs.client.block.write.locateFollowingBlock.retries", 1);
         
     TestNameNode tnn = new TestNameNode(conf);
-    DFSClient client = new DFSClient(tnn, tnn, conf, null);
+    final DFSClient client = new DFSClient(null, tnn, conf, null);
     OutputStream os = client.create("testfile", true);
     os.write(20); // write one random byte
     

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Wed Aug 12 16:37:13 2009
@@ -26,7 +26,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,38 +42,15 @@
  * support HDFS appends.
  */
 public class TestFileAppend extends TestCase {
-  static final int blockSize = 1024;
-  static final int numBlocks = 10;
-  static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
-  private long seed;
-  private byte[] fileContents = null;
-
-  //
-  // create a buffer that contains the entire test file data.
-  //
-  private void initBuffer(int size) {
-    seed = AppendTestUtil.nextLong();
-    fileContents = AppendTestUtil.randomBytes(seed, size);
-  }
-
-  /*
-   * creates a file but does not close it
-   */ 
-  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
-                                            (short)repl, (long)blockSize);
-    return stm;
-  }
+  private static byte[] fileContents = null;
 
   //
   // writes to file but does not close it
   //
   private void writeFile(FSDataOutputStream stm) throws IOException {
-    byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
+    byte[] buffer = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     stm.write(buffer);
   }
 
@@ -89,16 +65,16 @@
     while (!done) {
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException e) {}
+      } catch (InterruptedException e) {;}
       done = true;
       BlockLocation[] locations = fileSys.getFileBlockLocations(
-          fileSys.getFileStatus(name), 0, fileSize);
-      if (locations.length < numBlocks) {
+          fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
+      if (locations.length < AppendTestUtil.NUM_BLOCKS) {
         System.out.println("Number of blocks found " + locations.length);
         done = false;
         continue;
       }
-      for (int idx = 0; idx < numBlocks; idx++) {
+      for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
         if (locations[idx].getHosts().length < repl) {
           System.out.println("Block index " + idx + " not yet replciated.");
           done = false;
@@ -106,43 +82,24 @@
         }
       }
     }
-    FSDataInputStream stm = fileSys.open(name);
-    byte[] expected = new byte[numBlocks * blockSize];
+    byte[] expected = 
+        new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
     if (simulatedStorage) {
       for (int i= 0; i < expected.length; i++) {  
         expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
       }
     } else {
-      for (int i= 0; i < expected.length; i++) {  
-        expected[i] = fileContents[i];
-      }
+      System.arraycopy(fileContents, 0, expected, 0, expected.length);
     }
     // do a sanity check. Read the file
-    byte[] actual = new byte[numBlocks * blockSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, expected, "Read 1");
-  }
-
-  private void checkFullFile(FileSystem fs, Path name) throws IOException {
-    FSDataInputStream stm = fs.open(name);
-    byte[] actual = new byte[fileSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, fileContents, "Read 2");
-    stm.close();
+    AppendTestUtil.checkFullFile(fileSys, name,
+        AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
+        expected, "Read 1");
   }
 
-  private void checkData(byte[] actual, int from, byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
-      actual[idx] = 0;
-    }
-  }
-
-
   /**
    * Test that copy on write for blocks works correctly
+   * @throws IOException an exception might be thrown
    */
   public void testCopyOnWrite() throws IOException {
     Configuration conf = new Configuration();
@@ -159,7 +116,7 @@
       // create a new file, write to it and close it.
       //
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       writeFile(stm);
       stm.close();
 
@@ -178,11 +135,9 @@
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         Block b = blocks.get(i).getBlock();
-        FSDataset fsd = dataset;
-        File f = fsd.getFile(b);
+        File f = dataset.getFile(b);
         File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f + 
-                           " to " + link);
+        System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
       }
 
@@ -193,7 +148,7 @@
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-                   dataset.detachBlock(b, 1) == true);
+            dataset.detachBlock(b, 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -203,7 +158,7 @@
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-                   dataset.detachBlock(b, 1) == false);
+            !dataset.detachBlock(b, 1));
       }
 
     } finally {
@@ -214,30 +169,31 @@
 
   /**
    * Test a simple flush on a simple HDFS file.
+   * @throws IOException an exception might be thrown
    */
   public void testSimpleFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/simpleFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file simpleFlush.dat");
 
       // write to file
-      int mid = fileSize/2;
+      int mid = AppendTestUtil.FILE_SIZE /2;
       stm.write(fileContents, 0, mid);
       stm.sync();
       System.out.println("Wrote and Flushed first part of file.");
 
       // write the remainder of the file
-      stm.write(fileContents, mid, fileSize - mid);
+      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
       System.out.println("Written second part of file");
       stm.sync();
       stm.sync();
@@ -250,7 +206,8 @@
       System.out.println("Closed file.");
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
 
     } catch (IOException e) {
       System.out.println("Exception :" + e);
@@ -267,36 +224,38 @@
 
   /**
    * Test that file data can be flushed.
+   * @throws IOException an exception might be thrown
    */
   public void testComplexFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/complexFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file complexFlush.dat");
 
       int start = 0;
-      for (start = 0; (start + 29) < fileSize; ) {
+      for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
         stm.write(fileContents, start, 29);
         stm.sync();
         start += 29;
       }
-      stm.write(fileContents, start, fileSize-start);
+      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start);
 
       // verify that full blocks are sane
       checkFile(fs, file1, 1);
       stm.close();
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
     } catch (IOException e) {
       System.out.println("Exception :" + e);
       throw e; 

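The TestFileAppend changes are a refactor rather than a behaviour change: the per-test blockSize/numBlocks/fileSize constants and the private initBuffer/createFile/checkFullFile/checkData helpers move to the shared AppendTestUtil (FILE_SIZE, BLOCK_SIZE, NUM_BLOCKS, initBuffer, createFile, checkFullFile). A rough sketch of how a flush test reads after the refactor, using only the utility calls visible in the hunks (fs is the MiniDFSCluster file system; everything else here is an assumption):

    Path file1 = new Path("/simpleFlush.dat");
    byte[] fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);  // random test data
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);           // replication 1
    int mid = AppendTestUtil.FILE_SIZE / 2;
    stm.write(fileContents, 0, mid);
    stm.sync();                                             // flush the first half to the datanode
    stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
    stm.close();
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
        fileContents, "Read 2");                            // byte-for-byte verification
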
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java Wed Aug 12 16:37:13 2009
@@ -24,7 +24,6 @@
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,9 +55,7 @@
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  static final int blockSize = 1024;
   static final int numBlocks = 5;
-  static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
@@ -73,54 +70,14 @@
   int numAppendsPerThread = 2000;
 ****/
   Workload[] workload = null;
-  ArrayList<Path> testFiles = new ArrayList<Path>();
+  final ArrayList<Path> testFiles = new ArrayList<Path>();
   volatile static boolean globalStatus = true;
 
-  //
-  // create a buffer that contains the entire test file data.
-  //
-  private void initBuffer(int size) {
-    long seed = AppendTestUtil.nextLong();
-    fileContents = AppendTestUtil.randomBytes(seed, size);
-  }
-
-  /*
-   * creates a file but does not close it
-   */ 
-  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
-                                            (short)repl, (long)blockSize);
-    return stm;
-  }
-
-  private void checkFile(FileSystem fs, Path name, int len) throws IOException {
-    FSDataInputStream stm = fs.open(name);
-    byte[] actual = new byte[len];
-    stm.readFully(0, actual);
-    checkData(actual, 0, fileContents, "Read 2");
-    stm.close();
-  }
-
-  private void checkFullFile(FileSystem fs, Path name) throws IOException {
-    checkFile(fs, name, fileSize);
-  }
-
-  private void checkData(byte[] actual, int from, byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
-      actual[idx] = 0;
-    }
-  }
-
-
   /**
    * Creates one file, writes a few bytes to it and then closes it.
    * Reopens the same file for appending, writes all blocks and then closes it.
    * Verifies that all data exists in the file.
+   * @throws IOException an exception might be thrown
    */ 
   public void testSimpleAppend() throws IOException {
     Configuration conf = new Configuration();
@@ -129,7 +86,7 @@
     }
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
@@ -137,7 +94,7 @@
 
         // create a new file.
         Path file1 = new Path("/simpleAppend.dat");
-        FSDataOutputStream stm = createFile(fs, file1, 1);
+        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
         System.out.println("Created file simpleAppend.dat");
   
         // write to file
@@ -161,14 +118,16 @@
         // ensure getPos is set to reflect existing size of the file
         assertTrue(stm.getPos() > 0);
 
-        System.out.println("Writing " + (fileSize - mid2) + " bytes to file " + file1);
-        stm.write(fileContents, mid2, fileSize - mid2);
+        System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) +
+            " bytes to file " + file1);
+        stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
         System.out.println("Written second part of file");
         stm.close();
         System.out.println("Wrote and Closed second part of file.");
   
         // verify that entire file is good
-        checkFullFile(fs, file1);
+        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+            fileContents, "Read 2");
       }
 
       { // test appending to a non-existing file.
@@ -285,7 +244,7 @@
       for (int i = 0; i < numAppendsPerThread; i++) {
    
         // pick a file at random and remove it from pool
-        Path testfile = null;
+        Path testfile;
         synchronized (testFiles) {
           if (testFiles.size() == 0) {
             System.out.println("Completed write to almost all files.");
@@ -304,7 +263,7 @@
           len = fs.getFileStatus(testfile).getLen();
 
           // if file is already full, then pick another file
-          if (len >= fileSize) {
+          if (len >= AppendTestUtil.FILE_SIZE) {
             System.out.println("File " + testfile + " is full.");
             continue;
           }
@@ -312,7 +271,7 @@
           // do small size appends so that we can trigger multiple
           // appends to the same file.
           //
-          int left = (int)(fileSize - len)/3;
+          int left = (int)(AppendTestUtil.FILE_SIZE - len)/3;
           if (left <= 0) {
             left = 1;
           }
@@ -335,8 +294,7 @@
                                  " expected size " + (len + sizeToAppend) +
                                  " waiting for namenode metadata update.");
               Thread.sleep(5000);
-            } catch (InterruptedException e) { 
-            }
+            } catch (InterruptedException e) {;}
           }
 
           assertTrue("File " + testfile + " size is " + 
@@ -344,7 +302,8 @@
                      " but expected " + (len + sizeToAppend),
                     fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
 
-          checkFile(fs, testfile, (int)(len + sizeToAppend));
+          AppendTestUtil.checkFullFile(fs, testfile, (int)(len + sizeToAppend),
+              fileContents, "Read 2");
         } catch (Throwable e) {
           globalStatus = false;
           if (e != null && e.toString() != null) {
@@ -368,9 +327,10 @@
 
   /**
    * Test that appends to files at random offsets.
+   * @throws IOException an exception might be thrown
    */
   public void testComplexAppend() throws IOException {
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
@@ -392,7 +352,8 @@
       for (int i = 0; i < numberOfFiles; i++) {
         short replication = (short)(AppendTestUtil.nextInt(numDatanodes) + 1);
         Path testFile = new Path("/" + i + ".dat");
-        FSDataOutputStream stm = createFile(fs, testFile, replication);
+        FSDataOutputStream stm =
+            AppendTestUtil.createFile(fs, testFile, replication);
         stm.close();
         testFiles.add(testFile);
       }

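TestFileAppend2 gets the same treatment, dropping its private buffer/creation/verification helpers in favour of AppendTestUtil; the workload loop now also sizes each append against AppendTestUtil.FILE_SIZE, writing roughly a third of the remaining space per iteration. The simple-append flow, sketched with the helper names from the hunks (reopening the file with FileSystem.append, under dfs.support.append=true, is assumed from the test's intent):

    byte[] fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    Path file1 = new Path("/simpleAppend.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    stm.write(fileContents, 0, mid2);                       // initial portion, then close
    stm.close();

    stm = fs.append(file1);                                 // reopen for append
    assertTrue(stm.getPos() > 0);                           // position reflects the existing length
    stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
    stm.close();
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
        fileContents, "Read 2");                            // whole file must match the source buffer
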
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Wed Aug 12 16:37:13 2009
@@ -66,7 +66,10 @@
     };  
   }
 
-  /** TC1: Append on block boundary. */
+  /**
+   * TC1: Append on block boundary.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC1() throws Exception {
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
@@ -91,7 +94,10 @@
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC2: Append on non-block boundary. */
+  /**
+   * TC2: Append on non-block boundary.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC2() throws Exception {
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
@@ -116,7 +122,10 @@
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC5: Only one simultaneous append. */
+  /**
+   * TC5: Only one simultaneous append.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC5() throws Exception {
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
@@ -143,7 +152,10 @@
     out.close();        
   }
 
-  /** TC7: Corrupted replicas are present. */
+  /**
+   * TC7: Corrupted replicas are present.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC7() throws Exception {
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
@@ -188,7 +200,10 @@
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC11: Racing rename */
+  /**
+   * TC11: Racing rename
+   * @throws IOException an exception might be thrown
+   */
   public void testTC11() throws Exception {
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
@@ -241,7 +256,10 @@
     }
   }
 
-  /** TC12: Append to partial CRC chunk */
+  /** 
+   * TC12: Append to partial CRC chunk
+   * @throws IOException an exception might be thrown
+   */
   public void testTC12() throws Exception {
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);

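TestFileAppend3 only gains @throws tags on the javadoc of its numbered test cases; the cases themselves are untouched. For orientation, TC1 ("append on block boundary") amounts to roughly the following, reconstructed from the check calls in the hunks (BLOCK_SIZE, REPLICATION, contents, len1 and len2 are illustrative assumptions, not the test's exact values):

    FSDataOutputStream out = fs.create(p, false, 4096, REPLICATION, BLOCK_SIZE);
    out.write(contents, 0, len1);                  // len1 is an exact multiple of BLOCK_SIZE,
    out.close();                                   // so the closed file ends on a block boundary
    out = fs.append(p);                            // the reopened stream starts a brand-new block
    out.write(contents, len1, len2);
    out.close();
    AppendTestUtil.check(fs, p, len1 + len2);      // verifies both length and content
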
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Wed Aug 12 16:37:13 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
 
 /**
@@ -63,7 +64,7 @@
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
-    DFSClient dfsClient = new DFSClient(conf);
+    final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
     try {
 
       //
@@ -83,7 +84,7 @@
       // make sure getFileInfo throws the appropriate exception for non-relative
       // filenames
       try {
-        FileStatus foo = dfsClient.getFileInfo("non-relative");
+        dfsClient.getFileInfo("non-relative");
         fail("getFileInfo for a non-relative path did not thro IOException");
       } catch (RemoteException re) {
         assertTrue(re.toString().contains("Invalid file name"));

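The TestFileStatus hunks swap the configuration-only DFSClient constructor for one that takes the namenode address explicitly, and drop the unused FileStatus local in the negative-path check. The resulting pattern, taken almost verbatim from the hunks above:

    final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      dfsClient.getFileInfo("non-relative");       // return value deliberately ignored
      fail("getFileInfo for a non-relative path did not throw IOException");
    } catch (RemoteException re) {
      assertTrue(re.toString().contains("Invalid file name"));
    }
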
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=803588&r1=803587&r2=803588&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Aug 12 16:37:13 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -75,7 +76,7 @@
       DatanodeInfo[] dataNodes=null;
       boolean notWritten;
       do {
-        DFSClient dfsclient = new DFSClient(CONF);
+        final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
         locatedBlocks = dfsclient.getNamenode().
           getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
         assertEquals(2, locatedBlocks.size());

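TestGetBlocks makes the same constructor swap and then polls block locations through the namenode protocol, as the hunk shows. Roughly (fileLen and CONF are the test's own fields; the java.util.List import is assumed):

    final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
    List<LocatedBlock> locatedBlocks = dfsclient.getNamenode()
        .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
    assertEquals(2, locatedBlocks.size());         // the file written earlier should span two blocks
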
Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
-/hadoop/hdfs/trunk/src/webapps/datanode:796829-800617
+/hadoop/hdfs/trunk/src/webapps/datanode:796829-800617,800619-803337

Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
-/hadoop/hdfs/trunk/src/webapps/hdfs:796829-800617
+/hadoop/hdfs/trunk/src/webapps/hdfs:796829-800617,800619-803337

Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Aug 12 16:37:13 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
-/hadoop/hdfs/trunk/src/webapps/secondary:796829-800617
+/hadoop/hdfs/trunk/src/webapps/secondary:796829-800617,800619-803337