Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2009/08/28 03:07:29 UTC

svn commit: r808714 - in /hadoop/hdfs/branches/HDFS-265: ./ lib/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/...

Author: shv
Date: Fri Aug 28 01:07:27 2009
New Revision: 808714

URL: http://svn.apache.org/viewvc?rev=808714&view=rev
Log:
HDFS-565. merge -r 805653:808670 from trunk to the append branch.

Added:
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java   (with props)
Removed:
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestThreadLocalDateFormat.java
Modified:
    hadoop/hdfs/branches/HDFS-265/CHANGES.txt
    hadoop/hdfs/branches/HDFS-265/build.xml
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
    hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Fri Aug 28 01:07:27 2009
@@ -2,6 +2,12 @@
 
 Trunk (unreleased changes)
 
+  INCOMPATIBLE CHANGES
+
+    HDFS-538. Per the contract elucidated in HADOOP-6201, throw
+    FileNotFoundException from FileSystem::listStatus rather than returning
+    null. (Jakob Homan via cdouglas)
+
   NEW FEATURES
 
     HDFS-436. Introduce AspectJ framework for HDFS code and tests.
@@ -116,6 +122,13 @@
     HDFS-543. Break FSDatasetInterface#writeToBlock() into writeToTemporary,
     writeToRBW, and append. (hairong)
 
+    HDFS-549. Allow a non-fault-inject test, which is specified by -Dtestcase,
+    to be executed by the run-test-hdfs-fault-inject target.  (Konstantin
+    Boudnik via szetszwo)
+
+    HDFS-563. Simplify the codes in FSNamesystem.getBlockLocations(..).
+    (szetszwo)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of 
@@ -172,6 +185,15 @@
     HDFS-547. TestHDFSFileSystemContract#testOutputStreamClosedTwice
     sometimes fails with CloseByInterruptException. (hairong)
 
+    HDFS-561. Fix write pipeline READ_TIMEOUT in DataTransferProtocol.
+    (Kan Zhang via szetszwo)
+
+    HDFS-553. BlockSender reports wrong failed position in ChecksumException.
+    (hairong)
+
+    HDFS-568. Update hadoop-mapred-examples-0.21.0-dev.jar for MAPREDUCE-874.
+    (szetszwo)
+ 
 Release 0.20.1 - Unreleased
 
   IMPROVEMENTS
@@ -180,10 +202,10 @@
 
   BUG FIXES
 
-    HDFS-525. The SimpleDateFormat object in ListPathsServlet is not thread
-    safe.  (Suresh Srinivas via szetszwo)
-
     HDFS-167. Fix a bug in DFSClient that caused infinite retries on write.
     (Bill Zeller via szetszwo)
 
     HDFS-527. Remove/deprecate unnecessary DFSClient constructors.  (szetszwo)
+
+    HDFS-525. The SimpleDateFormat object in ListPathsServlet is not thread
+    safe. (Suresh Srinivas and cdouglas)

Modified: hadoop/hdfs/branches/HDFS-265/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/build.xml?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/build.xml Fri Aug 28 01:07:27 2009
@@ -636,6 +636,7 @@
         </batchtest>
         <batchtest todir="${test.build.dir}" if="tests.testcase.fi">
           <fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
+          <fileset dir="${test.src.dir}/hdfs" includes="**/${testcase}.java"/>
         </batchtest>
       </junit>
       <antcall target="checkfailure"/>
@@ -695,6 +696,7 @@
       </batchtest>
       <batchtest todir="${test.build.dir}" if="tests.testcase.fi">
         <fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
+        <fileset dir="${test.src.dir}/hdfs-with-mr" includes="**/${testcase}.java"/>
       </batchtest>
     </junit>
     <antcall target="checkfailure"/>
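
Per the HDFS-549 entry in CHANGES.txt, the filesets added above let a regular
(non-fault-inject) test named with -Dtestcase run under the
run-test-hdfs-fault-inject target, for example (illustrative test name):

    ant run-test-hdfs-fault-inject -Dtestcase=TestDecommission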

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Aug 28 01:07:27 2009
@@ -2338,7 +2338,7 @@
     // it. When all the packets for a block are sent out and acks for each
     // of them are received, the DataStreamer closes the current block.
     //
-    private class DataStreamer extends Daemon {
+    class DataStreamer extends Daemon {
       private static final int MAX_RECOVERY_ERROR_COUNT = 5; // try block recovery 5 times
       private int recoveryErrorCount = 0; // number of times block recovery failed
       private volatile boolean streamerClosed = false;
@@ -2348,8 +2348,8 @@
       private DataInputStream blockReplyStream;
       private ResponseProcessor response = null;
       private volatile DatanodeInfo[] nodes = null; // list of targets for current block
-      private volatile boolean hasError = false;
-      private volatile int errorIndex = 0;
+      volatile boolean hasError = false;
+      volatile int errorIndex = 0;
   
       /*
        * streamer thread is the only thread that opens streams to datanode, 
@@ -2840,7 +2840,8 @@
           LOG.debug("Connecting to " + nodes[0].getName());
           InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
           s = socketFactory.createSocket();
-          int timeoutValue = (socketTimeout > 0) ? (3000 * nodes.length + socketTimeout) : 0;
+          int timeoutValue = (socketTimeout > 0) ? (HdfsConstants.READ_TIMEOUT_EXTENSION
+              * nodes.length + socketTimeout) : 0;
           NetUtils.connect(s, target, timeoutValue);
           s.setSoTimeout(timeoutValue);
           s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Aug 28 01:07:27 2009
@@ -258,7 +258,9 @@
   @Override
   public FileStatus[] listStatus(Path p) throws IOException {
     FileStatus[] infos = dfs.listPaths(getPathName(p));
-    if (infos == null) return null;
+    if (infos == null) 
+      throw new FileNotFoundException("File " + p + " does not exist.");
+    
     FileStatus[] stats = new FileStatus[infos.length];
     for (int i = 0; i < infos.length; i++) {
       stats[i] = makeQualified(infos[i]);
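
For callers, this HDFS-538 change means a missing path now surfaces as a
FileNotFoundException rather than a null array. A minimal caller-side sketch
(class and method names here are illustrative, not part of this commit):

    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class ListStatusContractSketch {
      // Print a directory listing; a missing path is now signalled by
      // FileNotFoundException instead of a null return value.
      static void printListing(FileSystem fs, Path p) throws IOException {
        try {
          for (FileStatus stat : fs.listStatus(p)) {
            System.out.println(stat.getPath());
          }
        } catch (FileNotFoundException fnfe) {
          System.err.println("No such path: " + p);
        }
      }
    }

The DistributedFSCheck hunk later in this commit makes exactly this
adjustment on the caller side.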

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Fri Aug 28 01:07:27 2009
@@ -27,9 +27,11 @@
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.text.ParseException;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.Random;
+import java.util.TimeZone;
 
 import javax.security.auth.login.LoginException;
 
@@ -44,7 +46,6 @@
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.common.ThreadLocalDateFormat;
 import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -74,7 +75,21 @@
   protected UserGroupInformation ugi; 
   protected final Random ran = new Random();
 
-  protected static final ThreadLocalDateFormat df = ListPathsServlet.df;
+  public static final String HFTP_TIMEZONE = "UTC";
+  public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
+
+  public static final SimpleDateFormat getDateFormat() {
+    final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
+    df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
+    return df;
+  }
+
+  protected static final ThreadLocal<SimpleDateFormat> df =
+    new ThreadLocal<SimpleDateFormat>() {
+      protected SimpleDateFormat initialValue() {
+        return getDateFormat();
+      }
+    };
 
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
@@ -168,10 +183,11 @@
       long modif;
       long atime = 0;
       try {
-        modif = df.parse(attrs.getValue("modified")).getTime();
+        final SimpleDateFormat ldf = df.get();
+        modif = ldf.parse(attrs.getValue("modified")).getTime();
         String astr = attrs.getValue("accesstime");
         if (astr != null) {
-          atime = df.parse(astr).getTime();
+          atime = ldf.parse(astr).getTime();
         }
       } catch (ParseException e) { throw new SAXException(e); }
       FileStatus fs = "file".equals(qname)
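
The removed ThreadLocalDateFormat is replaced by the standard ThreadLocal
idiom: SimpleDateFormat is not thread-safe, so each thread gets its own
instance. A standalone sketch of the same pattern (the class name is
illustrative):

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.TimeZone;

    class PerThreadDateFormat {
      // One SimpleDateFormat per thread; sharing a single instance across
      // threads can corrupt its internal state and garble formatted dates.
      private static final ThreadLocal<SimpleDateFormat> DF =
          new ThreadLocal<SimpleDateFormat>() {
            @Override
            protected SimpleDateFormat initialValue() {
              SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
              df.setTimeZone(TimeZone.getTimeZone("UTC"));
              return df;
            }
          };

      public static void main(String[] args) {
        System.out.println(DF.get().format(new Date()));
      }
    }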

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java Fri Aug 28 01:07:27 2009
@@ -60,6 +60,7 @@
 
   // Timeouts for communicating with DataNode for streaming writes/reads
   public static int READ_TIMEOUT = 60 * 1000;
+  public static int READ_TIMEOUT_EXTENSION = 5 * 1000;
   public static int WRITE_TIMEOUT = 8 * 60 * 1000;
   public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
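
The DFSClient and DataXceiver hunks in this commit use READ_TIMEOUT_EXTENSION
to scale the connect/read timeout with pipeline length: the base socket
timeout plus five seconds per datanode. A worked example, assuming the
default 60-second READ_TIMEOUT and a three-node pipeline:

    class PipelineTimeoutExample {
      public static void main(String[] args) {
        int socketTimeout = 60 * 1000; // HdfsConstants.READ_TIMEOUT
        int extension = 5 * 1000;      // HdfsConstants.READ_TIMEOUT_EXTENSION
        int nodes = 3;                 // datanodes in the write pipeline
        int timeoutValue = socketTimeout + extension * nodes;
        System.out.println(timeoutValue + " ms"); // prints: 75000 ms
      }
    }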
 

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Fri Aug 28 01:07:27 2009
@@ -290,8 +290,9 @@
           int dLen = Math.min(dLeft, bytesPerChecksum);
           checksum.update(buf, dOff, dLen);
           if (!checksum.compare(buf, cOff)) {
+            long failedPos = offset + len - dLeft;
             throw new ChecksumException("Checksum failed at " + 
-                                        (offset + len - dLeft), len);
+                                        failedPos, failedPos);
           }
           dLeft -= dLen;
           dOff += dLen;

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri Aug 28 01:07:27 2009
@@ -267,7 +267,8 @@
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
-          int timeoutValue = targets.length * datanode.socketTimeout;
+          int timeoutValue = datanode.socketTimeout
+              + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
           int writeTimeout = datanode.socketWriteTimeout + 
                       (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
           NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Aug 28 01:07:27 2009
@@ -646,8 +646,7 @@
 
   /**
    * Get block locations within the specified range.
-   * 
-   * @see #getBlockLocations(String, long, long)
+   * @see ClientProtocol#getBlockLocations(String, long, long)
    */
   LocatedBlocks getBlockLocations(String clientMachine, String src,
       long offset, long length) throws IOException {
@@ -670,18 +669,9 @@
   /**
    * Get block locations within the specified range.
    * @see ClientProtocol#getBlockLocations(String, long, long)
-   */
-  public LocatedBlocks getBlockLocations(String src, long offset, long length
-      ) throws IOException {
-    return getBlockLocations(src, offset, length, false);
-  }
-
-  /**
-   * Get block locations within the specified range.
-   * @see ClientProtocol#getBlockLocations(String, long, long)
    * @throws FileNotFoundException
    */
-  public LocatedBlocks getBlockLocations(String src, long offset, long length,
+  LocatedBlocks getBlockLocations(String src, long offset, long length,
       boolean doAccessTime) throws IOException {
     if (offset < 0) {
       throw new IOException("Negative offset is not supported. File: " + src );
@@ -693,7 +683,7 @@
     if (inode == null)
       throw new FileNotFoundException();
     final LocatedBlocks ret = getBlockLocationsInternal(src, inode,
-        offset, length, Integer.MAX_VALUE, doAccessTime);  
+        offset, length, doAccessTime);  
     if (auditLog.isInfoEnabled()) {
       logAuditEvent(UserGroupInformation.getCurrentUGI(),
                     Server.getRemoteIp(),
@@ -706,25 +696,18 @@
                                                        INodeFile inode,
                                                        long offset, 
                                                        long length,
-                                                       int nrBlocksToReturn,
                                                        boolean doAccessTime
                                                        ) throws IOException {
-    if(inode == null) {
-      return null;
-    }
     if (doAccessTime && isAccessTimeSupported()) {
       dir.setTimes(src, inode, -1, now(), false);
     }
-    Block[] blocks = inode.getBlocks();
+    final Block[] blocks = inode.getBlocks();
     if (blocks == null) {
       return null;
     }
-    if (blocks.length == 0) {
-      return inode.createLocatedBlocks(new ArrayList<LocatedBlock>(blocks.length));
-    }
-    
-    List<LocatedBlock> results = blockManager.getBlockLocations(blocks,
-        offset, length, nrBlocksToReturn);
+    final List<LocatedBlock> results = blocks.length == 0?
+        new ArrayList<LocatedBlock>(0):
+        blockManager.getBlockLocations(blocks, offset, length, Integer.MAX_VALUE);
     return inode.createLocatedBlocks(results);
   }
 

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Fri Aug 28 01:07:27 2009
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.common.ThreadLocalDateFormat;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
@@ -28,12 +28,13 @@
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Stack;
-import java.util.TimeZone;
 import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -45,12 +46,13 @@
 public class ListPathsServlet extends DfsServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
-  public static final ThreadLocalDateFormat df = 
-    new ThreadLocalDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
 
-  static {
-    df.setTimeZone(TimeZone.getTimeZone("UTC"));
-  }
+  public static final ThreadLocal<SimpleDateFormat> df =
+    new ThreadLocal<SimpleDateFormat>() {
+      protected SimpleDateFormat initialValue() {
+        return HftpFileSystem.getDateFormat();
+      }
+    };
 
   /**
    * Write a node to output.
@@ -58,10 +60,11 @@
    * For files, it also includes size, replication and block-size. 
    */
   static void writeInfo(FileStatus i, XMLOutputter doc) throws IOException {
+    final SimpleDateFormat ldf = df.get();
     doc.startTag(i.isDir() ? "directory" : "file");
     doc.attribute("path", i.getPath().toUri().getPath());
-    doc.attribute("modified", df.format(new Date(i.getModificationTime())));
-    doc.attribute("accesstime", df.format(new Date(i.getAccessTime())));
+    doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
+    doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
     if (!i.isDir()) {
       doc.attribute("size", String.valueOf(i.getLen()));
       doc.attribute("replication", String.valueOf(i.getReplication()));
@@ -92,7 +95,7 @@
     root.put("recursive", recur ? "yes" : "no");
     root.put("filter", filter);
     root.put("exclude", exclude);
-    root.put("time", df.format(new Date()));
+    root.put("time", df.get().format(new Date()));
     root.put("version", VersionInfo.getVersion());
     return root;
   }
@@ -162,10 +165,11 @@
         }
         catch(RemoteException re) {re.writeXml(p, doc);}
       }
+    } finally {
       if (doc != null) {
         doc.endDocument();
       }
-    } finally {
+
       if (out != null) {
         out.close();
       }

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Aug 28 01:07:27 2009
@@ -174,7 +174,7 @@
   /** Return the {@link FSNamesystem} object.
    * @return {@link FSNamesystem} object.
    */
-  public FSNamesystem getNamesystem() {
+  FSNamesystem getNamesystem() {
     return namesystem;
   }
 

Modified: hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java Fri Aug 28 01:07:27 2009
@@ -52,6 +52,7 @@
    */
   public static class DataTransferTest implements PipelineTest {
     private List<Pipeline> pipelines = new ArrayList<Pipeline>();
+    private volatile boolean isSuccess = false;
 
     /** Simulate action for the receiverOpWriteBlock pointcut */
     public final ActionContainer<DatanodeID> fiReceiverOpWriteBlock
@@ -62,6 +63,22 @@
     /** Simulate action for the statusRead pointcut */
     public final ActionContainer<DatanodeID> fiStatusRead
         = new ActionContainer<DatanodeID>();
+    /** Verification action for the pipelineInitNonAppend pointcut */
+    public final ActionContainer<Integer> fiPipelineInitErrorNonAppend
+        = new ActionContainer<Integer>();
+    /** Verification action for the pipelineErrorAfterInit pointcut */
+    public final ActionContainer<Integer> fiPipelineErrorAfterInit
+        = new ActionContainer<Integer>();
+
+    /** Get test status */
+    public boolean isSuccess() {
+      return this.isSuccess;
+    }
+
+    /** Set test status */
+    public void markSuccess() {
+      this.isSuccess = true;
+    }
 
     /** Initialize the pipeline. */
     public Pipeline initPipeline(LocatedBlock lb) {
@@ -127,8 +144,9 @@
 
     @Override
     public void run(DatanodeID id) {
-      final Pipeline p = getPipelineTest().getPipeline(id);
-      if (p.contains(index, id)) {
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
+      if (!test.isSuccess() && p.contains(index, id)) {
         final String s = toString(id);
         FiTestUtil.LOG.info(s);
         throw new OutOfMemoryError(s);
@@ -145,7 +163,8 @@
 
     @Override
     public void run(DatanodeID id) throws DiskOutOfSpaceException {
-      final Pipeline p = getPipelineTest().getPipeline(id);
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
       if (p.contains(index, id)) {
         final String s = toString(id);
         FiTestUtil.LOG.info(s);
@@ -173,8 +192,9 @@
 
     @Override
     public void run(DatanodeID id) {
-      final Pipeline p = getPipelineTest().getPipeline(id);
-      if (p.contains(index, id)) {
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
+      if (!test.isSuccess() && p.contains(index, id)) {
         final String s = toString(id) + ", duration=" + duration;
         FiTestUtil.LOG.info(s);
         if (duration <= 0) {
@@ -185,4 +205,36 @@
       }
     }
   }
+
+  /** Action for pipeline error verification */
+  public static class VerificationAction implements Action<Integer> {
+    /** The name of the test */
+    final String currentTest;
+    /** The error index of the datanode */
+    final int errorIndex;
+
+    /**
+     * Create a verification action for errors at datanode i in the pipeline.
+     * 
+     * @param currentTest The name of the test
+     * @param i The error index of the datanode
+     */
+    public VerificationAction(String currentTest, int i) {
+      this.currentTest = currentTest;
+      this.errorIndex = i;
+    }
+
+    /** {@inheritDoc} */
+    public String toString() {
+      return currentTest + ", errorIndex=" + errorIndex;
+    }
+
+    @Override
+    public void run(Integer i) {
+      if (i == errorIndex) {
+        FiTestUtil.LOG.info(this + ", successfully verified.");
+        getDataTransferTest().markSuccess();
+      }
+    }
+  }
 }
\ No newline at end of file

Added: hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj?rev=808714&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj Fri Aug 28 01:07:27 2009
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fi.DataTransferTestUtil;
+import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream.DataStreamer;
+
+import org.junit.Assert;
+
+/** Aspects for DFSClient */
+public aspect DFSClientAspects {
+  public static final Log LOG = LogFactory.getLog(DFSClientAspects.class);
+
+  pointcut callCreateBlockOutputStream(DataStreamer datastreamer):
+    call(* createBlockOutputStream(..)) && target(datastreamer);
+
+  before(DataStreamer datastreamer) : callCreateBlockOutputStream(datastreamer) {
+    Assert.assertFalse(datastreamer.hasError);
+    Assert.assertEquals(0, datastreamer.errorIndex);
+  }
+
+  pointcut pipelineInitNonAppend(DataStreamer datastreamer):
+    callCreateBlockOutputStream(datastreamer) 
+    && cflow(execution(* nextBlockOutputStream(..)))
+    && within(DataStreamer);
+
+  after(DataStreamer datastreamer) returning : pipelineInitNonAppend(datastreamer) {
+    LOG.info("FI: after pipelineInitNonAppend: hasError="
+        + datastreamer.hasError + " errorIndex=" + datastreamer.errorIndex);
+    try {
+      if (datastreamer.hasError) {
+        DataTransferTestUtil.getDataTransferTest().fiPipelineInitErrorNonAppend
+            .run(datastreamer.errorIndex);
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  pointcut pipelineInitAppend(DataStreamer datastreamer):
+    callCreateBlockOutputStream(datastreamer) 
+    && cflow(execution(* initAppend(..)))
+    && within(DataStreamer);
+
+  after(DataStreamer datastreamer) returning : pipelineInitAppend(datastreamer) {
+    LOG.info("FI: after pipelineInitAppend: hasError=" + datastreamer.hasError
+        + " errorIndex=" + datastreamer.errorIndex);
+  }
+
+  pointcut pipelineErrorAfterInit(boolean onError, boolean isAppend,
+      DataStreamer datastreamer):
+    call(* processDatanodeError(boolean, boolean))
+    && args(onError, isAppend)
+    && target(datastreamer)
+    && if(onError && !isAppend);
+
+  before(DataStreamer datastreamer) : pipelineErrorAfterInit(boolean, boolean, datastreamer) {
+    LOG.info("FI: before pipelineErrorAfterInit: errorIndex="
+        + datastreamer.errorIndex);
+    try {
+      DataTransferTestUtil.getDataTransferTest().fiPipelineErrorAfterInit
+          .run(datastreamer.errorIndex);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}

Modified: hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java Fri Aug 28 01:07:27 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fi.DataTransferTestUtil.DoosAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.OomAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
+import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil.Action;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -45,6 +47,7 @@
   static {
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
+    conf.setInt("dfs.socket.timeout", 5000);
   }
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
@@ -63,8 +66,8 @@
   private static void write1byte(String methodName) throws IOException {
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true,
         null);
+    final FileSystem dfs = cluster.getFileSystem();
     try {
-      final FileSystem dfs = cluster.getFileSystem();
       final Path p = new Path("/" + methodName + "/foo");
       final FSDataOutputStream out = createFile(dfs, p);
       out.write(1);
@@ -76,6 +79,7 @@
       Assert.assertEquals(1, b);
     }
     finally {
+      dfs.close();
       cluster.shutdown();
     }
   }
@@ -90,6 +94,93 @@
     write1byte(methodName);
   }
   
+  private static void runReceiverOpWriteBlockTest(String methodName,
+      int errorIndex, Action<DatanodeID> a) throws IOException {
+    FiTestUtil.LOG.info("Running " + methodName + " ...");
+    final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
+        .initTest();
+    t.fiReceiverOpWriteBlock.set(a);
+    t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName,
+        errorIndex));
+    write1byte(methodName);
+    Assert.assertTrue(t.isSuccess());
+  }
+  
+  private static void runStatusReadTest(String methodName, int errorIndex,
+      Action<DatanodeID> a) throws IOException {
+    FiTestUtil.LOG.info("Running " + methodName + " ...");
+    final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
+        .initTest();
+    t.fiStatusRead.set(a);
+    t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName,
+        errorIndex));
+    write1byte(methodName);
+    Assert.assertTrue(t.isSuccess());
+  }
+
+  private static void runCallReceivePacketTest(String methodName,
+      int errorIndex, Action<DatanodeID> a) throws IOException {
+    FiTestUtil.LOG.info("Running " + methodName + " ...");
+    final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
+    t.fiCallReceivePacket.set(a);
+    t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, errorIndex));
+    write1byte(methodName);
+    Assert.assertTrue(t.isSuccess());
+  }
+
+  /**
+   * Pipeline setup:
+   * DN0 never responds after receiving the setup request from the client.
+   * The client gets an IOException and determines DN0 is bad.
+   */
+  @Test
+  public void pipeline_Fi_01() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 0, new SleepAction(methodName, 0, 0));
+  }
+
+  /**
+   * Pipeline setup:
+   * DN1 never responds after receiving the setup request from the client.
+   * The client gets an IOException and determines DN1 is bad.
+   */
+  @Test
+  public void pipeline_Fi_02() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 1, new SleepAction(methodName, 1, 0));
+  }
+
+  /**
+   * Pipeline setup:
+   * DN2 never responds after receiving the setup request from the client.
+   * The client gets an IOException and determines DN2 is bad.
+   */
+  @Test
+  public void pipeline_Fi_03() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 2, new SleepAction(methodName, 2, 0));
+  }
+
+  /**
+   * Pipeline setup, DN1 never responds after receiving the setup ack from DN2.
+   * The client gets an IOException and determines DN1 is bad.
+   */
+  @Test
+  public void pipeline_Fi_04() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runStatusReadTest(methodName, 1, new SleepAction(methodName, 1, 0));
+  }
+
+  /**
+   * Pipeline setup, DN0 never responds after receiving the setup ack from DN1.
+   * The client gets an IOException and determines DN0 is bad.
+   */
+  @Test
+  public void pipeline_Fi_05() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runStatusReadTest(methodName, 0, new SleepAction(methodName, 0, 0));
+  }
+
   /**
    * Pipeline setup with DN0 very slow, but not slow enough to cause a timeout.
    * The client finishes setup successfully.
@@ -120,18 +211,37 @@
     runSlowDatanodeTest(methodName, new SleepAction(methodName, 2, 3000));
   }
 
-  private static void runCallReceivePacketTest(String methodName,
-      Action<DatanodeID> a) throws IOException {
-    FiTestUtil.LOG.info("Running " + methodName + " ...");
-    ((DataTransferTest)DataTransferTestUtil.initTest()).fiCallReceivePacket.set(a);
-    write1byte(methodName);
+  /**
+   * Pipeline setup, DN0 throws an OutOfMemoryError right after it
+   * receives a setup request from the client.
+   * The client gets an IOException and determines DN0 is bad.
+   */
+  @Test
+  public void pipeline_Fi_09() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 0, new OomAction(methodName, 0));
   }
 
-  private static void runStatusReadTest(String methodName, Action<DatanodeID> a
-      ) throws IOException {
-    FiTestUtil.LOG.info("Running " + methodName + " ...");
-    ((DataTransferTest)DataTransferTestUtil.initTest()).fiStatusRead.set(a);
-    write1byte(methodName);
+  /**
+   * Pipeline setup, DN1 throws an OutOfMemoryError right after it
+   * receives a setup request from DN0.
+   * The client gets an IOException and determines DN1 is bad.
+   */
+  @Test
+  public void pipeline_Fi_10() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 1, new OomAction(methodName, 1));
+  }
+
+  /**
+   * Pipeline setup, DN2 throws an OutOfMemoryError right after it
+   * receives a setup request from DN1.
+   * The client gets an IOException and determines DN2 is bad.
+   */
+  @Test
+  public void pipeline_Fi_11() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runReceiverOpWriteBlockTest(methodName, 2, new OomAction(methodName, 2));
   }
 
   /**
@@ -142,7 +252,18 @@
   @Test
   public void pipeline_Fi_12() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runStatusReadTest(methodName, new OomAction(methodName, 1));
+    runStatusReadTest(methodName, 1, new OomAction(methodName, 1));
+  }
+
+  /**
+   * Pipeline setup, DN0 throws an OutOfMemoryError right after it
+   * receives a setup ack from DN1.
+   * The client gets an IOException and determines DN0 is bad.
+   */
+  @Test
+  public void pipeline_Fi_13() throws IOException {
+    final String methodName = FiTestUtil.getMethodName();
+    runStatusReadTest(methodName, 0, new OomAction(methodName, 0));
   }
 
   /**
@@ -153,7 +274,7 @@
   @Test
   public void pipeline_Fi_14() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, new DoosAction(methodName, 0));
+    runCallReceivePacketTest(methodName, 0, new DoosAction(methodName, 0));
   }
 
   /**
@@ -164,9 +285,9 @@
   @Test
   public void pipeline_Fi_15() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, new DoosAction(methodName, 1));
+    runCallReceivePacketTest(methodName, 1, new DoosAction(methodName, 1));
   }
-
+  
   /**
    * Streaming: Write a packet, DN2 throws a DiskOutOfSpaceException
    * when it writes the data to disk.
@@ -175,6 +296,6 @@
   @Test
   public void pipeline_Fi_16() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, new DoosAction(methodName, 2));
+    runCallReceivePacketTest(methodName, 2, new DoosAction(methodName, 2));
   }
 }
\ No newline at end of file

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java Fri Aug 28 01:07:27 2009
@@ -21,6 +21,7 @@
 import java.io.BufferedReader;
 import java.io.DataInputStream;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -132,9 +133,13 @@
       return;
     }
     
-    FileStatus children[] = fs.listStatus(rootFile);
-    if (children == null)
+    FileStatus[] children = null;
+    try {
+      children = fs.listStatus(rootFile);
+    } catch (FileNotFoundException fnfe) {
       throw new IOException("Could not get listing for " + rootFile);
+    }
+
     for (int i = 0; i < children.length; i++)
       listSubtree(children[i], writer);
   }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Aug 28 01:07:27 2009
@@ -19,18 +19,19 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.nio.channels.FileChannel;
 import java.util.Random;
-import java.io.RandomAccessFile;
 
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.*;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -38,12 +39,15 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.security.*;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -517,7 +521,7 @@
    * @return {@link FSNamesystem} object.
    */
   public FSNamesystem getNamesystem() {
-    return nameNode.getNamesystem();
+    return NameNodeAdapter.getNamesystem(nameNode);
   }
 
   /**
@@ -918,7 +922,7 @@
    * Set the softLimit and hardLimit of client lease periods
    */
   void setLeasePeriod(long soft, long hard) {
-    final FSNamesystem namesystem = nameNode.getNamesystem();
+    final FSNamesystem namesystem = getNamesystem();
     namesystem.leaseManager.setLeasePeriod(soft, hard);
     namesystem.lmthread.interrupt();
   }

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java Fri Aug 28 01:07:27 2009
@@ -78,7 +78,7 @@
 
       Thread.sleep(5000);
       FSNamesystemMetrics fsMetrics = 
-                     cluster.getNameNode().getNamesystem().getFSNamesystemMetrics();
+                     cluster.getNamesystem().getFSNamesystemMetrics();
       assertEquals(1,fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
     }finally {
       cluster.shutdown();

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Fri Aug 28 01:07:27 2009
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 
 /**
  * This class tests the decommissioning of nodes.
@@ -158,7 +158,7 @@
   /*
    * decommission one random node.
    */
-  private String decommissionNode(NameNode namenode,
+  private String decommissionNode(FSNamesystem namesystem,
                                   Configuration conf,
                                   DFSClient client, 
                                   FileSystem localFileSys)
@@ -183,7 +183,7 @@
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namenode.getNamesystem().refreshNodes(conf);
+    namesystem.refreshNodes(conf);
     return nodename;
   }
 
@@ -277,7 +277,7 @@
                            replicas + " replicas.");
         checkFile(fileSys, file1, replicas);
         printFileLocations(fileSys, file1);
-        String downnode = decommissionNode(cluster.getNameNode(), conf,
+        String downnode = decommissionNode(cluster.getNamesystem(), conf,
                                            client, localFileSys);
         decommissionedNodes.add(downnode);
         waitNodeState(fileSys, downnode, NodeState.DECOMMISSIONED);

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Aug 28 01:07:27 2009
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessToken;
 
@@ -95,8 +96,8 @@
       DFSTestUtil.waitReplication(fs, fileName, (short)1);
 
       // get the block belonged to the created file
-      LocatedBlocks blocks = cluster.getNamesystem().getBlockLocations(
-          fileName.toString(), 0, (long)fileLen);
+      LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
       assertEquals(blocks.locatedBlockCount(), 1);
       LocatedBlock block = blocks.get(0);
       

Added: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=808714&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Aug 28 01:07:27 2009
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+
+/**
+ * This is a utility class to expose NameNode functionality for unit tests.
+ */
+public class NameNodeAdapter {
+  /**
+   * Get the namesystem from the namenode
+   */
+  public static FSNamesystem getNamesystem(NameNode namenode) {
+    return namenode.getNamesystem();
+  }
+
+  /**
+   * Get block locations within the specified range.
+   */
+  public static LocatedBlocks getBlockLocations(NameNode namenode,
+      String src, long offset, long length) throws IOException {
+    return namenode.getNamesystem().getBlockLocations(
+        src, offset, length, false);
+  }
+}
\ No newline at end of file

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
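
Tests outside the org.apache.hadoop.hdfs.server.namenode package now reach
the package-private NameNode internals through this adapter, as the
TestDiskError hunk above and the TestNameNodeMetrics hunk below do. A minimal
sketch (the helper class is hypothetical):

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

    class AdapterUsageSketch {
      // Look up block locations for a file without calling the now
      // package-private NameNode#getNamesystem() directly.
      static LocatedBlocks locate(NameNode nn, String src) throws IOException {
        return NameNodeAdapter.getBlockLocations(nn, src, 0L, 1024L);
      }
    }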

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=808714&r1=808713&r2=808714&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Aug 28 01:07:27 2009
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 /**
  * Test for metrics published by the Namenode
@@ -53,7 +54,7 @@
   protected void setUp() throws Exception {
     cluster = new MiniDFSCluster(CONF, 3, true, null);
     cluster.waitActive();
-    namesystem = cluster.getNameNode().getNamesystem();
+    namesystem = cluster.getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();
     metrics = namesystem.getFSNamesystemMetrics();
   }
@@ -106,7 +107,8 @@
     createFile(file, 100, (short)2);
     
     // Corrupt first replica of the block
-    LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+    LocatedBlock block = NameNodeAdapter.getBlockLocations(
+        cluster.getNameNode(), file, 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
     assertEquals(1, metrics.corruptBlocks.get());
@@ -140,7 +142,8 @@
     createFile(file, 100, (short)1);
     
     // Corrupt the only replica of the block to result in a missing block
-    LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+    LocatedBlock block = NameNodeAdapter.getBlockLocations(
+        cluster.getNameNode(), file, 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
     assertEquals(1, metrics.underReplicatedBlocks.get());