Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/06/07 03:11:17 UTC

svn commit: r1132846 [1/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ src/c++/libhdfs/ src/c++/libhdfs/m4/ src/c++/libhdfs/tests/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/bin/ src/docs/src/documentation/content/xdocs/ src/java/ src/java/org/apac...

Author: todd
Date: Tue Jun  7 01:11:15 2011
New Revision: 1132846

URL: http://svn.apache.org/viewvc?rev=1132846&view=rev
Log:
Merge trunk into HDFS-1073. Some TODOs left for usage of the new LayoutVersion improvements

Added:
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
      - copied unchanged from r1132839, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
      - copied, changed from r1132839, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz
      - copied unchanged from r1132839, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
      - copied unchanged from r1132839, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
Modified:
    hadoop/hdfs/branches/HDFS-1073/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh
    hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh
    hadoop/hdfs/branches/HDFS-1073/build.xml
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/configure.ac
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/m4/apsupport.m4
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/hdfsproxy
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/proxy-util
    hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfs_design.xml
    hadoop/hdfs/branches/HDFS-1073/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml
    hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsclusterhealth.xsl
    hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1073/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun  7 01:11:15 2011
@@ -2,4 +2,4 @@
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:1086482-1128452
+/hadoop/hdfs/trunk:1086482-1132839

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.txt?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.txt Tue Jun  7 01:11:15 2011
@@ -258,7 +258,7 @@ Trunk (unreleased changes)
     HDFS-1813. Federation: Authentication using BlockToken in RPC to datanode 
                fails. (jitendra)
 
-    HDFS_1630. Support fsedits checksum. (hairong)
+    HDFS-1630. Support fsedits checksum. (hairong)
 
     HDFS-1606. Provide a stronger data guarantee in the write pipeline by
     adding a new datanode when an existing datanode failed.  (szetszwo)
@@ -287,6 +287,12 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
+    HDFS-2019. Fix all the places where Java method File.list is used with
+    FileUtil.list API (Bharath Mundlapudi via mattf)
+
+    HDFS-1934. Fix NullPointerException when certain File APIs return null
+    (Bharath Mundlapudi via mattf)
+
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
 
     HDFS-1628. Display full path in AccessControlException.  (John George
@@ -433,10 +439,10 @@ Trunk (unreleased changes)
     HDFS-1573. Add useful tracing information to Lease Renewer thread names
     (todd)
 
-    HDFS-1939.  In ivy.xml, test conf should not extend common conf.
+    HDFS-1939. In ivy.xml, test conf should not extend common conf.
     (Eric Yang via szetszwo)
 
-    HDFS-1332.  Include more information in exceptions and debug messages
+    HDFS-1332. Include more information in exceptions and debug messages
     when BlockPlacementPolicy cannot be satisfied.  (Ted Yu via szetszwo)
 
     HDFS-1958. Confirmation should be more lenient of user input when
@@ -445,17 +451,40 @@ Trunk (unreleased changes)
     HDFS-1905. Improve namenode -format command by not making -clusterId
     parameter mandatory. (Bharath Mundlapudi via suresh)
 
-    HDFS-1877.  Add a new test for concurrent read and write.  (CW Chung
+    HDFS-1877. Add a new test for concurrent read and write.  (CW Chung
     via szetszwo)
 
     HDFS-1959. Better error message for missing namenode directory. (eli)
 
-    HDFS-1996.  ivy: hdfs test jar should be independent to common test jar.
+    HDFS-1996. ivy: hdfs test jar should be independent to common test jar.
     (Eric Yang via szetszwo)
 
     HDFS-1812. TestHDFSCLI should clean up cluster in teardown method.
     (Uma Maheswara Rao G via todd)
 
+    HDFS-1884. Improve TestDFSStorageStateRecovery to properly throw in the
+    case of errors. (Aaron T. Myers via todd)
+
+    HDFS-1727. fsck command should display command usage if user passes any
+    illegal argument. (Sravan Kumar via todd)
+
+    HDFS-1636. If dfs.name.dir points to an empty dir, namenode format
+    shouldn't require confirmation. (Harsh J Chouraria via todd)
+
+    HDFS-1966. Encapsulate individual DataTransferProtocol op headers.
+    (szetszwo)
+
+    HDFS-2024. Format TestWriteRead source codes.  (CW Chung via szetszwo)
+
+    HDFS-1968. Enhance TestWriteRead to support position/sequential read,
+    append, truncate and verbose options.  (CW Chung via szetszwo)
+
+    HDFS-1986. Add option to get http/https address from 
+    DFSUtil#getInfoServer(). (Tanping via suresh)
+
+    HDFS-2029. In TestWriteRead, check visible length immediately after
+    openning the file and fix code style.  (John George via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -635,6 +664,38 @@ Trunk (unreleased changes)
     HDFS-1592. Datanode startup doesn't honor volumes.tolerated. 
     (Bharath Mundlapudi via jitendra)
 
+    HDFS-1920. libhdfs does not build for ARM processors.
+    (Trevor Robinson via eli)
+
+    HDFS-1936. Layout version change from HDFS-1822 causes upgrade failure.
+    (suresh)
+
+    HDFS-2021. Update numBytesAcked before sending the ack in PacketResponder.
+    (John George via szetszwo)
+
+    HDFS-2020. Fix TestDFSUpgradeFromImage by removing the use of DataNode
+    as a singleton. (suresh via todd)
+
+    HDFS-2022. ant binary should build libhdfs. (Eric Yang via eli)
+
+    HDFS-2014. Change HDFS scripts to work in developer enviroment post
+    RPM packaging changes. (Eric Yang via suresh)
+
+    HDFS-1995. Federation: Minor bug fixes and modification cluster web UI.
+    (Tanping Wang via suresh)
+
+    HDFS-1907. Fix position read for reading still-being-written file in
+    DFSInputStream.  (John George via szetszwo)
+
+    HDFS-1923. In TestFiDataTransferProtocol2, reduce random sleep time period
+    and increase the number of datanodes.  (szetszwo)
+
+    HDFS-1149. Lease reassignment should be persisted to the edit log.
+    (Aaron T. Myers via todd)
+
+    HDFS-1998. Federation: Make refresh-namenodes.sh refresh all the
+    namenode. (Tanping Wang via suresh)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -875,6 +936,12 @@ Release 0.22.0 - Unreleased
 
     HDFS-1957. Add documentation for HFTP. (Ari Rabkin via todd)
 
+    HDFS-1454. Update the documentation to reflect that clients don't write
+    blocks to local disk before copying to HDFS. (Harsh J Chouraria via todd)
+
+    HDFS-1980. Move build/webapps deeper in the build directory heirarchy
+    to aid eclipse users. (todd)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
@@ -1081,6 +1148,15 @@ Release 0.22.0 - Unreleased
     HDFS-2000. Missing deprecation for io.bytes.per.checksum.
     (Aaron T. Myers vie eli)
 
+    HDFS-977. DataNode.createInterDataNodeProtocolProxy() guards a log
+    at the wrong level. (Harsh J Chouraria via todd)
+
+    HDFS-1969. Running rollback on new-version namenode destroys the
+    namespace. (todd)
+
+    HDFS-2039. TestNameNodeMetrics uses a bad test root path, preventing it
+    from running inside Eclipse. (todd)
+
 Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh Tue Jun  7 01:11:15 2011
@@ -36,7 +36,11 @@
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 
-. "$bin/../libexec/hdfs-config.sh"
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin/../libexec/hdfs-config.sh"
+else
+  . "$bin/hdfs-config.sh" 
+fi
 
 if [ "$1" = '' ] ; then
   "Error: please specify local exclude file as a first argument"

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs Tue Jun  7 01:11:15 2011
@@ -19,7 +19,11 @@ bin=`which $0`
 bin=`dirname ${bin}`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
 
 function print_usage(){
   echo "Usage: hdfs [--config confdir] COMMAND"
@@ -108,17 +112,17 @@ else
 fi
 
 # for developers, add hdfs classes to CLASSPATH
-if [ -d "$HADOOP_PREFIX/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
+if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
 fi
-if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
+if [ -d "$HADOOP_HDFS_HOME/build/web/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/web
 fi
-if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
+if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
 fi
-if [ -d "$HADOOP_PREFIX/build/tools" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/tools
+if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
 fi
 
 # for releases, add core hdfs jar & webapps to CLASSPATH

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh Tue Jun  7 01:11:15 2011
@@ -24,9 +24,13 @@ bin=`cd "$bin"; pwd`
 
 export HADOOP_PREFIX="${HADOOP_PREFIX:-$bin/..}"
 
-if [ -d "$bin" ]; then
+if [ -e "$bin/../libexec/hadoop-config.sh" ]; then
   . $bin/../libexec/hadoop-config.sh
-elif [ -e "${HADOOP_HDFS_HOME}"/bin/hadoop-config.sh ]; then
+elif [ -e "${HADOOP_COMMON_HOME}/bin/hadoop-config.sh" ]; then
+  . "$HADOOP_COMMON_HOME"/bin/hadoop-config.sh
+elif [ -e "${HADOOP_HOME}/bin/hadoop-config.sh" ]; then
+  . "$HADOOP_HOME"/bin/hadoop-config.sh
+elif [ -e "${HADOOP_HDFS_HOME}/bin/hadoop-config.sh" ]; then
   . "$HADOOP_HDFS_HOME"/bin/hadoop-config.sh
 else
   echo "Hadoop common not found."

Modified: hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh Tue Jun  7 01:11:15 2011
@@ -23,21 +23,27 @@
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 
-. "$bin/../libexec/hdfs-config.sh"
-
-namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin/../libexec/hdfs-config.sh"
+else
+  . "$bin/hdfs-config.sh"
+fi
 
-for namenode in $namenodes ; do
-  echo "Refreshing namenode [$namenode]"
-  "$HADOOP_PREFIX/bin/hdfs" dfsadmin -refreshNodes
-  if [ "$?" != '0' ] ; then errorFlag='1' ; fi
-done
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
+if [ "$?" != '0' ] ; then errorFlag='1' ; 
+else
+  for namenode in $namenodes ; do
+    echo "Refreshing namenode [$namenode]"
+    "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
+    if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+  done
+fi
 
 if [ "$errorFlag" = '1' ] ; then
   echo "Error: refresh of namenodes failed, see error messages above."
   exit 1
 else
-  echo "Refresh of all namenodes succeeded."
+  echo "Refresh of namenodes done."
 fi
 
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh Tue Jun  7 01:11:15 2011
@@ -18,7 +18,11 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
 
 # Start balancer daemon.
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh Tue Jun  7 01:11:15 2011
@@ -25,7 +25,11 @@ usage="Usage: start-dfs.sh [-upgrade|-ro
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
 
 # get arguments
 if [ $# -ge 1 ]; then

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh Tue Jun  7 01:11:15 2011
@@ -22,7 +22,12 @@ usage="Usage (run as root in order to st
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
+
 
 if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
   "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh Tue Jun  7 01:11:15 2011
@@ -18,7 +18,12 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
+
 
 # Stop balancer daemon.
 # Run this on the machine where the balancer is running

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh Tue Jun  7 01:11:15 2011
@@ -18,7 +18,11 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
 
 #---------------------------------------------------------
 # namenodes

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh Tue Jun  7 01:11:15 2011
@@ -22,7 +22,11 @@ usage="Usage (run as root in order to st
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/../libexec/hdfs-config.sh
+if [ -e "$bin/../libexec/hdfs-config.sh" ]; then
+  . "$bin"/../libexec/hdfs-config.sh
+else
+  . "$bin/hdfs-config.sh"
+fi
 
 if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
   "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode

Modified: hadoop/hdfs/branches/HDFS-1073/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/build.xml?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/build.xml Tue Jun  7 01:11:15 2011
@@ -53,7 +53,8 @@
   <property name="build-fi.dir" value="${basedir}/build-fi"/>
   <property name="build.classes" value="${build.dir}/classes"/>
   <property name="build.src" value="${build.dir}/src"/>
-  <property name="build.webapps" value="${build.dir}/webapps"/>
+  <property name="build.webapps.root.dir" value="${build.dir}/web"/>
+  <property name="build.webapps" value="${build.webapps.root.dir}/webapps"/>
   <property name="build.anttasks" value="${build.dir}/ant"/>
 
   <!-- convert spaces to _ so that mac os doesn't break things -->
@@ -238,7 +239,7 @@
     <pathelement location="${test.build.extraconf}"/>
     <pathelement location="${test.hdfs.build.classes}" />
     <pathelement location="${test.src.dir}"/>
-    <pathelement location="${build.dir}"/>
+    <pathelement location="${build.webapps.root.dir}"/>
     <pathelement location="${build.tools}"/>
     <pathelement path="${clover.jar}"/>
     <path refid="ivy-test.classpath"/>
@@ -257,7 +258,7 @@
     <path refid="classpath"/>
     <pathelement location="${test.hdfs.build.classes}" />
     <pathelement location="${test.src.dir}"/>
-    <pathelement location="${build.dir}"/>
+    <pathelement location="${build.webapps.root.dir}"/>
   </path>
 
 
@@ -325,6 +326,10 @@
 
   </target>
 
+  <target name="set-c++-libhdfs">
+    <property name="libhdfs" value="true"/>
+  </target>
+
   <import file="${test.src.dir}/aop/build/aop.xml"/>
 
   <target name="compile-hdfs-classes" depends="init">
@@ -434,6 +439,7 @@
     <delete dir="${test.cache.data}"/>
     <mkdir dir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/>
+    <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/cli/testHDFSConf.xml" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/hdfs/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/>
@@ -1096,7 +1102,7 @@
     </macro_tar>
   </target>
 
-  <target name="bin-package" depends="compile, jar, jar-test, ant-tasks, jsvc" 
+  <target name="bin-package" depends="set-c++-libhdfs, compile, compile-c++-libhdfs, jar, jar-test, ant-tasks, jsvc" 
 		description="assembles artifacts for binary target">
     <mkdir dir="${dist.dir}"/>
     <mkdir dir="${dist.dir}/lib"/>
@@ -1434,7 +1440,7 @@
      </subant>  	
   </target>
 
- <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="islibhdfs" unless="clover.enabled">
+ <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="libhdfs" unless="clover.enabled">
     <delete dir="${test.libhdfs.dir}"/>
     <mkdir dir="${test.libhdfs.dir}"/>
     <mkdir dir="${test.libhdfs.dir}/conf"/>
@@ -1458,16 +1464,7 @@
     </exec>
   </target>
 
-  <target name="check-c++-libhdfs">
-    <condition property="islibhdfs">
-      <and>
-        <isset property="compile.c++"/>
-        <isset property="libhdfs"/>
-      </and>
-    </condition>
-  </target>
-
-  <target name="check-libhdfs-configure" depends="init,check-c++-libhdfs" if="islibhdfs">
+  <target name="check-libhdfs-configure" depends="init" if="libhdfs">
     <condition property="need.libhdfs.configure">
        <not> <available file="${c++.libhdfs.src}/configure"/> </not>
     </condition>
@@ -1481,7 +1478,7 @@
     </exec>
   </target>
 
-  <target name="check-libhdfs-makefile" depends="init,check-c++-libhdfs" if="islibhdfs">
+  <target name="check-libhdfs-makefile" depends="init" if="libhdfs">
     <condition property="need.libhdfs.makefile">
        <not> <available file="${c++.libhdfs.src}/Makefile"/> </not>
     </condition>
@@ -1500,7 +1497,7 @@
     </exec>
   </target>
 
-  <target name="compile-c++-libhdfs" depends="create-libhdfs-makefile" if="islibhdfs">
+  <target name="compile-c++-libhdfs" depends="create-libhdfs-makefile" if="libhdfs">
     <exec executable="${make.cmd}" dir="${build.c++.libhdfs}" searchpath="yes"
           failonerror="yes">
       <env key="ac_cv_func_malloc_0_nonnull" value="yes"/>
@@ -1665,6 +1662,7 @@
         <library pathref="ivy-common.classpath" exported="true" />
         <library pathref="ivy-hdfs.classpath" exported="true" />
         <library pathref="ivy-test.classpath" exported="false" />
+        <library path="${build.webapps.root.dir}" exported="false" />
         <library path="${conf.dir}" exported="false" />
       </classpath>
     </eclipse>

Propchange: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun  7 01:11:15 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512
-/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1128452
+/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1132839

Modified: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/configure.ac
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/c%2B%2B/libhdfs/configure.ac?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/configure.ac (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/configure.ac Tue Jun  7 01:11:15 2011
@@ -82,9 +82,16 @@ AP_PROG_JAR()
 AP_JVM_LIBDIR()
 if test "$supported_os" != "darwin"
 then
-  CFLAGS="$CFLAGS -m${JVM_ARCH} -I$JAVA_HOME/include -I$JAVA_HOME/include/$supported_os"
-  LDFLAGS="$LDFLAGS -m${JVM_ARCH} -L$LIB_JVM_DIR -ljvm -Wl,-x"
+  case $host_cpu in
+  arm*) ;;
+  *)
+    CFLAGS="$CFLAGS -m${JVM_ARCH}"
+    LDFLAGS="$LDFLAGS -m${JVM_ARCH}"
+    ;;
+  esac
   AC_MSG_RESULT([VALUE OF JVM_ARCH IS :$JVM_ARCH])
+  CFLAGS="$CFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/$supported_os"
+  LDFLAGS="$LDFLAGS -L$LIB_JVM_DIR -ljvm -Wl,-x"
 fi
 
 dnl -------------------------------------------------------------------------

Modified: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/m4/apsupport.m4
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/c%2B%2B/libhdfs/m4/apsupport.m4?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/m4/apsupport.m4 (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/m4/apsupport.m4 Tue Jun  7 01:11:15 2011
@@ -118,6 +118,10 @@ AC_DEFUN([AP_SUPPORTED_HOST],[
     CFLAGS="$CFLAGS -DCPU=\\\"s390\\\""
     supported_os="s390"
     HOST_CPU=s390;;
+  arm*)
+    CFLAGS="$CFLAGS -DCPU=\\\"arm\\\""
+    supported_os="arm"
+    HOST_CPU=arm;;
   *)
     AC_MSG_RESULT([failed])
     AC_MSG_ERROR([Unsupported CPU architecture "$host_cpu"]);;

Modified: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/c%2B%2B/libhdfs/tests/test-libhdfs.sh?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh Tue Jun  7 01:11:15 2011
@@ -104,8 +104,8 @@ CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/to
 if [ -d "$HADOOP_PREFIX/build/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
 fi
-if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
+if [ -d "$HADOOP_PREFIX/build/web/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/web
 fi
 if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes

Propchange: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun  7 01:11:15 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1128452
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1132839

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/hdfsproxy
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/hdfsproxy?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/hdfsproxy (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/hdfsproxy Tue Jun  7 01:11:15 2011
@@ -84,8 +84,8 @@ CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/to
 if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
 fi
-if [ -d "$HDFSPROXY_HOME/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build
+if [ -d "$HDFSPROXY_HOME/build/web/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/web
 fi
 if [ -d "$HDFSPROXY_HOME/build/test/hdfs/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/hdfs/classes

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/proxy-util
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/proxy-util?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/proxy-util (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/bin/proxy-util Tue Jun  7 01:11:15 2011
@@ -85,8 +85,8 @@ CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/to
 if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
 fi
-if [ -d "$HDFSPROXY_HOME/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build
+if [ -d "$HDFSPROXY_HOME/build/web/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/web
 fi
 if [ -d "$HDFSPROXY_HOME/build/test/hdfs/classes" ]; then
   CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/hdfs/classes

Modified: hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfs_design.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfs_design.xml?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfs_design.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfs_design.xml Tue Jun  7 01:11:15 2011
@@ -387,41 +387,11 @@
         </p>
       </section>
 
- 
-      <section>
-        <!-- XXX staging never described / referenced in its section -->
-        <title> Staging </title>
-        <p>
-        A client request to create a file does not reach the NameNode immediately. In fact, initially the HDFS 
-        client caches the file data into a temporary local file. Application writes are transparently redirected to 
-        this temporary local file. When the local file accumulates data worth over one HDFS block size, the 
-        client contacts the NameNode. The NameNode inserts the file name into the file system hierarchy 
-        and allocates a data block for it. The NameNode responds to the client request with the identity 
-        of the DataNode and the destination data block. Then the client flushes the block of data from the 
-        local temporary file to the specified DataNode. When a file is closed, the remaining un-flushed data 
-        in the temporary local file is transferred to the DataNode. The client then tells the NameNode that 
-        the file is closed. At this point, the NameNode commits the file creation operation into a persistent 
-        store. If the NameNode dies before the file is closed, the file is lost. 
-        </p>
-        <p>
-        The above approach has been adopted after careful consideration of target applications that run on 
-        HDFS. These applications need streaming writes to files. If a client writes to a remote file directly 
-        without any client side buffering, the network speed and the congestion in the network impacts 
-        throughput considerably. This approach is not without precedent. Earlier distributed file systems, 
-        e.g. <acronym title="Andrew File System">AFS</acronym>, have used client side caching to 
-        improve performance. A POSIX requirement has been relaxed to achieve higher performance of 
-        data uploads. 
-        </p>
-      </section>
-
       <section>
         <title> Replication Pipelining </title>
         <p>
-        When a client is writing data to an HDFS file, its data is first written to a local file as explained 
-        in the previous section. Suppose the HDFS file has a replication factor of three. When the local 
-        file accumulates a full block of user data, the client retrieves a list of DataNodes from the NameNode. 
-        This list contains the DataNodes that will host a replica of that block. The client then flushes the 
-        data block to the first DataNode. The first DataNode starts receiving the data in small portions (4 KB), 
+        When a client is writing data to an HDFS file with a replication factor of 3, the NameNode retrieves a list of DataNodes using a replication target choosing algorithm.
+        This list contains the DataNodes that will host a replica of that block. The client then writes to the first DataNode. The first DataNode starts receiving the data in small portions (4 KB), 
         writes each portion to its local repository and transfers that portion to the second DataNode in the list. 
         The second DataNode, in turn starts receiving each portion of the data block, writes that portion to its 
         repository and then flushes that portion to the third DataNode. Finally, the third DataNode writes the 

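The rewritten "Replication Pipelining" paragraph above describes the write path entirely from the client's perspective: the NameNode picks the target DataNodes and the client streams data to the first one, which forwards it down the pipeline. As a minimal client-side sketch of the call that triggers that pipeline (the path and payload are hypothetical, and a configured cluster is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReplicationPipeliningSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // reads core-site.xml / hdfs-site.xml
        FileSystem fs = FileSystem.get(conf);
        // Create the file with a replication factor of 3.  The NameNode chooses the
        // three target DataNodes; the client writes to the first DataNode, which
        // forwards each small portion to the second, and so on down the pipeline.
        FSDataOutputStream out = fs.create(new Path("/tmp/pipelining-demo.txt"), (short) 3);
        out.writeBytes("hello, pipeline\n");
        out.close();   // completes the last block and closes the file
        fs.close();
      }
    }
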
Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun  7 01:11:15 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:1086482-1128452
+/hadoop/hdfs/trunk/src/java:1086482-1132839

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Tue Jun  7 01:11:15 2011
@@ -306,11 +306,22 @@ public class DFSInputStream extends FSIn
       blocks = getFinalizedBlockRange(offset, length);
     }
     else {
-      if (length + offset > locatedBlocks.getFileLength()) {
+      final boolean readPastEnd = offset + length > locatedBlocks.getFileLength();
+      /* if requested length is greater than current file length
+       * then, it could possibly be from the current block being
+       * written to. First get the finalized block range and then
+       * if necessary, get the length of last block being written
+       * to.
+       */
+      if (readPastEnd)
         length = locatedBlocks.getFileLength() - offset;
-      }
+
       blocks = getFinalizedBlockRange(offset, length);
-      blocks.add(locatedBlocks.getLastLocatedBlock());
+      /* requested length is greater than what finalized blocks 
+       * have.
+       */
+      if (readPastEnd)
+        blocks.add(locatedBlocks.getLastLocatedBlock());
     }
     return blocks;
   }

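The hunk above (HDFS-1907) only adds the last, still-being-written block to a positional-read request when the request actually extends past the finalized length. A minimal sketch of the client-side pread call that exercises this path, assuming another client still has the (hypothetical) file open for write:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PositionalReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(new Path("/tmp/still-being-written.log"));
        byte[] buf = new byte[4096];
        // Positional read: fetch up to 4 KB starting at offset 0 without
        // moving the stream position; may return fewer bytes than requested.
        int n = in.read(0L, buf, 0, buf.length);
        System.out.println("read " + n + " bytes");
        in.close();
        fs.close();
      }
    }
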
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java Tue Jun  7 01:11:15 2011
@@ -444,22 +444,23 @@ public class DFSUtil {
   }
 
   /**
-   * return HTTP server info from the configuration
+   * return server http or https address from the configuration
    * @param conf
    * @param namenode - namenode address
-   * @return http server info
+   * @param httpsAddress -If true, and if security is enabled, returns server 
+   *                      https address. If false, returns server http address.
+   * @return server http or https address
    */
   public static String getInfoServer(
-      InetSocketAddress namenode, Configuration conf) {
+      InetSocketAddress namenode, Configuration conf, boolean httpsAddress) {
     String httpAddress = null;
     
-    String httpAddressKey = UserGroupInformation.isSecurityEnabled() ?
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY
+    String httpAddressKey = (UserGroupInformation.isSecurityEnabled() 
+        && httpsAddress) ? DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY
         : DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-    String httpAddressDefault = UserGroupInformation.isSecurityEnabled() ?
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT 
-        :DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-    
+    String httpAddressDefault = (UserGroupInformation.isSecurityEnabled() 
+        && httpsAddress) ? DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT
+        : DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
     if(namenode != null) {
       // if non-default namenode, try reverse look up 
       // the nameServiceID if it is available

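The new boolean parameter on DFSUtil#getInfoServer shown above selects the HTTPS address only when it is true and security is enabled; otherwise the HTTP address is returned. A small sketch of a call site under the new signature (the NameNode host and port are hypothetical):

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class InfoServerSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        InetSocketAddress nn = new InetSocketAddress("nn.example.com", 8020);
        // false: always resolve the HTTP info address.
        String httpAddr = DFSUtil.getInfoServer(nn, conf, false);
        // true: resolve the HTTPS info address when security is enabled,
        // otherwise fall back to the HTTP address.
        String httpsAddr = DFSUtil.getInfoServer(nn, conf, true);
        System.out.println(httpAddr + " / " + httpsAddr);
      }
    }
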
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Tue Jun  7 01:11:15 2011
@@ -51,10 +51,10 @@ public interface DataTransferProtocol {
    * when protocol changes. It is not very obvious. 
    */
   /*
-   * Version 24:
-   *    Remove deprecated fields.
+   * Version 25:
+   *    Encapsulate individual operation headers.
    */
-  public static final int DATA_TRANSFER_VERSION = 24;
+  public static final int DATA_TRANSFER_VERSION = 25;
 
   /** Operation */
   public enum Op {
@@ -89,7 +89,332 @@ public interface DataTransferProtocol {
     public void write(DataOutput out) throws IOException {
       out.write(code);
     }
-  };
+
+    /** Base class for all headers. */
+    private static abstract class BaseHeader implements Writable {
+      private ExtendedBlock block;
+      private Token<BlockTokenIdentifier> blockToken;
+      
+      private BaseHeader() {}
+      
+      private BaseHeader(
+          final ExtendedBlock block,
+          final Token<BlockTokenIdentifier> blockToken) {
+        this.block = block;
+        this.blockToken = blockToken;
+      }
+
+      /** @return the extended block. */
+      public final ExtendedBlock getBlock() {
+        return block;
+      }
+
+      /** @return the block token. */
+      public final Token<BlockTokenIdentifier> getBlockToken() {
+        return blockToken;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        block.writeId(out);
+        blockToken.write(out);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        block = new ExtendedBlock();
+        block.readId(in);
+
+        blockToken = new Token<BlockTokenIdentifier>();
+        blockToken.readFields(in);
+      }
+    }
+
+    /** Base header for all client operation. */
+    private static abstract class ClientOperationHeader extends BaseHeader {
+      private String clientName;
+      
+      private ClientOperationHeader() {}
+      
+      private ClientOperationHeader(
+          final ExtendedBlock block,
+          final Token<BlockTokenIdentifier> blockToken,
+          final String clientName) {
+        super(block, blockToken);
+        this.clientName = clientName;
+      }
+
+      /** @return client name. */
+      public final String getClientName() {
+        return clientName;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        super.write(out);
+        Text.writeString(out, clientName);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+        clientName = Text.readString(in);
+      }
+    }
+
+    /** {@link Op#READ_BLOCK} header. */
+    public static class ReadBlockHeader extends ClientOperationHeader {
+      private long offset;
+      private long length;
+
+      /** Default constructor */
+      public ReadBlockHeader() {}
+
+      /** Constructor with all parameters */
+      public ReadBlockHeader(
+          final ExtendedBlock blk,
+          final Token<BlockTokenIdentifier> blockToken,
+          final String clientName,
+          final long offset,
+          final long length) {
+        super(blk, blockToken, clientName);
+        this.offset = offset;
+        this.length = length;
+      }
+
+      /** @return the offset */
+      public long getOffset() {
+        return offset;
+      }
+
+      /** @return the length */
+      public long getLength() {
+        return length;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        super.write(out);
+        out.writeLong(offset);
+        out.writeLong(length);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+        offset = in.readLong();
+        length = in.readLong();
+      }
+    }
+
+    /** {@link Op#WRITE_BLOCK} header. */
+    public static class WriteBlockHeader extends ClientOperationHeader {
+      private DatanodeInfo[] targets;
+
+      private DatanodeInfo source;
+      private BlockConstructionStage stage;
+      private int pipelineSize;
+      private long minBytesRcvd;
+      private long maxBytesRcvd;
+      private long latestGenerationStamp;
+      
+      /** Default constructor */
+      public WriteBlockHeader() {}
+
+      /** Constructor with all parameters */
+      public WriteBlockHeader(
+          final ExtendedBlock blk,
+          final Token<BlockTokenIdentifier> blockToken,
+          final String clientName,
+          final DatanodeInfo[] targets,
+          final DatanodeInfo source,
+          final BlockConstructionStage stage,
+          final int pipelineSize,
+          final long minBytesRcvd,
+          final long maxBytesRcvd,
+          final long latestGenerationStamp
+          ) throws IOException {
+        super(blk, blockToken, clientName);
+        this.targets = targets;
+        this.source = source;
+        this.stage = stage;
+        this.pipelineSize = pipelineSize;
+        this.minBytesRcvd = minBytesRcvd;
+        this.maxBytesRcvd = maxBytesRcvd;
+        this.latestGenerationStamp = latestGenerationStamp;
+      }
+
+      /** @return targets. */
+      public DatanodeInfo[] getTargets() {
+        return targets;
+      }
+
+      /** @return the source */
+      public DatanodeInfo getSource() {
+        return source;
+      }
+
+      /** @return the stage */
+      public BlockConstructionStage getStage() {
+        return stage;
+      }
+
+      /** @return the pipeline size */
+      public int getPipelineSize() {
+        return pipelineSize;
+      }
+
+      /** @return the minimum bytes received. */
+      public long getMinBytesRcvd() {
+        return minBytesRcvd;
+      }
+
+      /** @return the maximum bytes received. */
+      public long getMaxBytesRcvd() {
+        return maxBytesRcvd;
+      }
+
+      /** @return the latest generation stamp */
+      public long getLatestGenerationStamp() {
+        return latestGenerationStamp;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        super.write(out);
+        Sender.write(out, 1, targets);
+
+        out.writeBoolean(source != null);
+        if (source != null) {
+          source.write(out);
+        }
+
+        stage.write(out);
+        out.writeInt(pipelineSize);
+        WritableUtils.writeVLong(out, minBytesRcvd);
+        WritableUtils.writeVLong(out, maxBytesRcvd);
+        WritableUtils.writeVLong(out, latestGenerationStamp);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+        targets = Receiver.readDatanodeInfos(in);
+
+        source = in.readBoolean()? DatanodeInfo.read(in): null;
+        stage = BlockConstructionStage.readFields(in);
+        pipelineSize = in.readInt(); // num of datanodes in entire pipeline
+        minBytesRcvd = WritableUtils.readVLong(in);
+        maxBytesRcvd = WritableUtils.readVLong(in);
+        latestGenerationStamp = WritableUtils.readVLong(in);
+      }
+    }
+
+    /** {@link Op#TRANSFER_BLOCK} header. */
+    public static class TransferBlockHeader extends ClientOperationHeader {
+      private DatanodeInfo[] targets;
+
+      /** Default constructor */
+      public TransferBlockHeader() {}
+
+      /** Constructor with all parameters */
+      public TransferBlockHeader(
+          final ExtendedBlock blk,
+          final Token<BlockTokenIdentifier> blockToken,
+          final String clientName,
+          final DatanodeInfo[] targets) throws IOException {
+        super(blk, blockToken, clientName);
+        this.targets = targets;
+      }
+
+      /** @return targets. */
+      public DatanodeInfo[] getTargets() {
+        return targets;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        super.write(out);
+        Sender.write(out, 0, targets);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+        targets = Receiver.readDatanodeInfos(in);
+      }
+    }
+
+    /** {@link Op#REPLACE_BLOCK} header. */
+    public static class ReplaceBlockHeader extends BaseHeader {
+      private String delHint;
+      private DatanodeInfo source;
+
+      /** Default constructor */
+      public ReplaceBlockHeader() {}
+
+      /** Constructor with all parameters */
+      public ReplaceBlockHeader(final ExtendedBlock blk,
+          final Token<BlockTokenIdentifier> blockToken,
+          final String storageId,
+          final DatanodeInfo src) throws IOException {
+        super(blk, blockToken);
+        this.delHint = storageId;
+        this.source = src;
+      }
+
+      /** @return delete-hint. */
+      public String getDelHint() {
+        return delHint;
+      }
+
+      /** @return source datanode. */
+      public DatanodeInfo getSource() {
+        return source;
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        super.write(out);
+        Text.writeString(out, delHint);
+        source.write(out);
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+        delHint = Text.readString(in);
+        source = DatanodeInfo.read(in);
+      }
+    }
+
+    /** {@link Op#COPY_BLOCK} header. */
+    public static class CopyBlockHeader extends BaseHeader {
+      /** Default constructor */
+      public CopyBlockHeader() {}
+
+      /** Constructor with all parameters */
+      public CopyBlockHeader(
+          final ExtendedBlock block,
+          final Token<BlockTokenIdentifier> blockToken) {
+        super(block, blockToken);
+      }
+    }
+
+    /** {@link Op#BLOCK_CHECKSUM} header. */
+    public static class BlockChecksumHeader extends BaseHeader {
+      /** Default constructor */
+      public BlockChecksumHeader() {}
+
+      /** Constructor with all parameters */
+      public BlockChecksumHeader(
+          final ExtendedBlock block,
+          final Token<BlockTokenIdentifier> blockToken) {
+        super(block, blockToken);
+      }
+    }
+  }
+
 
   /** Status */
   public enum Status {
@@ -189,24 +514,27 @@ public interface DataTransferProtocol {
   @InterfaceStability.Evolving
   public static class Sender {
     /** Initialize a operation. */
-    public static void op(DataOutputStream out, Op op) throws IOException {
+    private static void op(final DataOutput out, final Op op
+        ) throws IOException {
       out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
       op.write(out);
     }
 
+    /** Send an operation request. */
+    private static void send(final DataOutputStream out, final Op opcode,
+        final Op.BaseHeader parameters) throws IOException {
+      op(out, opcode);
+      parameters.write(out);
+      out.flush();
+    }
+
     /** Send OP_READ_BLOCK */
     public static void opReadBlock(DataOutputStream out, ExtendedBlock blk,
         long blockOffset, long blockLen, String clientName,
         Token<BlockTokenIdentifier> blockToken)
         throws IOException {
-      op(out, Op.READ_BLOCK);
-
-      blk.writeId(out);
-      out.writeLong(blockOffset);
-      out.writeLong(blockLen);
-      Text.writeString(out, clientName);
-      blockToken.write(out);
-      out.flush();
+      send(out, Op.READ_BLOCK, new Op.ReadBlockHeader(blk, blockToken,
+          clientName, blockOffset, blockLen));
     }
     
     /** Send OP_WRITE_BLOCK */
@@ -215,74 +543,43 @@ public interface DataTransferProtocol {
         long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
         DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
         throws IOException {
-      op(out, Op.WRITE_BLOCK);
-
-      blk.writeId(out);
-      out.writeInt(pipelineSize);
-      stage.write(out);
-      WritableUtils.writeVLong(out, newGs);
-      WritableUtils.writeVLong(out, minBytesRcvd);
-      WritableUtils.writeVLong(out, maxBytesRcvd);
-      Text.writeString(out, client);
-
-      out.writeBoolean(src != null);
-      if (src != null) {
-        src.write(out);
-      }
-      write(out, 1, targets);
-      blockToken.write(out);
+      send(out, Op.WRITE_BLOCK, new Op.WriteBlockHeader(blk, blockToken,
+          client, targets, src, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
+          newGs));
     }
 
     /** Send {@link Op#TRANSFER_BLOCK} */
     public static void opTransferBlock(DataOutputStream out, ExtendedBlock blk,
         String client, DatanodeInfo[] targets,
         Token<BlockTokenIdentifier> blockToken) throws IOException {
-      op(out, Op.TRANSFER_BLOCK);
-
-      blk.writeId(out);
-      Text.writeString(out, client);
-      write(out, 0, targets);
-      blockToken.write(out);
-      out.flush();
+      send(out, Op.TRANSFER_BLOCK, new Op.TransferBlockHeader(blk, blockToken,
+          client, targets));
     }
 
     /** Send OP_REPLACE_BLOCK */
     public static void opReplaceBlock(DataOutputStream out,
-        ExtendedBlock blk, String storageId, DatanodeInfo src,
+        ExtendedBlock blk, String delHint, DatanodeInfo src,
         Token<BlockTokenIdentifier> blockToken) throws IOException {
-      op(out, Op.REPLACE_BLOCK);
-
-      blk.writeId(out);
-      Text.writeString(out, storageId);
-      src.write(out);
-      blockToken.write(out);
-      out.flush();
+      send(out, Op.REPLACE_BLOCK, new Op.ReplaceBlockHeader(blk, blockToken,
+          delHint, src));
     }
 
     /** Send OP_COPY_BLOCK */
     public static void opCopyBlock(DataOutputStream out, ExtendedBlock blk,
         Token<BlockTokenIdentifier> blockToken)
         throws IOException {
-      op(out, Op.COPY_BLOCK);
-
-      blk.writeId(out);
-      blockToken.write(out);
-      out.flush();
+      send(out, Op.COPY_BLOCK, new Op.CopyBlockHeader(blk, blockToken));
     }
 
     /** Send OP_BLOCK_CHECKSUM */
     public static void opBlockChecksum(DataOutputStream out, ExtendedBlock blk,
         Token<BlockTokenIdentifier> blockToken)
         throws IOException {
-      op(out, Op.BLOCK_CHECKSUM);
-      
-      blk.writeId(out);
-      blockToken.write(out);
-      out.flush();
+      send(out, Op.BLOCK_CHECKSUM, new Op.BlockChecksumHeader(blk, blockToken));
     }
 
     /** Write an array of {@link DatanodeInfo} */
-    private static void write(final DataOutputStream out,
+    private static void write(final DataOutput out,
         final int start, 
         final DatanodeInfo[] datanodeinfos) throws IOException {
       out.writeInt(datanodeinfos.length - start);
@@ -334,14 +631,10 @@ public interface DataTransferProtocol {
 
     /** Receive OP_READ_BLOCK */
     private void opReadBlock(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final long offset = in.readLong();
-      final long length = in.readLong();
-      final String client = Text.readString(in);
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opReadBlock(in, blk, offset, length, client, blockToken);
+      final Op.ReadBlockHeader h = new Op.ReadBlockHeader();
+      h.readFields(in);
+      opReadBlock(in, h.getBlock(), h.getOffset(), h.getLength(),
+          h.getClientName(), h.getBlockToken());
     }
 
     /**
@@ -353,22 +646,12 @@ public interface DataTransferProtocol {
     
     /** Receive OP_WRITE_BLOCK */
     private void opWriteBlock(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
-      final BlockConstructionStage stage = 
-        BlockConstructionStage.readFields(in);
-      final long newGs = WritableUtils.readVLong(in);
-      final long minBytesRcvd = WritableUtils.readVLong(in);
-      final long maxBytesRcvd = WritableUtils.readVLong(in);
-      final String client = Text.readString(in); // working on behalf of this client
-      final DatanodeInfo src = in.readBoolean()? DatanodeInfo.read(in): null;
-
-      final DatanodeInfo targets[] = readDatanodeInfos(in);
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opWriteBlock(in, blk, pipelineSize, stage,
-          newGs, minBytesRcvd, maxBytesRcvd, client, src, targets, blockToken);
+      final Op.WriteBlockHeader h = new Op.WriteBlockHeader();
+      h.readFields(in);
+      opWriteBlock(in, h.getBlock(), h.getPipelineSize(), h.getStage(),
+          h.getLatestGenerationStamp(),
+          h.getMinBytesRcvd(), h.getMaxBytesRcvd(),
+          h.getClientName(), h.getSource(), h.getTargets(), h.getBlockToken());
     }
 
     /**
@@ -383,13 +666,10 @@ public interface DataTransferProtocol {
 
     /** Receive {@link Op#TRANSFER_BLOCK} */
     private void opTransferBlock(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final String client = Text.readString(in);
-      final DatanodeInfo targets[] = readDatanodeInfos(in);
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opTransferBlock(in, blk, client, targets, blockToken);
+      final Op.TransferBlockHeader h = new Op.TransferBlockHeader();
+      h.readFields(in);
+      opTransferBlock(in, h.getBlock(), h.getClientName(), h.getTargets(),
+          h.getBlockToken());
     }
 
     /**
@@ -404,13 +684,10 @@ public interface DataTransferProtocol {
 
     /** Receive OP_REPLACE_BLOCK */
     private void opReplaceBlock(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final String sourceId = Text.readString(in); // read del hint
-      final DatanodeInfo src = DatanodeInfo.read(in); // read proxy source
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opReplaceBlock(in, blk, sourceId, src, blockToken);
+      final Op.ReplaceBlockHeader h = new Op.ReplaceBlockHeader();
+      h.readFields(in);
+      opReplaceBlock(in, h.getBlock(), h.getDelHint(), h.getSource(),
+          h.getBlockToken());
     }
 
     /**
@@ -418,16 +695,14 @@ public interface DataTransferProtocol {
      * It is used for balancing purpose; send to a destination
      */
     protected abstract void opReplaceBlock(DataInputStream in,
-        ExtendedBlock blk, String sourceId, DatanodeInfo src,
+        ExtendedBlock blk, String delHint, DatanodeInfo src,
         Token<BlockTokenIdentifier> blockToken) throws IOException;
 
     /** Receive OP_COPY_BLOCK */
     private void opCopyBlock(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opCopyBlock(in, blk, blockToken);
+      final Op.CopyBlockHeader h = new Op.CopyBlockHeader();
+      h.readFields(in);
+      opCopyBlock(in, h.getBlock(), h.getBlockToken());
     }
 
     /**
@@ -440,11 +715,9 @@ public interface DataTransferProtocol {
 
     /** Receive OP_BLOCK_CHECKSUM */
     private void opBlockChecksum(DataInputStream in) throws IOException {
-      final ExtendedBlock blk = new ExtendedBlock();
-      blk.readId(in);
-      final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
-
-      opBlockChecksum(in, blk, blockToken);
+      final Op.BlockChecksumHeader h = new Op.BlockChecksumHeader();
+      h.readFields(in);
+      opBlockChecksum(in, h.getBlock(), h.getBlockToken());
     }
 
     /**
@@ -456,7 +729,7 @@ public interface DataTransferProtocol {
         throws IOException;
 
     /** Read an array of {@link DatanodeInfo} */
-    private static DatanodeInfo[] readDatanodeInfos(final DataInputStream in
+    private static DatanodeInfo[] readDatanodeInfos(final DataInput in
         ) throws IOException {
       final int n = in.readInt();
       if (n < 0) {
@@ -469,14 +742,6 @@ public interface DataTransferProtocol {
       }
       return datanodeinfos;
     }
-
-    /** Read an AccessToken */
-    static private Token<BlockTokenIdentifier> readBlockToken(DataInputStream in
-        ) throws IOException {
-      final Token<BlockTokenIdentifier> t = new Token<BlockTokenIdentifier>();
-      t.readFields(in);
-      return t; 
-    }
   }
   
   /** reply **/
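
The refactoring above routes every operation through one path: Sender.send() writes
DATA_TRANSFER_VERSION and the opcode, serializes a single Writable header object, and
flushes, while Receiver reconstructs the same header via readFields() and dispatches to
the protected op*() callback. As an illustrative sketch only (not part of this patch; the
socket address and port, the CopyBlockClientSketch class name, and the empty block/token
values are placeholders), a caller of the refactored Sender might look like:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.net.Socket;

    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    public class CopyBlockClientSketch {
      public static void main(String[] args) throws IOException {
        // Connect to a datanode's data transfer port (placeholder address/port).
        Socket s = new Socket("127.0.0.1", 50010);
        try {
          DataOutputStream out = new DataOutputStream(s.getOutputStream());

          // Placeholder block and access token; a real client obtains both
          // from the namenode before contacting the datanode.
          ExtendedBlock blk = new ExtendedBlock();
          Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>();

          // opCopyBlock now builds a CopyBlockHeader and hands it to send(),
          // which writes the version, the COPY_BLOCK opcode and the header,
          // then flushes the stream.
          DataTransferProtocol.Sender.opCopyBlock(out, blk, token);
        } finally {
          s.close();
        }
      }
    }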

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Tue Jun  7 01:11:15 2011
@@ -87,19 +87,16 @@ public interface FSConstants {
    */
   public static final String HDFS_URI_SCHEME = "hdfs";
 
-  // Version is reflected in the dfs image and edit log files.
-  // Version is reflected in the data storage file.
-  // Versions are negative.
-  // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -37;
-  // Current version: 
-  // -35: Adding support for block pools and multiple namenodes
-  // -36: persistent transaction IDs
-  // -37: file names based on txids
+  /**
+   * Please see {@link LayoutVersion} on adding a new layout version.
+   */
+  public static final int LAYOUT_VERSION = 
+    LayoutVersion.getCurrentLayoutVersion();
 
-  // Record of version numbers for specific changes:
+  // TODO: remove these in favor of LayoutVersion calls
   // Version where the edits log and image stored txn ID information
-  public static final int FIRST_STORED_TXIDS_VERSION = -36;
+  public static final int FIRST_STORED_TXIDS_VERSION = -37;
   // Version where the edits log and image file names are based on txn IDs
-  public static final int FIRST_TXNID_BASED_LAYOUT_VERSION = -37;
+  public static final int FIRST_TXNID_BASED_LAYOUT_VERSION = -38;
+
 }

Copied: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (from r1132839, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java)
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?p2=hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java&p1=hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java&r1=1132839&r2=1132846&rev=1132846&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Tue Jun  7 01:11:15 2011
@@ -78,7 +78,9 @@ public class LayoutVersion {
     RESERVED_REL22(-33, -27, "Reserved for release 0.22"),
     RESERVED_REL23(-34, -30, "Reserved for release 0.23"),
     FEDERATION(-35, "Support for namenode federation"),
-    LEASE_REASSIGNMENT(-36, "Support for persisting lease holder reassignment");
+    LEASE_REASSIGNMENT(-36, "Support for persisting lease holder reassignment"),
+    STORED_TXIDS(-37, "Transaction IDs are stored in edits log and image files"),
+    TXID_BASED_LAYOUT(-38, "File names in NN Storage are based on transaction IDs");
     
     final int lv;
     final int ancestorLV;
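
The two new Feature entries line up with the updated constants in FSConstants above:
STORED_TXIDS is now layout version -37 and TXID_BASED_LAYOUT is -38, one lower than
before because trunk's LEASE_REASSIGNMENT(-36) occupies the old slot. Per the TODO in
FSConstants, callers are expected to test capabilities through LayoutVersion.supports()
rather than compare raw layout version numbers. A minimal sketch of such a check
(illustrative only; the LayoutVersionCheckSketch class name is made up, and the full
supports() semantics, including the reserved-release ancestor mapping, live in
LayoutVersion itself):

    import org.apache.hadoop.hdfs.protocol.FSConstants;
    import org.apache.hadoop.hdfs.protocol.LayoutVersion;
    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    public class LayoutVersionCheckSketch {
      public static void main(String[] args) {
        // The current layout version is now derived from the Feature enum
        // rather than a hand-maintained constant in FSConstants.
        int lv = FSConstants.LAYOUT_VERSION;

        // Capability checks replace comparisons against magic numbers such as
        // the LAST_PRE_FEDERATION_LAYOUT_VERSION constant removed from Storage
        // in this commit.
        boolean federation = LayoutVersion.supports(Feature.FEDERATION, lv);
        boolean storedTxids = LayoutVersion.supports(Feature.STORED_TXIDS, lv);

        System.out.println("layoutVersion=" + lv
            + " federation=" + federation
            + " storedTxids=" + storedTxids);
      }
    }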

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java Tue Jun  7 01:11:15 2011
@@ -172,5 +172,8 @@ public interface HdfsConstants {
      */
     COMMITTED;
   }
+  
+  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
+  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
 }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Tue Jun  7 01:11:15 2011
@@ -33,6 +33,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.fs.FileUtil;
@@ -75,12 +77,6 @@ public abstract class Storage extends St
    * any upgrade code that uses this constant should also be removed. */
   public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
   
-  // last layout version that did not support persistent rbw replicas
-  public static final int PRE_RBW_LAYOUT_VERSION = -19;
-  
-  // last layout version that is before federation
-  public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;
-  
   /** Layout versions of 0.20.203 release */
   public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
 
@@ -796,8 +792,8 @@ public abstract class Storage extends St
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
-    // Set clusterID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set clusterID in version with federation support
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("clusterID", clusterID);
     }
     props.setProperty("cTime", String.valueOf(cTime));
@@ -892,7 +888,7 @@ public abstract class Storage extends St
     String property = props.getProperty(name);
     if (property == null) {
       throw new InconsistentFSStateException(sd.root, "file "
-          + STORAGE_FILE_VERSION + " has " + name + " mising.");
+          + STORAGE_FILE_VERSION + " has " + name + " missing.");
     }
     return property;
   }
@@ -917,8 +913,8 @@ public abstract class Storage extends St
   /** Validate and set clusterId from {@link Properties}*/
   protected void setClusterId(Properties props, int layoutVersion,
       StorageDirectory sd) throws InconsistentFSStateException {
-    // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
-    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+    // Set cluster ID in version that supports federation
+    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       String cid = getProperty(props, sd, "clusterID");
       if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
         throw new InconsistentFSStateException(sd.getRoot(),

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Tue Jun  7 01:11:15 2011
@@ -28,8 +28,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -61,10 +64,6 @@ public class BlockPoolSliceStorage exten
 
   private String blockpoolID = ""; // id of the blockpool
 
-  BlockPoolSliceStorage() {
-    super(NodeType.DATA_NODE);
-  }
-
   public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
     super(NodeType.DATA_NODE, storageInfo);
     blockpoolID = bpid;
@@ -82,13 +81,14 @@ public class BlockPoolSliceStorage exten
   /**
    * Analyze storage directories. Recover from previous transitions if required.
    * 
+   * @param datanode Datanode to which this storage belongs
    * @param nsInfo namespace information
    * @param dataDirs storage directories of block pool
    * @param startOpt startup option
    * @throws IOException on error
    */
-  void recoverTransitionRead(NamespaceInfo nsInfo, Collection<File> dataDirs,
-      StartupOption startOpt) throws IOException {
+  void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
+      Collection<File> dataDirs, StartupOption startOpt) throws IOException {
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() 
         : "Block-pool and name-node layout versions must be the same.";
 
@@ -138,7 +138,7 @@ public class BlockPoolSliceStorage exten
     // During startup some of them can upgrade or roll back
     // while others could be up-to-date for the regular startup.
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      doTransition(getStorageDir(idx), nsInfo, startOpt);
+      doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
       assert getLayoutVersion() == nsInfo.getLayoutVersion() 
           : "Data-node and name-node layout versions must be the same.";
       assert getCTime() == nsInfo.getCTime() 
@@ -226,12 +226,13 @@ public class BlockPoolSliceStorage exten
    * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular
    * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
    * 
-   * @param sd storage directory,
+   * @param datanode DataNode to which this storage belongs
+   * @param sd storage directory <SD>/current/<bpid>
    * @param nsInfo namespace info
    * @param startOpt startup option
    * @throws IOException
    */
-  private void doTransition(StorageDirectory sd, // i.e. <SD>/current/<bpid>
+  private void doTransition(DataNode datanode, StorageDirectory sd,
       NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
     if (startOpt == StartupOption.ROLLBACK)
       doRollback(sd, nsInfo); // rollback if applicable
@@ -257,7 +258,9 @@ public class BlockPoolSliceStorage exten
       return; // regular startup
     
     // verify necessity of a distributed upgrade
-    verifyDistributedUpgradeProgress(nsInfo);
+    UpgradeManagerDatanode um = 
+      datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
+    verifyDistributedUpgradeProgress(um, nsInfo);
     if (this.layoutVersion > FSConstants.LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
       doUpgrade(sd, nsInfo); // upgrade
@@ -291,7 +294,7 @@ public class BlockPoolSliceStorage exten
    */
   void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
     // Upgrading is applicable only to release with federation or after
-    if (!(this.getLayoutVersion() < LAST_PRE_FEDERATION_LAYOUT_VERSION)) {
+    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       return;
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
@@ -346,10 +349,10 @@ public class BlockPoolSliceStorage exten
    * @throws IOException if the directory is not empty or it can not be removed
    */
   private void cleanupDetachDir(File detachDir) throws IOException {
-    if (layoutVersion >= PRE_RBW_LAYOUT_VERSION && detachDir.exists()
-        && detachDir.isDirectory()) {
+    if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion)
+        && detachDir.exists() && detachDir.isDirectory()) {
 
-      if (detachDir.list().length != 0) {
+      if (FileUtil.list(detachDir).length != 0) {
         throw new IOException("Detached directory " + detachDir
             + " is not empty. Please manually move each file under this "
             + "directory to the finalized directory if the finalized "
@@ -472,10 +475,8 @@ public class BlockPoolSliceStorage exten
     LOG.info( hardLink.linkStats.report() );
   }
 
-  private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)
-      throws IOException {
-    UpgradeManagerDatanode um = 
-      DataNode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
+  private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
+      NamespaceInfo nsInfo) throws IOException {
     assert um != null : "DataNode.upgradeManager is null.";
     um.setUpgradeState(false, getLayoutVersion());
     um.initializeUpgrade(nsInfo);

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Tue Jun  7 01:11:15 2011
@@ -1009,6 +1009,10 @@ class BlockReceiver implements Closeable
             }
             PipelineAck replyAck = new PipelineAck(expected, replies);
             
+            if (replyAck.isSuccess() && 
+                 pkt.offsetInBlock > replicaInfo.getBytesAcked())
+                replicaInfo.setBytesAcked(pkt.offsetInBlock);
+
             // send my ack back to upstream datanode
             replyAck.write(upstreamOut);
             upstreamOut.flush();
@@ -1019,10 +1023,6 @@ class BlockReceiver implements Closeable
               // remove the packet from the ack queue
               removeAckHead();
               // update bytes acked
-              if (replyAck.isSuccess() && 
-                  pkt.offsetInBlock > replicaInfo.getBytesAcked()) {
-                replicaInfo.setBytesAcked(pkt.offsetInBlock);
-              }
             }
         } catch (IOException e) {
           LOG.warn("IOException in BlockReceiver.run(): ", e);

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Tue Jun  7 01:11:15 2011
@@ -29,7 +29,6 @@ import javax.servlet.http.HttpServletRes
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
@@ -101,7 +100,7 @@ public class DataBlockScanner implements
   private void waitForInit(String bpid) {
     UpgradeManagerDatanode um = null;
     if(bpid != null && !bpid.equals(""))
-      um = DataNode.getUpgradeManagerDatanode(bpid);
+      um = datanode.getUpgradeManagerDatanode(bpid);
     
     while ((um != null && ! um.isUpgradeCompleted())
         || (getBlockPoolSetSize() < datanode.getAllBpOs().length)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Jun  7 01:11:15 2011
@@ -37,7 +37,6 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
-import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.security.SecureRandom;
 import java.util.AbstractList;
@@ -348,12 +347,12 @@ public class DataNode extends Configured
   boolean resetBlockReportTime = true;
   long initialBlockReportDelay = BLOCKREPORT_INITIAL_DELAY * 1000L;
   long heartBeatInterval;
+  private boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
   DataNodeMetrics metrics;
   private InetSocketAddress selfAddr;
   
-  private static volatile DataNode datanodeObject = null;
   private volatile String hostName; // Host name of this datanode
   
   private static String dnThreadName;
@@ -398,8 +397,6 @@ public class DataNode extends Configured
            final SecureResources resources) throws IOException {
     super(conf);
 
-    DataNode.setDataNode(this);
-    
     try {
       hostName = getHostName(conf);
       startDataNode(conf, dataDirs, resources);
@@ -590,7 +587,7 @@ public class DataNode extends Configured
       reason = "verification is supported only with FSDataset";
     } 
     if (reason == null) {
-      directoryScanner = new DirectoryScanner((FSDataset) data, conf);
+      directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -648,6 +645,12 @@ public class DataNode extends Configured
     bpos.reportBadBlocks(block);
   }
   
+  // used only for testing
+  void setHeartbeatsDisabledForTests(
+      boolean heartbeatsDisabledForTests) {
+    this.heartbeatsDisabledForTests = heartbeatsDisabledForTests;
+  }
+  
   /**
    * A thread per namenode to perform:
    * <ul>
@@ -793,7 +796,8 @@ public class DataNode extends Configured
         bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID;
       } else {
         // read storage info, lock data dirs and transition fs state if necessary          
-        storage.recoverTransitionRead(blockPoolId, bpNSInfo, dataDirs, startOpt);
+        storage.recoverTransitionRead(DataNode.this, blockPoolId, bpNSInfo,
+            dataDirs, startOpt);
         LOG.info("setting up storage: nsid=" + storage.namespaceID + ";bpid="
             + blockPoolId + ";lv=" + storage.layoutVersion + ";nsInfo="
             + bpNSInfo);
@@ -1037,10 +1041,12 @@ public class DataNode extends Configured
             // -- Bytes remaining
             //
             lastHeartbeat = startTime;
-            DatanodeCommand[] cmds = sendHeartBeat();
-            metrics.addHeartbeat(now() - startTime);
-            if (!processCommand(cmds))
-              continue;
+            if (!heartbeatsDisabledForTests) {
+              DatanodeCommand[] cmds = sendHeartBeat();
+              metrics.addHeartbeat(now() - startTime);
+              if (!processCommand(cmds))
+                continue;
+            }
           }
 
           reportReceivedBlocks();
@@ -1324,7 +1330,7 @@ public class DataNode extends Configured
     synchronized UpgradeManagerDatanode getUpgradeManager() {
       if(upgradeManager == null)
         upgradeManager = 
-          new UpgradeManagerDatanode(DataNode.getDataNode(), blockPoolId);
+          new UpgradeManagerDatanode(DataNode.this, blockPoolId);
       
       return upgradeManager;
     }
@@ -1403,7 +1409,7 @@ public class DataNode extends Configured
       conf.getBoolean("dfs.datanode.simulateddatastorage", false);
 
     if (simulatedFSDataset) {
-      storage.createStorageID();
+      storage.createStorageID(getPort());
       // it would have been better to pass storage as a parameter to
       // constructor below - need to augment ReflectionUtils used below.
       conf.set(DFS_DATANODE_STORAGEID_KEY, getStorageId());
@@ -1416,7 +1422,7 @@ public class DataNode extends Configured
         throw new IOException(StringUtils.stringifyException(e));
       }
     } else {
-      data = new FSDataset(storage, conf);
+      data = new FSDataset(this, storage, conf);
     }
   }
 
@@ -1491,22 +1497,13 @@ public class DataNode extends Configured
            SocketChannel.open().socket() : new Socket();                                   
   }
 
-  private static void setDataNode(DataNode node) {
-    datanodeObject = node;
-  }
-
-  /** Return the DataNode object */
-  public static DataNode getDataNode() {
-    return datanodeObject;
-  } 
-
   public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
       DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
     throws IOException {
     final InetSocketAddress addr = NetUtils.createSocketAddr(
         datanodeid.getHost() + ":" + datanodeid.getIpcPort());
     if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
-      InterDatanodeProtocol.LOG.info("InterDatanodeProtocol addr=" + addr);
+      InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
     }
     UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
@@ -1551,11 +1548,7 @@ public class DataNode extends Configured
     dnId.storageID = createNewStorageId(dnId.getPort());
   }
   
-  static String createNewStorageId() {
-    return createNewStorageId(datanodeObject.getPort());
-  }
-  
-  private static String createNewStorageId(int port) {
+  static String createNewStorageId(int port) {
     /* Return 
      * "DS-randInt-ipaddr-currentTimeMillis"
      * It is considered extermely rare for all these numbers to match
@@ -1728,9 +1721,8 @@ public class DataNode extends Configured
     return threadGroup == null ? 0 : threadGroup.activeCount();
   }
     
-  static UpgradeManagerDatanode getUpgradeManagerDatanode(String bpid) {
-    DataNode dn = getDataNode();
-    BPOfferService bpos = dn.blockPoolManager.get(bpid);
+  UpgradeManagerDatanode getUpgradeManagerDatanode(String bpid) {
+    BPOfferService bpos = blockPoolManager.get(bpid);
     if(bpos==null) {
       return null;
     }