Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/02 09:28:49 UTC

svn commit: r1308260 [1/4] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/sbin/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/ hadoop-hdfs/src/main/java/ hadoop...

Author: todd
Date: Mon Apr  2 07:28:42 2012
New Revision: 1308260

URL: http://svn.apache.org/viewvc?rev=1308260&view=rev
Log:
Merge trunk into auto-failover branch

Added:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
      - copied unchanged from r1308235, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
Removed:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz
Modified:
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt
    hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1306587-1308235

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh Mon Apr  2 07:28:42 2012
@@ -55,8 +55,8 @@ if [ "${1}" = "stop" ]; then
 fi
 
 if [ "${HTTPFS_SILENT}" != "true" ]; then
-  ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
+  exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
 else
-  ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
+  exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
 fi
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Apr  2 07:28:42 2012
@@ -117,6 +117,12 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-2303. Unbundle jsvc. (Roman Shaposhnik and Mingjie Lai via eli)
 
+    HDFS-3137. Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16. (eli)
+    
+    HDFS-3138. Move DatanodeInfo#ipcPort to DatanodeID. (eli)
+
+    HDFS-3164. Move DatanodeInfo#hostName to DatanodeID. (eli)
+
   NEW FEATURES
 
     HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
@@ -171,6 +177,8 @@ Release 2.0.0 - UNRELEASED 
     DistributedFileSystem to @InterfaceAudience.LimitedPrivate.
     (harsh via szetszwo)
 
+    HDFS-3167. CLI-based driver for MiniDFSCluster. (Henry Robinson via atm)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -279,6 +287,15 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3155. Clean up FSDataset implementation related code.  (szetszwo)
 
+    HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used
+    space and capacity per DN. (atm)
+
+    HDFS-3172. dfs.upgrade.permission is dead code. (eli)
+
+    HDFS-3171. The DatanodeID "name" field is overloaded. (eli)
+
+    HDFS-3144. Refactor DatanodeID#getName by use. (eli)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -366,6 +383,15 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)
 
+    HDFS-3142. TestHDFSCLI.testAll is failing. (Brandon Li via atm)
+
+    HDFS-3070. HDFS balancer doesn't ensure that hdfs-site.xml is loaded. (atm)
+
+    HDFS-2995. start-dfs.sh should only start the 2NN for namenodes
+    with dfs.namenode.secondary.http-address configured. (eli)
+
+    HDFS-3174. Fix assert in TestPendingDataNodeMessages. (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -713,6 +739,9 @@ Release 0.23.2 - UNRELEASED 
 
     HDFS-3104. Add tests for HADOOP-8175. (Daryn Sharp via szetszwo)
 
+    HDFS-3066. Cap space usage of default log4j rolling policy.
+    (Patrick Hunt via eli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -764,6 +793,9 @@ Release 0.23.2 - UNRELEASED 
 
     HDFS-3101. Cannot read empty file using WebHDFS.  (szetszwo)
 
+    HDFS-3160. httpfs should exec catalina instead of forking it.
+    (Roman Shaposhnik via eli)
+
 Release 0.23.1 - 2012-02-17 
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Mon Apr  2 07:28:42 2012
@@ -120,7 +120,7 @@ export CLASSPATH=$CLASSPATH
 
 #turn security logger on the namenode
 if [ $COMMAND = "namenode" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
+  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}"
 else
   HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
 fi

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Mon Apr  2 07:28:42 2012
@@ -76,11 +76,13 @@ fi
 
 SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 
-echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+if [ -n "$SECONDARY_NAMENODES" ]; then
+  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$SECONDARY_NAMENODES" \
-    --script "$bin/hdfs" start secondarynamenode
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+      --config "$HADOOP_CONF_DIR" \
+      --hostnames "$SECONDARY_NAMENODES" \
+      --script "$bin/hdfs" start secondarynamenode
+fi
 
 # eof

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh Mon Apr  2 07:28:42 2012
@@ -52,11 +52,13 @@ fi
 
 SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 
-echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+if [ -n "$SECONDARY_NAMENODES" ]; then
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$SECONDARY_NAMENODES" \
-    --script "$bin/hdfs" stop secondarynamenode
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+      --config "$HADOOP_CONF_DIR" \
+      --hostnames "$SECONDARY_NAMENODES" \
+      --script "$bin/hdfs" stop secondarynamenode
+fi
 
 # eof

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml Mon Apr  2 07:28:42 2012
@@ -239,11 +239,6 @@ to the web server.</p>
 	<br />The name of the group of super-users.
 	</li>
 
-	<li><code>dfs.namenode.upgrade.permission = 0777</code>
-	<br />The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files. 
-		For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
-    </li>
-    
 	<li><code>fs.permissions.umask-mode = 022</code>
     <br />The <code>umask</code> used when creating files and directories. For configuration files, the decimal 
 		value <em>18<sub>10</sub></em> may be used.
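
The octal/decimal equivalence the guide text above relies on, as a minimal Java sketch (illustrative only, not part of this commit):

    // fs.permissions.umask-mode = 022 is octal; its decimal form is 18.
    int fromOctal = Integer.parseInt("022", 8);  // 0*64 + 2*8 + 2 = 18
    int fromDecimal = 18;
    assert fromOctal == fromDecimal;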

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1306587-1308235

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Mon Apr  2 07:28:42 2012
@@ -240,7 +240,7 @@ class BlockReaderLocal implements BlockR
   private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
       DatanodeInfo node, Configuration conf, int timeout,
       Token<BlockTokenIdentifier> token) throws IOException {
-    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
+    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
     BlockLocalPathInfo pathinfo = null;
     ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
         conf, timeout);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Mon Apr  2 07:28:42 2012
@@ -1340,7 +1340,8 @@ public class DFSClient implements java.i
           //connect to a datanode
           sock = socketFactory.createSocket();
           NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
+              NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
+              timeout);
           sock.setSoTimeout(timeout);
 
           out = new DataOutputStream(
@@ -1349,7 +1350,7 @@ public class DFSClient implements java.i
           in = new DataInputStream(NetUtils.getInputStream(sock));
 
           if (LOG.isDebugEnabled()) {
-            LOG.debug("write to " + datanodes[j].getName() + ": "
+            LOG.debug("write to " + datanodes[j] + ": "
                 + Op.BLOCK_CHECKSUM + ", block=" + block);
           }
           // get block MD5
@@ -1364,7 +1365,7 @@ public class DFSClient implements java.i
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
                     + "for file " + src + " for block " + block
-                    + " from datanode " + datanodes[j].getName()
+                    + " from datanode " + datanodes[j]
                     + ". Will retry the block once.");
               }
               lastRetriedIndex = i;
@@ -1374,7 +1375,7 @@ public class DFSClient implements java.i
               break;
             } else {
               throw new IOException("Bad response " + reply + " for block "
-                  + block + " from datanode " + datanodes[j].getName());
+                  + block + " from datanode " + datanodes[j]);
             }
           }
           
@@ -1409,12 +1410,10 @@ public class DFSClient implements java.i
               LOG.debug("set bytesPerCRC=" + bytesPerCRC
                   + ", crcPerBlock=" + crcPerBlock);
             }
-            LOG.debug("got reply from " + datanodes[j].getName()
-                + ": md5=" + md5);
+            LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
           }
         } catch (IOException ie) {
-          LOG.warn("src=" + src + ", datanodes[" + j + "].getName()="
-              + datanodes[j].getName(), ie);
+          LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
         } finally {
           IOUtils.closeStream(in);
           IOUtils.closeStream(out);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Mon Apr  2 07:28:42 2012
@@ -107,8 +107,6 @@ public class DFSConfigKeys extends Commo
   public static final long    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
   public static final String  DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
   public static final long    DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
-  public static final String  DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
-  public static final int     DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
   public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
   public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
   public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Mon Apr  2 07:28:42 2012
@@ -543,7 +543,7 @@ public class DFSInputStream extends FSIn
         return reader.doRead(blockReader, off, len);
       } catch ( ChecksumException ce ) {
         DFSClient.LOG.warn("Found Checksum error for "
-            + getCurrentBlock() + " from " + currentNode.getName()
+            + getCurrentBlock() + " from " + currentNode
             + " at " + ce.getPos());        
         ioe = ce;
         retryCurrentNode = false;
@@ -671,7 +671,7 @@ public class DFSInputStream extends FSIn
       try {
         DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
         InetSocketAddress targetAddr = 
-                          NetUtils.createSocketAddr(chosenNode.getName());
+          NetUtils.createSocketAddr(chosenNode.getXferAddr());
         return new DNAddrPair(chosenNode, targetAddr);
       } catch (IOException ie) {
         String blockInfo = block.getBlock() + " file=" + src;
@@ -746,7 +746,7 @@ public class DFSInputStream extends FSIn
       } catch (ChecksumException e) {
         DFSClient.LOG.warn("fetchBlockByteRange(). Got a checksum exception for " +
                  src + " at " + block.getBlock() + ":" + 
-                 e.getPos() + " from " + chosenNode.getName());
+                 e.getPos() + " from " + chosenNode);
         // we want to remember what we have tried
         addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap);
       } catch (AccessControlException ex) {

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Apr  2 07:28:42 2012
@@ -667,7 +667,7 @@ class DFSOutputStream extends FSOutputSu
                 throw new IOException("Bad response " + reply +
                     " for block " + block +
                     " from datanode " + 
-                    targets[i].getName());
+                    targets[i]);
               }
             }
             
@@ -898,7 +898,7 @@ class DFSOutputStream extends FSOutputSu
         if (errorIndex >= 0) {
           StringBuilder pipelineMsg = new StringBuilder();
           for (int j = 0; j < nodes.length; j++) {
-            pipelineMsg.append(nodes[j].getName());
+            pipelineMsg.append(nodes[j]);
             if (j < nodes.length - 1) {
               pipelineMsg.append(", ");
             }
@@ -911,7 +911,7 @@ class DFSOutputStream extends FSOutputSu
           }
           DFSClient.LOG.warn("Error Recovery for block " + block +
               " in pipeline " + pipelineMsg + 
-              ": bad datanode " + nodes[errorIndex].getName());
+              ": bad datanode " + nodes[errorIndex]);
           failed.add(nodes[errorIndex]);
 
           DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
@@ -1005,7 +1005,7 @@ class DFSOutputStream extends FSOutputSu
       String firstBadLink = "";
       if (DFSClient.LOG.isDebugEnabled()) {
         for (int i = 0; i < nodes.length; i++) {
-          DFSClient.LOG.debug("pipeline = " + nodes[i].getName());
+          DFSClient.LOG.debug("pipeline = " + nodes[i]);
         }
       }
 
@@ -1061,7 +1061,7 @@ class DFSOutputStream extends FSOutputSu
         // find the datanode that matches
         if (firstBadLink.length() != 0) {
           for (int i = 0; i < nodes.length; i++) {
-            if (nodes[i].getName().equals(firstBadLink)) {
+            if (nodes[i].getXferAddr().equals(firstBadLink)) {
               errorIndex = i;
               break;
             }
@@ -1165,9 +1165,10 @@ class DFSOutputStream extends FSOutputSu
   static Socket createSocketForPipeline(final DatanodeInfo first,
       final int length, final DFSClient client) throws IOException {
     if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + first.getName());
+      DFSClient.LOG.debug("Connecting to datanode " + first);
     }
-    final InetSocketAddress isa = NetUtils.createSocketAddr(first.getName());
+    final InetSocketAddress isa =
+      NetUtils.createSocketAddr(first.getXferAddr());
     final Socket sock = client.socketFactory.createSocket();
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, timeout);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Mon Apr  2 07:28:42 2012
@@ -295,16 +295,16 @@ public class DFSUtil {
       assert idx < nrBlocks : "Incorrect index";
       DatanodeInfo[] locations = blk.getLocations();
       String[] hosts = new String[locations.length];
-      String[] names = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
       String[] racks = new String[locations.length];
       for (int hCnt = 0; hCnt < locations.length; hCnt++) {
         hosts[hCnt] = locations[hCnt].getHostName();
-        names[hCnt] = locations[hCnt].getName();
-        NodeBase node = new NodeBase(names[hCnt], 
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt], 
                                      locations[hCnt].getNetworkLocation());
         racks[hCnt] = node.toString();
       }
-      blkLocations[idx] = new BlockLocation(names, hosts, racks,
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks,
                                             blk.getStartOffset(),
                                             blk.getBlockSize(),
                                             blk.isCorrupt());
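
The hunk above swaps the DatanodeID "name" for the transfer address when building client-facing BlockLocations. A minimal sketch of how one replica's entry is derived, assuming a DatanodeInfo named location (sample values are hypothetical, not part of this commit):

    // Hostname for display, IP:xferPort for connections, and a NodeBase
    // path combining rack and transfer address for topology resolution.
    String host = location.getHostName();      // e.g. "dn1.example.com"
    String addr = location.getXferAddr();      // e.g. "10.0.0.1:50010"
    NodeBase node = new NodeBase(addr, location.getNetworkLocation());
    String rack = node.toString();             // e.g. "/rack1/10.0.0.1:50010"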

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Apr  2 07:28:42 2012
@@ -688,7 +688,7 @@ public class DistributedFileSystem exten
     lblocks[0] = new LocatedBlock(dataBlock, dataNode);
     LOG.info("Found checksum error in data stream at block="
         + dataBlock + " on datanode="
-        + dataNode[0].getName());
+        + dataNode[0]);
 
     // Find block in checksum stream
     DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
@@ -700,8 +700,7 @@ public class DistributedFileSystem exten
     DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; 
     lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
     LOG.info("Found checksum error in checksum stream at block="
-        + sumsBlock + " on datanode="
-        + sumsNode[0].getName());
+        + sumsBlock + " on datanode=" + sumsNode[0]);
 
     // Ask client to delete blocks.
     dfs.reportChecksumFailure(f.toString(), lblocks);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Mon Apr  2 07:28:42 2012
@@ -86,7 +86,6 @@ public class HdfsConfiguration extends C
     deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
-    deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
     deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
     deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
     deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Mon Apr  2 07:28:42 2012
@@ -24,7 +24,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
@@ -32,22 +32,32 @@ import org.apache.hadoop.io.WritableComp
  * Datanodes are identified by how they can be contacted (hostname
  * and ports) and their storage ID, a unique number that associates
  * the Datanode's blocks with a particular Datanode.
+ *
+ * {@link DatanodeInfo#getName()} should be used to get the network
+ * location (for topology) of a datanode, instead of using
+ * {@link DatanodeID#getXferAddr()} here. Helpers are defined below
+ * for each context in which a DatanodeID is used.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {}; 
 
-  public String name;       // hostname:port (data transfer port)
-  public String storageID;  // unique per cluster storageID
-  protected int infoPort;   // info server port
-  public int ipcPort;       // ipc server port
+  protected String ipAddr;     // IP address
+  protected String hostName;   // hostname
+  protected String storageID;  // unique per cluster storageID
+  protected int xferPort;      // data streaming port
+  protected int infoPort;      // info server port
+  protected int ipcPort;       // IPC server port
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}
 
-  /** Equivalent to DatanodeID(nodeName, "", -1, -1). */
-  public DatanodeID(String nodeName) {this(nodeName, "", -1, -1);}
+  /** Equivalent to DatanodeID(ipAddr, "", "", -1, -1, -1). */
+  public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);}
+
+  /** Equivalent to DatanodeID(ipAddr, "", "", xferPort, -1, -1). */
+  public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);}
 
   /**
    * DatanodeID copy constructor
@@ -55,29 +65,43 @@ public class DatanodeID implements Writa
    * @param from
    */
   public DatanodeID(DatanodeID from) {
-    this(from.getName(),
+    this(from.getIpAddr(),
+        from.getHostName(),
         from.getStorageID(),
+        from.getXferPort(),
         from.getInfoPort(),
         from.getIpcPort());
   }
   
   /**
    * Create DatanodeID
-   * @param nodeName (hostname:portNumber) 
+   * @param ipAddr IP
+   * @param hostName hostname
    * @param storageID data storage ID
+   * @param xferPort data transfer port
    * @param infoPort info server port 
    * @param ipcPort ipc server port
    */
-  public DatanodeID(String nodeName, String storageID,
-      int infoPort, int ipcPort) {
-    this.name = nodeName;
+  public DatanodeID(String ipAddr, String hostName, String storageID,
+      int xferPort, int infoPort, int ipcPort) {
+    this.ipAddr = ipAddr;
+    this.hostName = hostName;
     this.storageID = storageID;
+    this.xferPort = xferPort;
     this.infoPort = infoPort;
     this.ipcPort = ipcPort;
   }
   
-  public void setName(String name) {
-    this.name = name;
+  public void setIpAddr(String ipAddr) {
+    this.ipAddr = ipAddr;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  public void setXferPort(int xferPort) {
+    this.xferPort = xferPort;
   }
 
   public void setInfoPort(int infoPort) {
@@ -87,60 +111,79 @@ public class DatanodeID implements Writa
   public void setIpcPort(int ipcPort) {
     this.ipcPort = ipcPort;
   }
-  
+
+  public void setStorageID(String storageID) {
+    this.storageID = storageID;
+  }
+
   /**
-   * @return hostname:portNumber.
+   * @return ipAddr
    */
-  public String getName() {
-    return name;
+  public String getIpAddr() {
+    return ipAddr;
   }
-  
+
   /**
-   * @return data storage ID.
+   * @return hostname
    */
-  public String getStorageID() {
-    return this.storageID;
+  public String getHostName() {
+    return hostName;
   }
 
   /**
-   * @return infoPort (the port at which the HTTP server bound to)
+   * @return IP:xferPort string
    */
-  public int getInfoPort() {
-    return infoPort;
+  public String getXferAddr() {
+    return ipAddr + ":" + xferPort;
   }
 
   /**
-   * @return ipcPort (the port at which the IPC server bound to)
+   * @return IP:ipcPort string
    */
-  public int getIpcPort() {
-    return ipcPort;
+  public String getIpcAddr() {
+    return ipAddr + ":" + ipcPort;
   }
 
   /**
-   * sets the data storage ID.
+   * @return IP:infoPort string
    */
-  public void setStorageID(String storageID) {
-    this.storageID = storageID;
+  public String getInfoAddr() {
+    return ipAddr + ":" + infoPort;
   }
 
   /**
-   * @return hostname and no :portNumber.
+   * @return hostname:xferPort
    */
-  public String getHost() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return name;
-    } else {
-      return name.substring(0, colon);
-    }
+  public String getXferAddrWithHostname() {
+    return hostName + ":" + xferPort;
   }
-  
-  public int getPort() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return 50010; // default port.
-    }
-    return Integer.parseInt(name.substring(colon+1));
+
+  /**
+   * @return data storage ID.
+   */
+  public String getStorageID() {
+    return storageID;
+  }
+
+  /**
+   * @return xferPort (the port for data streaming)
+   */
+  public int getXferPort() {
+    return xferPort;
+  }
+
+  /**
+   * @return infoPort (the port at which the HTTP server bound to)
+   */
+  public int getInfoPort() {
+    return infoPort;
+  }
+
+  /**
+   * @return ipcPort (the port to which the IPC server is bound)
+   */
+  public int getIpcPort() {
+    return ipcPort;
   }
 
   public boolean equals(Object to) {
@@ -150,16 +193,16 @@ public class DatanodeID implements Writa
     if (!(to instanceof DatanodeID)) {
       return false;
     }
-    return (name.equals(((DatanodeID)to).getName()) &&
+    return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
   
   public int hashCode() {
-    return name.hashCode()^ storageID.hashCode();
+    return getXferAddr().hashCode()^ storageID.hashCode();
   }
   
   public String toString() {
-    return name;
+    return getXferAddr();
   }
   
   /**
@@ -167,39 +210,44 @@ public class DatanodeID implements Writa
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
-    name = nodeReg.getName();
+    ipAddr = nodeReg.getIpAddr();
+    hostName = nodeReg.getHostName();
+    xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();
-    // update any more fields added in future.
   }
     
-  /** Comparable.
-   * Basis of compare is the String name (host:portNumber) only.
+  /**
+   * Compare based on data transfer address.
+   *
    * @param that
-   * @return as specified by Comparable.
+   * @return as specified by Comparable
    */
   public int compareTo(DatanodeID that) {
-    return name.compareTo(that.getName());
+    return getXferAddr().compareTo(that.getXferAddr());
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
   @Override
   public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, name);
-    DeprecatedUTF8.writeString(out, storageID);
+    Text.writeString(out, ipAddr);
+    Text.writeString(out, hostName);
+    Text.writeString(out, storageID);
+    out.writeShort(xferPort);
     out.writeShort(infoPort);
+    out.writeShort(ipcPort);
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    name = DeprecatedUTF8.readString(in);
-    storageID = DeprecatedUTF8.readString(in);
-    // the infoPort read could be negative, if the port is a large number (more
+    ipAddr = Text.readString(in);
+    hostName = Text.readString(in);
+    storageID = Text.readString(in);
+    // The port read could be negative if the port is a large number (more
    // than 15 bits in storage size, but less than 16 bits).
    // So mask off the two high-order (sign-extension) bytes before
     // setting the field.
-    this.infoPort = in.readShort() & 0x0000ffff;
+    xferPort = in.readShort() & 0x0000ffff;
+    infoPort = in.readShort() & 0x0000ffff;
+    ipcPort = in.readShort() & 0x0000ffff;
   }
 }
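
Taken together, the accessors above replace the old host:port string parsing. A short sketch using only the signatures visible in this diff (the sample values are hypothetical):

    // Constructor order per this patch:
    // (ipAddr, hostName, storageID, xferPort, infoPort, ipcPort)
    DatanodeID id = new DatanodeID("10.0.0.1", "dn1.example.com",
        "DS-1234", 50010, 50075, 50020);
    id.getXferAddr();              // "10.0.0.1:50010", replaces getName()
    id.getIpcAddr();               // "10.0.0.1:50020"
    id.getXferAddrWithHostname();  // "dn1.example.com:50010"
    id.toString();                 // also the transfer address

    // The & 0x0000ffff in readFields() recovers ports above 32767 that
    // readShort() would otherwise sign-extend to a negative int:
    short raw = (short) 50010;     // as written by out.writeShort(...)
    int port = raw & 0x0000ffff;   // 50010, not -15526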

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Mon Apr  2 07:28:42 2012
@@ -51,9 +51,6 @@ public class DatanodeInfo extends Datano
   protected long lastUpdate;
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
-
-  // The FQDN of the IP associated with the Datanode's hostname
-  protected String hostName = null;
   
   // Datanode administrative states
   public enum AdminStates {
@@ -110,30 +107,27 @@ public class DatanodeInfo extends Datano
     this.adminState = null;    
   }
   
-  public DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
+  public DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this.location = location;
-    this.hostName = hostName;
   }
   
-  public DatanodeInfo(DatanodeID nodeID, String location, String hostName,
+  public DatanodeInfo(DatanodeID nodeID, String location,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final AdminStates adminState) {
-    this(nodeID.getName(), nodeID.getStorageID(), nodeID.getInfoPort(), nodeID
-        .getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate,
-        xceiverCount, location, hostName, adminState);
+    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(),
+        nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining,
+        blockPoolUsed, lastUpdate, xceiverCount, location, adminState);
   }
 
   /** Constructor */
-  public DatanodeInfo(final String name, final String storageID,
-      final int infoPort, final int ipcPort,
+  public DatanodeInfo(final String name, final String hostName,
+      final String storageID, final int xferPort, final int infoPort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
-      final String networkLocation, final String hostName,
-      final AdminStates adminState) {
-    super(name, storageID, infoPort, ipcPort);
-
+      final String networkLocation, final AdminStates adminState) {
+    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
@@ -141,10 +135,14 @@ public class DatanodeInfo extends Datano
     this.lastUpdate = lastUpdate;
     this.xceiverCount = xceiverCount;
     this.location = networkLocation;
-    this.hostName = hostName;
     this.adminState = adminState;
   }
   
+  /** Network location name */
+  public String getName() {
+    return getXferAddr();
+  }
+  
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
   
@@ -221,15 +219,7 @@ public class DatanodeInfo extends Datano
   public synchronized void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
   }
-  
-  public String getHostName() {
-    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
-  }
-  
-  public void setHostName(String host) {
-    hostName = host;
-  }
-  
+    
   /** A formatted string for reporting the status of the DataNode. */
   public String getDatanodeReport() {
     StringBuilder buffer = new StringBuilder();
@@ -239,9 +229,9 @@ public class DatanodeInfo extends Datano
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
-    String lookupName = NetUtils.getHostNameOfIP(name);
+    String lookupName = NetUtils.getHostNameOfIP(getName());
 
-    buffer.append("Name: "+ name);
+    buffer.append("Name: "+ getName());
     if (lookupName != null) {
       buffer.append(" (" + lookupName + ")");
     }
@@ -275,7 +265,7 @@ public class DatanodeInfo extends Datano
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
-    buffer.append(name);
+    buffer.append(ipAddr);
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
@@ -380,10 +370,6 @@ public class DatanodeInfo extends Datano
   @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
-
-    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
-    out.writeShort(ipcPort);
-
     out.writeLong(capacity);
     out.writeLong(dfsUsed);
     out.writeLong(remaining);
@@ -391,17 +377,12 @@ public class DatanodeInfo extends Datano
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
     Text.writeString(out, location);
-    Text.writeString(out, hostName == null? "": hostName);
     WritableUtils.writeEnum(out, getAdminState());
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
-
-    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
-    this.ipcPort = in.readShort() & 0x0000ffff;
-
     this.capacity = in.readLong();
     this.dfsUsed = in.readLong();
     this.remaining = in.readLong();
@@ -409,7 +390,6 @@ public class DatanodeInfo extends Datano
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
     this.location = Text.readString(in);
-    this.hostName = Text.readString(in);
     setAdminState(WritableUtils.readEnum(in, AdminStates.class));
   }
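
With hostName now carried by DatanodeID, a DatanodeInfo is constructed from an ID plus a network location, and getName() reports the transfer address. A brief sketch (continuing the hypothetical id above):

    DatanodeInfo info = new DatanodeInfo(id, "/rack1");
    info.getName();      // "10.0.0.1:50010", i.e. id.getXferAddr()
    info.getHostName();  // "dn1.example.com", inherited from DatanodeID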
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java Mon Apr  2 07:28:42 2012
@@ -84,8 +84,10 @@ public abstract class HdfsProtoUtil {
   private static HdfsProtos.DatanodeIDProto toProto(
       DatanodeID dni) {
     return HdfsProtos.DatanodeIDProto.newBuilder()
-      .setName(dni.getName())
+      .setIpAddr(dni.getIpAddr())
+      .setHostName(dni.getHostName())
       .setStorageID(dni.getStorageID())
+      .setXferPort(dni.getXferPort())
       .setInfoPort(dni.getInfoPort())
       .setIpcPort(dni.getIpcPort())
       .build();
@@ -93,8 +95,10 @@ public abstract class HdfsProtoUtil {
   
   private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) {
     return new DatanodeID(
-        idProto.getName(),
+        idProto.getIpAddr(),
+        idProto.getHostName(),
         idProto.getStorageID(),
+        idProto.getXferPort(),
         idProto.getInfoPort(),
         idProto.getIpcPort());
   }
@@ -111,7 +115,6 @@ public abstract class HdfsProtoUtil {
       .setLastUpdate(dni.getLastUpdate())
       .setXceiverCount(dni.getXceiverCount())
       .setLocation(dni.getNetworkLocation())
-      .setHostName(dni.getHostName())
       .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf(
           dni.getAdminState().name()))
       .build();
@@ -119,7 +122,7 @@ public abstract class HdfsProtoUtil {
 
   public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) {
     DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()),
-        dniProto.getLocation(), dniProto.getHostName());
+        dniProto.getLocation());
 
     dniObj.setCapacity(dniProto.getCapacity());
     dniObj.setDfsUsed(dniProto.getDfsUsed());

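With the combined "name" field split apart, a DatanodeID now crosses the wire with its IP address, host name, and all three ports carried explicitly, and DatanodeInfoProto stops duplicating the host name. A hedged example of the new round trip, using only the builder setters and constructor arguments visible in these hunks (the concrete values are illustrative):

    HdfsProtos.DatanodeIDProto proto = HdfsProtos.DatanodeIDProto.newBuilder()
        .setIpAddr("10.0.0.12")
        .setHostName("dn1.example.com")
        .setStorageID("DS-123456")
        .setXferPort(50010)
        .setInfoPort(50075)
        .setIpcPort(50020)
        .build();
    // fromProto() reverses the mapping field for field:
    DatanodeID id = new DatanodeID(proto.getIpAddr(), proto.getHostName(),
        proto.getStorageID(), proto.getXferPort(), proto.getInfoPort(),
        proto.getIpcPort());
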
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java Mon Apr  2 07:28:42 2012
@@ -45,9 +45,8 @@ public class UnregisteredNodeException e
    * @param storedNode data-node stored in the system with this storage id
    */
   public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) {
-    super("Data node " + nodeID.getName() 
-          + " is attempting to report storage ID "
+    super("Data node " + nodeID + " is attempting to report storage ID " 
           + nodeID.getStorageID() + ". Node " 
-          + storedNode.getName() + " is expected to serve this storage.");
+          + storedNode + " is expected to serve this storage.");
   }
 }

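The exception message now leans on DatanodeID's toString() instead of the removed getName(). Assuming toString() renders the transfer address (consistent with getName() aliasing getXferAddr() elsewhere in this merge), the visible message is effectively unchanged:

    // Illustrative rendering only:
    // "Data node 10.0.0.12:50010 is attempting to report storage ID
    //  DS-123456. Node 10.0.0.99:50010 is expected to serve this storage."
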
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java Mon Apr  2 07:28:42 2012
@@ -97,8 +97,7 @@ public class ClientDatanodeProtocolTrans
    */
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
       Configuration conf, int socketTimeout) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost()
-        + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     rpcProxy = createClientDatanodeProtocolProxy(addr,
         UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -107,8 +106,7 @@ public class ClientDatanodeProtocolTrans
   static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
       LocatedBlock locatedBlock) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(
-      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     if (LOG.isDebugEnabled()) {
       LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }

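Both proxy factories now ask the DatanodeID for its IPC address rather than gluing getHost() and getIpcPort() together at each call site. A sketch of the accessor they presumably rely on, mirroring getXferAddr() and not verified against DatanodeID itself:

    /** "ip:ipcPort", e.g. "10.0.0.12:50020". */
    public String getIpcAddr() {
      return ipAddr + ":" + ipcPort;
    }
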
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Mon Apr  2 07:28:42 2012
@@ -204,14 +204,18 @@ public class PBHelper {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
-        dn.getIpcPort());
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
+        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
   }
 
   public static DatanodeIDProto convert(DatanodeID dn) {
-    return DatanodeIDProto.newBuilder().setName(dn.getName())
-        .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
-        .setStorageID(dn.getStorageID()).build();
+    return DatanodeIDProto.newBuilder()
+        .setIpAddr(dn.getIpAddr())
+        .setHostName(dn.getHostName())
+        .setStorageID(dn.getStorageID())
+        .setXferPort(dn.getXferPort())
+        .setInfoPort(dn.getInfoPort())
+        .setIpcPort(dn.getIpcPort()).build();
   }
 
   // Arrays of DatanodeId
@@ -442,7 +446,6 @@ public class PBHelper {
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
         di.hasLocation() ? di.getLocation() : null , 
-        di.hasHostName() ? di.getHostName() : null,
         di.getCapacity(),  di.getDfsUsed(),  di.getRemaining(),
         di.getBlockPoolUsed()  ,  di.getLastUpdate() , di.getXceiverCount() ,
         PBHelper.convert(di.getAdminState())); 
@@ -451,9 +454,6 @@ public class PBHelper {
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
     DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
-    if (di.getHostName() != null) {
-      builder.setHostName(di.getHostName());
-    }
     if (di.getNetworkLocation() != null) {
       builder.setLocation(di.getNetworkLocation());
     }
@@ -503,7 +503,6 @@ public class PBHelper {
     builder.setAdminState(PBHelper.convert(info.getAdminState()));
     builder.setCapacity(info.getCapacity())
         .setDfsUsed(info.getDfsUsed())
-        .setHostName(info.getHostName())
         .setId(PBHelper.convert((DatanodeID)info))
         .setLastUpdate(info.getLastUpdate())
         .setLocation(info.getNetworkLocation())
@@ -610,8 +609,8 @@ public class PBHelper {
     DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
         .newBuilder();
     return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
-        .setStorageInfo(PBHelper.convert(registration.storageInfo))
-        .setKeys(PBHelper.convert(registration.exportedKeys)).build();
+        .setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
+        .setKeys(PBHelper.convert(registration.getExportedKeys())).build();
   }
 
   public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {

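The two convert() overloads remain symmetric, so an ID should survive a proto round trip intact; the DatanodeRegistration hunk additionally moves direct field reads (storageInfo, exportedKeys) behind getters. A hedged round-trip check, reusing the accessors sketched earlier with illustrative values:

    DatanodeID original = new DatanodeID("10.0.0.12", "dn1.example.com",
        "DS-123456", 50010, 50075, 50020);
    DatanodeIDProto proto = PBHelper.convert(original);
    DatanodeID copy = PBHelper.convert(proto);
    assert copy.getXferAddr().equals(original.getXferAddr());
    assert copy.getIpcPort() == original.getIpcPort();
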
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Mon Apr  2 07:28:42 2012
@@ -51,6 +51,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -304,8 +305,9 @@ public class Balancer {
       DataOutputStream out = null;
       DataInputStream in = null;
       try {
-        sock.connect(NetUtils.createSocketAddr(
-            target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
+        sock.connect(
+            NetUtils.createSocketAddr(target.datanode.getXferAddr()),
+            HdfsServerConstants.READ_TIMEOUT);
         sock.setKeepAlive(true);
         out = new DataOutputStream( new BufferedOutputStream(
             sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
@@ -586,7 +588,7 @@ public class Balancer {
     /** Add a node task */
     private void addNodeTask(NodeTask task) {
       assert (task.datanode != this) :
-        "Source and target are the same " + datanode.getName();
+        "Source and target are the same " + datanode;
       incScheduledSize(task.getSize());
       nodeTasks.add(task);
     }
@@ -1006,7 +1008,7 @@ public class Balancer {
         targetCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;
@@ -1054,7 +1056,7 @@ public class Balancer {
         sourceCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;
@@ -1550,7 +1552,7 @@ public class Balancer {
    */
   public static void main(String[] args) {
     try {
-      System.exit(ToolRunner.run(null, new Cli(), args));
+      System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {
       LOG.error("Exiting balancer due an exception", e);
       System.exit(-1);

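Two distinct fixes in this file: the block mover now dials the datanode's transfer address explicitly, and main() seeds ToolRunner with an HdfsConfiguration instead of null. The latter matters because ToolRunner.run(null, ...) falls back to a bare Configuration, which would not pick up the HDFS resource files; constructing HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml (an assumption based on that class's usual role, not re-verified here):

    // Before: ToolRunner.run(null, new Cli(), args)  -> bare Configuration
    Configuration conf = new HdfsConfiguration();     // pulls in hdfs-*.xml
    System.exit(ToolRunner.run(conf, new Cli(), args));
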
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Mon Apr  2 07:28:42 2012
@@ -808,9 +808,9 @@ public class BlockManager {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
       NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
-          + "Asking for blocks from an unrecorded node " + datanode.getName());
+          + "Asking for blocks from an unrecorded node " + datanode);
       throw new HadoopIllegalArgumentException(
-          "Datanode " + datanode.getName() + " not found.");
+          "Datanode " + datanode + " not found.");
     }
 
     int numBlocks = node.numBlocks();
@@ -882,7 +882,7 @@ public class BlockManager {
         .hasNext();) {
       DatanodeDescriptor node = it.next();
       invalidateBlocks.add(b, node, false);
-      datanodes.append(node.getName()).append(" ");
+      datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
       NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
@@ -921,7 +921,7 @@ public class BlockManager {
     if (node == null) {
       throw new IOException("Cannot mark block " + 
                             storedBlock.getBlockName() +
-                            " as corrupt because datanode " + dn.getName() +
+                            " as corrupt because datanode " + dn +
                             " does not exist. ");
     }
 
@@ -955,11 +955,11 @@ public class BlockManager {
   private void invalidateBlock(Block blk, DatanodeInfo dn)
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* invalidateBlock: "
-                                 + blk + " on " + dn.getName());
+                                 + blk + " on " + dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate block " + blk
-          + " because datanode " + dn.getName() + " does not exist.");
+          + " because datanode " + dn + " does not exist.");
     }
 
     // Check how many copies we have of the block
@@ -977,11 +977,11 @@ public class BlockManager {
       removeStoredBlock(blk, node);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
-            + blk + " on " + dn.getName() + " listed for deletion.");
+            + blk + " on " + dn + " listed for deletion.");
       }
     } else {
       NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + blk + " on "
-          + dn.getName() + " is the only copy and was not deleted.");
+          + dn + " is the only copy and was not deleted.");
     }
   }
 
@@ -1224,11 +1224,11 @@ public class BlockManager {
           StringBuilder targetList = new StringBuilder("datanode(s)");
           for (int k = 0; k < targets.length; k++) {
             targetList.append(' ');
-            targetList.append(targets[k].getName());
+            targetList.append(targets[k]);
           }
           NameNode.stateChangeLog.info(
                   "BLOCK* ask "
-                  + rw.srcNode.getName() + " to replicate "
+                  + rw.srcNode + " to replicate "
                   + rw.block + " to " + targetList);
         }
       }
@@ -1410,15 +1410,15 @@ public class BlockManager {
     try {
       final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
-        throw new IOException("ProcessReport from dead or unregistered node: "
-                              + nodeID.getName());
+        throw new IOException(
+            "ProcessReport from dead or unregistered node: " + nodeID);
       }
 
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
       if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
         NameNode.stateChangeLog.info("BLOCK* processReport: "
-            + "discarded non-initial block report from " + nodeID.getName()
+            + "discarded non-initial block report from " + nodeID
             + " because namenode still in startup phase");
         return;
       }
@@ -1451,7 +1451,7 @@ public class BlockManager {
     // Log the block report processing stats from Namenode perspective
     NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
     NameNode.stateChangeLog.info("BLOCK* processReport: from "
-        + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
+        + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
   }
 
@@ -1511,7 +1511,7 @@ public class BlockManager {
     }
     for (Block b : toInvalidate) {
       NameNode.stateChangeLog.info("BLOCK* processReport: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file.");
       addToInvalidates(b, node);
     }
@@ -1662,7 +1662,7 @@ public class BlockManager {
     
     if(LOG.isDebugEnabled()) {
       LOG.debug("Reported block " + block
-          + " on " + dn.getName() + " size " + block.getNumBytes()
+          + " on " + dn + " size " + block.getNumBytes()
           + " replicaState = " + reportedState);
     }
   
@@ -1837,7 +1837,7 @@ assert storedBlock.findDatanode(dn) < 0 
           // closed. So, ignore this report, assuming we will get a
           // FINALIZED replica later. See HDFS-2791
           LOG.info("Received an RBW replica for block " + storedBlock +
-              " on " + dn.getName() + ": ignoring it, since the block is " +
+              " on " + dn + ": ignoring it, since the block is " +
               "complete with the same generation stamp.");
           return null;
         } else {
@@ -1850,7 +1850,7 @@ assert storedBlock.findDatanode(dn) < 0 
     default:
       String msg = "Unexpected replica state " + reportedState
       + " for block: " + storedBlock + 
-      " on " + dn.getName() + " size " + storedBlock.getNumBytes();
+      " on " + dn + " size " + storedBlock.getNumBytes();
       // log here at WARN level since this is really a broken HDFS
       // invariant
       LOG.warn(msg);
@@ -1949,7 +1949,7 @@ assert storedBlock.findDatanode(dn) < 0 
     if (storedBlock == null || storedBlock.getINode() == null) {
       // If this block does not belong to anyfile, then we are done.
       NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
-          + node.getName() + " size " + block.getNumBytes()
+          + node + " size " + block.getNumBytes()
           + " but it does not belong to any file.");
       // we could add this block to invalidate set of this datanode.
       // it will happen in next block report otherwise.
@@ -1972,7 +1972,7 @@ assert storedBlock.findDatanode(dn) < 0 
       curReplicaDelta = 0;
       NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
           + "Redundant addStoredBlock request received for " + storedBlock
-          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
+          + " on " + node + " size " + storedBlock.getNumBytes());
     }
 
     // Now check for completion of blocks and safe block count
@@ -2035,7 +2035,7 @@ assert storedBlock.findDatanode(dn) < 0 
     
     StringBuilder sb = new StringBuilder(500);
     sb.append("BLOCK* addStoredBlock: blockMap updated: ")
-      .append(node.getName())
+      .append(node)
       .append(" is added to ");
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
@@ -2069,7 +2069,7 @@ assert storedBlock.findDatanode(dn) < 0 
       } catch (IOException e) {
         NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
                                       "error in deleting bad block " + blk +
-                                      " on " + node + e);
+                                      " on " + node, e);
         gotException = true;
       }
     }
@@ -2335,7 +2335,7 @@ assert storedBlock.findDatanode(dn) < 0 
       //
       addToInvalidates(b, cur);
       NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
+                +"("+cur+", "+b+") is added to invalidated blocks set.");
     }
   }
 
@@ -2350,7 +2350,7 @@ assert storedBlock.findDatanode(dn) < 0 
       excessBlocksCount++;
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
-            + " (" + dn.getName() + ", " + block
+            + " (" + dn + ", " + block
             + ") is added to excessReplicateMap");
       }
     }
@@ -2363,7 +2363,7 @@ assert storedBlock.findDatanode(dn) < 0 
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
-          + block + " from " + node.getName());
+          + block + " from " + node);
     }
     assert (namesystem.hasWriteLock());
     {
@@ -2476,7 +2476,7 @@ assert storedBlock.findDatanode(dn) < 0 
     }
     for (Block b : toInvalidate) {
       NameNode.stateChangeLog.info("BLOCK* addBlock: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file.");
       addToInvalidates(b, node);
     }
@@ -2504,7 +2504,7 @@ assert storedBlock.findDatanode(dn) < 0 
         NameNode.stateChangeLog
             .warn("BLOCK* processIncrementalBlockReport"
                 + " is received from dead or unregistered node "
-                + nodeID.getName());
+                + nodeID);
         throw new IOException(
             "Got incremental block report from unregistered or dead node");
       }
@@ -2526,7 +2526,7 @@ assert storedBlock.findDatanode(dn) < 0 
           break;
         default:
           String msg = 
-            "Unknown block status code reported by " + nodeID.getName() +
+            "Unknown block status code reported by " + nodeID +
             ": " + rdbi;
           NameNode.stateChangeLog.warn(msg);
           assert false : msg; // if assertions are enabled, throw.
@@ -2535,14 +2535,14 @@ assert storedBlock.findDatanode(dn) < 0 
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug("BLOCK* block "
               + (rdbi.getStatus()) + ": " + rdbi.getBlock()
-              + " is received from " + nodeID.getName());
+              + " is received from " + nodeID);
         }
       }
     } finally {
       namesystem.writeUnlock();
       NameNode.stateChangeLog
           .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
-              + nodeID.getName()
+              + nodeID
               +  " receiving: " + receiving + ", "
               + " received: " + received + ", "
               + " deleted: " + deleted);
@@ -2618,7 +2618,7 @@ assert storedBlock.findDatanode(dn) < 0 
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
       DatanodeDescriptor node = nodeIter.next();
-      nodeList.append(node.name);
+      nodeList.append(node);
       nodeList.append(" ");
     }
     LOG.info("Block: " + block + ", Expected Replicas: "
@@ -2628,7 +2628,7 @@ assert storedBlock.findDatanode(dn) < 0 
         + ", excess replicas: " + num.excessReplicas()
         + ", Is Open File: " + fileINode.isUnderConstruction()
         + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
-        + srcNode.name + ", Is current datanode decommissioning: "
+        + srcNode + ", Is current datanode decommissioning: "
         + srcNode.isDecommissionInProgress());
   }
   

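Nearly every hunk in this file follows a single pattern: string concatenation now invokes the node's toString() in place of the removed getName()/name field. The one behavioral fix is in the invalidateCorruptReplicas catch block, where the exception is now passed to the logger as a throwable argument so the stack trace is preserved instead of being flattened into the message. A sketch of the toString() these log lines are presumed to rely on (the actual override is not shown in this diff):

    @Override
    public String toString() {
      return getXferAddr();   // assumption, consistent with getName() above
    }
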
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java Mon Apr  2 07:28:42 2012
@@ -65,14 +65,14 @@ public class CorruptReplicasMap{
       nodes.add(dn);
       NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    blk.getBlockName() +
-                                   " added as corrupt on " + dn.getName() +
+                                   " added as corrupt on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     } else {
       NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    "duplicate requested for " + 
                                    blk.getBlockName() + " to add as corrupt " +
-                                   "on " + dn.getName() +
+                                   "on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Mon Apr  2 07:28:42 2012
@@ -175,19 +175,7 @@ public class DatanodeDescriptor extends 
    */
   public DatanodeDescriptor(DatanodeID nodeID, 
                             String networkLocation) {
-    this(nodeID, networkLocation, null);
-  }
-  
-  /** DatanodeDescriptor constructor
-   * 
-   * @param nodeID id of the data node
-   * @param networkLocation location of the data node in network
-   * @param hostName it could be different from host specified for DatanodeID
-   */
-  public DatanodeDescriptor(DatanodeID nodeID, 
-                            String networkLocation,
-                            String hostName) {
-    this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0L, 0, 0);
+    this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0);
   }
   
   /** DatanodeDescriptor constructor
@@ -223,14 +211,13 @@ public class DatanodeDescriptor extends 
    */
   public DatanodeDescriptor(DatanodeID nodeID,
                             String networkLocation,
-                            String hostName,
                             long capacity,
                             long dfsUsed,
                             long remaining,
                             long bpused,
                             int xceiverCount,
                             int failedVolumes) {
-    super(nodeID, networkLocation, hostName);
+    super(nodeID, networkLocation);
     updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount, 
         failedVolumes);
   }
@@ -436,23 +423,6 @@ public class DatanodeDescriptor extends 
     }
   }
 
-  /** Serialization for FSEditLog */
-  public void readFieldsFromFSEditLog(DataInput in) throws IOException {
-    this.name = DeprecatedUTF8.readString(in);
-    this.storageID = DeprecatedUTF8.readString(in);
-    this.infoPort = in.readShort() & 0x0000ffff;
-
-    this.capacity = in.readLong();
-    this.dfsUsed = in.readLong();
-    this.remaining = in.readLong();
-    this.blockPoolUsed = in.readLong();
-    this.lastUpdate = in.readLong();
-    this.xceiverCount = in.readInt();
-    this.location = Text.readString(in);
-    this.hostName = Text.readString(in);
-    setAdminState(WritableUtils.readEnum(in, AdminStates.class));
-  }
-  
   /**
    * @return Approximate number of blocks currently scheduled to be written 
    * to this datanode.