Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/02/10 04:04:12 UTC
svn commit: r1242642 [1/2] - in
/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/
hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/
hadoop-hdfs/src/main/java/org/apache/hadoop/hdf...
Author: szetszwo
Date: Fri Feb 10 03:04:05 2012
New Revision: 1242642
URL: http://svn.apache.org/viewvc?rev=1242642&view=rev
Log:
Merge r1241554 through r1242605 from 0.23.
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/pom.xml
Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs:1227776-1241553
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,
1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs:1227776-1242605
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,
1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Fri Feb 10 03:04:05 2012
@@ -19,12 +19,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
- <version>0.23.1-SNAPSHOT</version>
+ <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-httpfs</artifactId>
- <version>0.23.1-SNAPSHOT</version>
+ <version>0.23.2-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop HttpFS</name>
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 10 03:04:05 2012
@@ -119,7 +119,28 @@ Release 0.23-PB - Unreleased
HDFS-2768. BackupNode stop can not close proxy connections because
it is not a proxy instance. (Uma Maheswara Rao G via eli)
-Release 0.23.1 - UNRELEASED
+Release 0.23.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ HDFS-2887. FSVolume, a part of the FSDatasetInterface implementation, should
+ not be referenced outside FSDataset. A new FSVolumeInterface is defined.
+ The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is also
+ updated. (szetszwo)
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+ HDFS-2923. Namenode IPC handler count uses the wrong configuration key
+ (todd)
+
+ HDFS-2764. TestBackupNode is racy. (atm)
+
+Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES
@@ -134,6 +155,10 @@ Release 0.23.1 - UNRELEASED
HDFS-2545. Change WebHDFS to support multiple namenodes in federation.
(szetszwo)
+ HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
+ with read/write capabilities. (tucu)
+
+
IMPROVEMENTS
HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
@@ -167,9 +192,6 @@ Release 0.23.1 - UNRELEASED
HDFS-2604. Add a log message to show if WebHDFS is enabled and a
configuration section in the forrest doc. (szetszwo)
- HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy
- with read/write capabilities. (tucu)
-
HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli)
@@ -180,7 +202,8 @@ Release 0.23.1 - UNRELEASED
HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy.
(Uma Maheswara Rao G via eli)
- HDFS-2574. Remove references to some deprecated properties in conf templates and defaults files. (Joe Crobak via harsh)
+ HDFS-2574. Remove references to some deprecated properties in conf
+ templates and defaults files. (Joe Crobak via harsh)
HDFS-2722. HttpFs should not be using an int for block size. (harsh)
@@ -188,21 +211,22 @@ Release 0.23.1 - UNRELEASED
suresh)
HDFS-2349. Corruption detected during block transfers between DNs
- should log a WARN instead of INFO. (harsh)
+ should log a WARN instead of INFO. (harsh)
- HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
-
- HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
+ HDFS-2729. Update BlockManager's comments regarding the invalid block
+ set (harsh)
- HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
+ HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream
+ method (harsh)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
- HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
+ HDFS-1314. Make dfs.blocksize accept size-indicating prefixes.
+ (Sho Shimauchi via harsh)
HDFS-69. Improve the 'dfsadmin' commandline help. (harsh)
- HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code (eli)
+ HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code. (eli)
HDFS-362. FSEditLog should not writes long and short as UTF8, and should
not use ArrayWritable for writing non-array items. (Uma Maheswara Rao G
@@ -215,7 +239,7 @@ Release 0.23.1 - UNRELEASED
HDFS-2818. Fix a missing space issue in HDFS webapps' title tags. (Devaraj K via harsh)
- HDFS-2397. Undeprecate SecondaryNameNode (eli)
+ HDFS-2397. Undeprecate SecondaryNameNode. (eli)
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
@@ -228,6 +252,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
+ HDFS-2786. Fix host-based token incompatibilities in DFSUtil. (Kihwal
+ Lee via jitendra)
+
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -319,11 +346,14 @@ Release 0.23.1 - UNRELEASED
HDFS-442. dfsthroughput in test jar throws NPE (harsh)
- HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk (revans2 via tucu)
+ HDFS-2836. HttpFSServer still has 2 javadoc warnings in trunk.
+ (revans2 via tucu)
- HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class (revans2 via tucu)
+ HDFS-2837. mvn javadoc:javadoc not seeing LimitedPrivate class.
+ (revans2 via tucu)
- HDFS-2840. TestHostnameFilter should work with localhost or localhost.localdomain (tucu)
+ HDFS-2840. TestHostnameFilter should work with localhost or
+ localhost.localdomain (tucu)
HDFS-2791. If block report races with closing of file, replica is
incorrectly marked corrupt. (todd)
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/pom.xml Fri Feb 10 03:04:05 2012
@@ -17,12 +17,12 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
- <version>0.23.1-SNAPSHOT</version>
+ <version>0.23.2-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
- <version>0.23.1-SNAPSHOT</version>
+ <version>0.23.2-SNAPSHOT</version>
<description>Apache Hadoop HDFS</description>
<name>Apache Hadoop HDFS</name>
<packaging>jar</packaging>
Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 03:04:05 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1241553
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,12
08313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1242605
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,12
08313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Feb 10 03:04:05 2012
@@ -609,19 +609,6 @@ public class DFSUtil {
}
/**
- * @param address address of format host:port
- * @return InetSocketAddress for the address
- */
- public static InetSocketAddress getSocketAddress(String address) {
- int colon = address.indexOf(":");
- if (colon < 0) {
- return new InetSocketAddress(address, 0);
- }
- return new InetSocketAddress(address.substring(0, colon),
- Integer.parseInt(address.substring(colon + 1)));
- }
-
- /**
* Round bytes to GiB (gibibyte)
* @param bytes number of bytes
* @return number of GiB
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Fri Feb 10 03:04:05 2012
@@ -498,7 +498,7 @@ public class JspHelper {
String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
InetSocketAddress namenodeAddress = null;
if (namenodeAddressInUrl != null) {
- namenodeAddress = DFSUtil.getSocketAddress(namenodeAddressInUrl);
+ namenodeAddress = NetUtils.createSocketAddr(namenodeAddressInUrl);
} else if (context != null) {
namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
context);
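The hunk above swaps the removed DFSUtil helper for NetUtils.createSocketAddr, which already handles host:port parsing and default ports. A minimal standalone sketch of the replacement call (the hostname is made up for illustration):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class SocketAddrExample {
      public static void main(String[] args) {
        // "host:port" strings are split into host and port parts.
        InetSocketAddress a = NetUtils.createSocketAddr("nn.example.com:8020");
        // The two-argument form supplies a default port when the string
        // omits one (the style DatanodeJspHelper uses later in this commit).
        InetSocketAddress b = NetUtils.createSocketAddr("nn.example.com", 8020);
        System.out.println(a + " " + b);
      }
    }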
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Fri Feb 10 03:04:05 2012
@@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
/**
* Performs two types of scanning:
* <li> Gets block files from the data directories and reconciles the
- * difference between the blocks on the disk and in memory in
- * {@link FSDataset}</li>
+ * difference between the blocks on the disk and in memory.</li>
* <li> Scans the data directories for block files under a block pool
* and verifies that the files are not corrupt</li>
* This keeps track of blocks and their last verification times.
@@ -78,7 +77,7 @@ class BlockPoolSliceScanner {
private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
private DataNode datanode;
- private FSDataset dataset;
+ private final FSDatasetInterface dataset;
// sorted set
private TreeSet<BlockScanInfo> blockInfoSet;
@@ -137,8 +136,8 @@ class BlockPoolSliceScanner {
}
}
- BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
- String bpid) {
+ BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
+ Configuration conf, String bpid) {
this.datanode = datanode;
this.dataset = dataset;
this.blockPoolId = bpid;
@@ -220,16 +219,16 @@ class BlockPoolSliceScanner {
* otherwise, pick the first directory.
*/
File dir = null;
- List<FSVolume> volumes = dataset.volumes.getVolumes();
- for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
- File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
+ List<FSVolumeInterface> volumes = dataset.getVolumes();
+ for (FSVolumeInterface vol : volumes) {
+ File bpDir = vol.getDirectory(blockPoolId);
if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
dir = bpDir;
break;
}
}
if (dir == null) {
- dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
+ dir = volumes.get(0).getDirectory(blockPoolId);
}
try {
@@ -577,8 +576,8 @@ class BlockPoolSliceScanner {
bytesLeft += len;
}
- static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
- return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
+ static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
+ return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
BlockPoolSliceScanner.verificationLogFile);
}
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java Fri Feb 10 03:04:05 2012
@@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**************************************************
* BlockVolumeChoosingPolicy allows a DataNode to
@@ -46,7 +46,7 @@ public interface BlockVolumeChoosingPoli
* @return the chosen volume to store the block.
* @throws IOException when disks are unavailable or are full.
*/
- public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
+ public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
throws IOException;
}
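The updated signature lets a policy be written purely against FSVolumeInterface. Below is a minimal, hypothetical first-fit sketch (the real implementation in this branch is RoundRobinVolumesPolicy; FirstFitVolumesPolicy exists only for illustration):

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
    import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

    /** Hypothetical policy: choose the first volume with enough free space. */
    public class FirstFitVolumesPolicy implements BlockVolumeChoosingPolicy {
      @Override
      public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes,
          long blockSize) throws IOException {
        for (FSVolumeInterface vol : volumes) {
          if (vol.getAvailable() >= blockSize) {
            return vol;
          }
        }
        throw new DiskOutOfSpaceException(
            "No volume has " + blockSize + " bytes available");
      }
    }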
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Fri Feb 10 03:04:05 2012
@@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
/**
* DataBlockScanner manages block scanning for all the block pools. For each
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFac
public class DataBlockScanner implements Runnable {
public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
private final DataNode datanode;
- private final FSDataset dataset;
+ private final FSDatasetInterface dataset;
private final Configuration conf;
/**
@@ -55,7 +55,7 @@ public class DataBlockScanner implements
new TreeMap<String, BlockPoolSliceScanner>();
Thread blockScannerThread = null;
- DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
+ DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
this.conf = conf;
@@ -135,7 +135,7 @@ public class DataBlockScanner implements
.iterator();
while (bpidIterator.hasNext()) {
String bpid = bpidIterator.next();
- for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
+ for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
try {
File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
if (currFile.exists()) {
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Feb 10 03:04:05 2012
@@ -125,7 +125,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -582,11 +581,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
- } else if (!(data instanceof FSDataset)) {
- reason = "verifcation is supported only with FSDataset";
+ } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+ reason = "verification is not supported by SimulatedFSDataset";
}
if (reason == null) {
- blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
+ blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start();
} else {
LOG.info("Periodic Block Verification scan is disabled because " +
@@ -611,11 +610,11 @@ public class DataNode extends Configured
if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
- } else if (!(data instanceof FSDataset)) {
- reason = "verification is supported only with FSDataset";
+ } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+ reason = "verification is not supported by SimulatedFSDataset";
}
if (reason == null) {
- directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
+ directoryScanner = new DirectoryScanner(this, data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -2237,16 +2236,7 @@ public class DataNode extends Configured
*/
@Override // DataNodeMXBean
public String getVolumeInfo() {
- final Map<String, Object> info = new HashMap<String, Object>();
- Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
- for (VolumeInfo v : volumes) {
- final Map<String, Object> innerInfo = new HashMap<String, Object>();
- innerInfo.put("usedSpace", v.usedSpace);
- innerInfo.put("freeSpace", v.freeSpace);
- innerInfo.put("reservedSpace", v.reservedSpace);
- info.put(v.directory, innerInfo);
- }
- return JSON.toString(info);
+ return JSON.toString(data.getVolumeInfoMap());
}
@Override // DataNodeMXBean
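For reference, the JSON string returned by getVolumeInfo() presumably keeps the shape of the map the removed loop built: one entry per volume directory, each with usedSpace, freeSpace and reservedSpace. A standalone sketch of that construction (directory and byte counts are made up; JSON is assumed to be Jetty's org.mortbay.util.ajax.JSON, the class the JSON.toString(..) call resolves to in this branch):

    import java.util.HashMap;
    import java.util.Map;

    import org.mortbay.util.ajax.JSON;

    public class VolumeInfoJsonExample {
      public static void main(String[] args) {
        Map<String, Object> info = new HashMap<String, Object>();
        Map<String, Object> inner = new HashMap<String, Object>();
        inner.put("usedSpace", 1024L);
        inner.put("freeSpace", 4096L);
        inner.put("reservedSpace", 512L);
        info.put("/data/1/dfs/dn/current", inner);
        // Prints e.g. {"/data/1/dfs/dn/current":{"freeSpace":4096,...}}
        System.out.println(JSON.toString(info));
      }
    }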
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Feb 10 03:04:05 2012
@@ -751,7 +751,7 @@ public class DataStorage extends Storage
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) {
//return the current metadata file name
- return FSDataset.getMetaFileName(matcher.group(1),
+ return DatanodeUtil.getMetaFileName(matcher.group(1),
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
}
return oldFileName;
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri Feb 10 03:04:05 2012
@@ -53,18 +53,30 @@ import org.apache.hadoop.util.StringUtil
@InterfaceAudience.Private
public class DatanodeJspHelper {
private static DFSClient getDFSClient(final UserGroupInformation user,
- final InetSocketAddress addr,
+ final String addr,
final Configuration conf
) throws IOException,
InterruptedException {
return
user.doAs(new PrivilegedExceptionAction<DFSClient>() {
public DFSClient run() throws IOException {
- return new DFSClient(addr, conf);
+ return new DFSClient(NetUtils.createSocketAddr(addr), conf);
}
});
}
+ /**
+ * Internal convenience method for canonicalizing host name.
+ * @param addr name:port or name
+ * @return canonicalized host name
+ */
+ private static String canonicalize(String addr) {
+ // default port 1 is supplied to allow addr without port.
+ // the port will be ignored.
+ return NetUtils.createSocketAddr(addr, 1).getAddress()
+ .getCanonicalHostName();
+ }
+
private static final SimpleDateFormat lsDateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm");
@@ -102,8 +114,7 @@ public class DatanodeJspHelper {
return;
}
- InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
- DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
+ DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
String target = dir;
final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
if (targetStatus == null) { // not exists
@@ -125,8 +136,7 @@ public class DatanodeJspHelper {
out.print("Empty file");
} else {
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
- String fqdn = InetAddress.getByName(chosenNode.getHost())
- .getCanonicalHostName();
+ String fqdn = canonicalize(chosenNode.getHost());
String datanodeAddr = chosenNode.getName();
int datanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
@@ -210,9 +220,8 @@ public class DatanodeJspHelper {
JspHelper.addTableFooter(out);
}
}
- String namenodeHost = namenodeAddress.getHostName();
out.print("<br><a href=\"http://"
- + InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+ + canonicalize(nnAddr) + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close();
}
@@ -282,8 +291,7 @@ public class DatanodeJspHelper {
}
long blockSize = Long.parseLong(blockSizeStr);
- final InetSocketAddress namenodeAddress = DFSUtil.getSocketAddress(nnAddr);
- final DFSClient dfs = getDFSClient(ugi, namenodeAddress, conf);
+ final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
// Add the various links for looking at the file contents
@@ -305,8 +313,7 @@ public class DatanodeJspHelper {
dfs.close();
return;
}
- String fqdn = InetAddress.getByName(chosenNode.getHost())
- .getCanonicalHostName();
+ String fqdn = canonicalize(chosenNode.getHost());
String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort
@@ -345,9 +352,7 @@ public class DatanodeJspHelper {
// generate a table and dump the info
out.println("\n<table>");
- String namenodeHost = namenodeAddress.getHostName();
- String namenodeHostName = InetAddress.getByName(namenodeHost).getCanonicalHostName();
-
+ String nnCanonicalName = canonicalize(nnAddr);
for (LocatedBlock cur : blocks) {
out.print("<tr>");
final String blockidstring = Long.toString(cur.getBlock().getBlockId());
@@ -358,7 +363,7 @@ public class DatanodeJspHelper {
String datanodeAddr = locs[j].getName();
datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
.indexOf(':') + 1, datanodeAddr.length()));
- fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName();
+ fqdn = canonicalize(locs[j].getHost());
String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize
@@ -370,7 +375,7 @@ public class DatanodeJspHelper {
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
- String blockInfoUrl = "http://" + namenodeHostName + ":"
+ String blockInfoUrl = "http://" + nnCanonicalName + ":"
+ namenodeInfoPort
+ "/block_info_xml.jsp?blockId=" + blockidstring;
out.print("<td> </td><td><a href=\"" + blockUrl + "\">"
@@ -382,7 +387,7 @@ public class DatanodeJspHelper {
out.println("</table>");
out.print("<hr>");
out.print("<br><a href=\"http://"
- + InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
+ + nnCanonicalName + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close();
}
@@ -419,8 +424,7 @@ public class DatanodeJspHelper {
return;
}
- final DFSClient dfs = getDFSClient(ugi,
- DFSUtil.getSocketAddress(nnAddr), conf);
+ final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
String bpid = null;
Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN;
@@ -518,8 +522,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName();
nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
- nextHost = InetAddress.getByName(d.getHost())
- .getCanonicalHostName();
+ nextHost = d.getHost();
nextPort = d.getInfoPort();
}
}
@@ -533,7 +536,7 @@ public class DatanodeJspHelper {
}
String nextUrl = null;
if (nextBlockIdStr != null) {
- nextUrl = "http://" + nextHost + ":" + nextPort
+ nextUrl = "http://" + canonicalize(nextHost) + ":" + nextPort
+ "/browseBlock.jsp?blockId=" + nextBlockIdStr
+ "&blockSize=" + nextBlockSize
+ "&startOffset=" + nextStartOffset
@@ -573,8 +576,7 @@ public class DatanodeJspHelper {
String datanodeAddr = d.getName();
prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
- prevHost = InetAddress.getByName(d.getHost())
- .getCanonicalHostName();
+ prevHost = d.getHost();
prevPort = d.getInfoPort();
}
}
@@ -591,7 +593,7 @@ public class DatanodeJspHelper {
String prevUrl = null;
if (prevBlockIdStr != null) {
- prevUrl = "http://" + prevHost + ":" + prevPort
+ prevUrl = "http://" + canonicalize(prevHost) + ":" + prevPort
+ "/browseBlock.jsp?blockId=" + prevBlockIdStr
+ "&blockSize=" + prevBlockSize
+ "&startOffset=" + prevStartOffset
@@ -669,8 +671,7 @@ public class DatanodeJspHelper {
+ "\">");
// fetch the block from the datanode that has the last block for this file
- final DFSClient dfs = getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr),
- conf);
+ final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
if (blocks == null || blocks.size() == 0) {
@@ -710,6 +711,6 @@ public class DatanodeJspHelper {
final DataNode datanode, final Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
- return getDFSClient(ugi, DFSUtil.getSocketAddress(nnAddr), conf);
+ return getDFSClient(ugi, nnAddr, conf);
}
}
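The canonicalize(..) helper added above accepts either "name" or "name:port" because the dummy default port 1 satisfies NetUtils.createSocketAddr while only the resolved address is used. A standalone sketch of the same idiom (the hostname is made up, and an unresolvable name would make getAddress() return null):

    import org.apache.hadoop.net.NetUtils;

    public class CanonicalizeExample {
      static String canonicalize(String addr) {
        // Default port 1 lets a bare host name parse; the port is ignored.
        return NetUtils.createSocketAddr(addr, 1).getAddress()
            .getCanonicalHostName();
      }

      public static void main(String[] args) {
        // Both forms resolve to the same canonical host name.
        System.out.println(canonicalize("dn1.example.com:50075"));
        System.out.println(canonicalize("dn1.example.com"));
      }
    }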
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java Fri Feb 10 03:04:05 2012
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
+import java.io.FilenameFilter;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.B
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
class DatanodeUtil {
+ static final String METADATA_EXTENSION = ".meta";
+
+ static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
+
private final static String DISK_ERROR = "Possible disk error on file creation: ";
/** Get the cause of an I/O exception if caused by a possible disk error
@@ -64,4 +70,37 @@ class DatanodeUtil {
}
return f;
}
+
+ static String getMetaFileName(String blockFileName, long genStamp) {
+ return blockFileName + "_" + genStamp + METADATA_EXTENSION;
+ }
+
+ static File getMetaFile(File f, long genStamp) {
+ return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+ }
+
+ /** Find the corresponding meta data file from a given block file */
+ static File findMetaFile(final File blockFile) throws IOException {
+ final String prefix = blockFile.getName() + "_";
+ final File parent = blockFile.getParentFile();
+ File[] matches = parent.listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return dir.equals(parent)
+ && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
+ }
+ });
+
+ if (matches == null || matches.length == 0) {
+ throw new IOException("Meta file not found, blockFile=" + blockFile);
+ }
+ else if (matches.length > 1) {
+ throw new IOException("Found more than one meta file: "
+ + Arrays.asList(matches));
+ }
+ return matches[0];
+ }
+
+ static File getUnlinkTmpFile(File f) {
+ return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+ }
}
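The helpers collected above encode the datanode's on-disk naming convention: a block file blk_<id> pairs with blk_<id>_<genstamp>.meta, and findMetaFile(..) recovers the meta file by scanning the block file's directory for that prefix and suffix. A small sketch of the forward and reverse directions (paths are made up):

    import java.io.File;

    public class MetaNameExample {
      public static void main(String[] args) {
        // Forward: mirrors DatanodeUtil.getMetaFileName(..).
        String blockFileName = "/data/1/current/blk_1001";
        long genStamp = 5;
        System.out.println(blockFileName + "_" + genStamp + ".meta");
        // prints /data/1/current/blk_1001_5.meta

        // Reverse: findMetaFile(..) lists the parent directory for names
        // starting with "<block name>_" and ending with ".meta".
        File blockFile = new File(blockFileName);
        System.out.println(blockFile.getName() + "_"); // blk_1001_
      }
    }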
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Fri Feb 10 03:04:05 2012
@@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.Daemon;
/**
* Periodically scans the data directories for block and block metadata files.
- * Reconciles the differences with block information maintained in
- * {@link FSDataset}
+ * Reconciles the differences with block information maintained in the dataset.
*/
@InterfaceAudience.Private
public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final DataNode datanode;
- private final FSDataset dataset;
+ private final FSDatasetInterface dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs;
@@ -158,13 +157,13 @@ public class DirectoryScanner implements
private final long blockId;
private final File metaFile;
private final File blockFile;
- private final FSVolume volume;
+ private final FSVolumeInterface volume;
ScanInfo(long blockId) {
this(blockId, null, null, null);
}
- ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
+ ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
this.blockId = blockId;
this.metaFile = metaFile;
this.blockFile = blockFile;
@@ -183,7 +182,7 @@ public class DirectoryScanner implements
return blockId;
}
- FSVolume getVolume() {
+ FSVolumeInterface getVolume() {
return volume;
}
@@ -220,7 +219,7 @@ public class DirectoryScanner implements
}
}
- DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
+ DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
this.datanode = dn;
this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -269,7 +268,7 @@ public class DirectoryScanner implements
return;
}
- String[] bpids = dataset.getBPIdlist();
+ String[] bpids = dataset.getBlockPoolList();
for(String bpid : bpids) {
UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(bpid);
@@ -411,17 +410,29 @@ public class DirectoryScanner implements
diffRecord.add(new ScanInfo(blockId));
}
+ /** Is the given volume still valid in the dataset? */
+ private static boolean isValid(final FSDatasetInterface dataset,
+ final FSVolumeInterface volume) {
+ for (FSVolumeInterface vol : dataset.getVolumes()) {
+ if (vol == volume) {
+ return true;
+ }
+ }
+ return false;
+ }
+
/** Get lists of blocks on the disk sorted by blockId, per blockpool */
private Map<String, ScanInfo[]> getDiskReport() {
// First get list of data directories
- List<FSVolume> volumes = dataset.volumes.getVolumes();
+ final List<FSVolumeInterface> volumes = dataset.getVolumes();
ArrayList<ScanInfoPerBlockPool> dirReports =
new ArrayList<ScanInfoPerBlockPool>(volumes.size());
Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
for (int i = 0; i < volumes.size(); i++) {
- if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+ if (!isValid(dataset, volumes.get(i))) {
+ // volume is invalid
dirReports.add(i, null);
} else {
ReportCompiler reportCompiler =
@@ -446,7 +457,8 @@ public class DirectoryScanner implements
// Compile consolidated report for all the volumes
ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
for (int i = 0; i < volumes.size(); i++) {
- if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+ if (isValid(dataset, volumes.get(i))) {
+ // volume is still valid
list.addAll(dirReports.get(i));
}
}
@@ -461,9 +473,9 @@ public class DirectoryScanner implements
private static class ReportCompiler
implements Callable<ScanInfoPerBlockPool> {
- private FSVolume volume;
+ private FSVolumeInterface volume;
- public ReportCompiler(FSVolume volume) {
+ public ReportCompiler(FSVolumeInterface volume) {
this.volume = volume;
}
@@ -473,14 +485,14 @@ public class DirectoryScanner implements
ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
- File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
+ File bpFinalizedDir = volume.getFinalizedDir(bpid);
result.put(bpid, compileReport(volume, bpFinalizedDir, report));
}
return result;
}
/** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
- private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
+ private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
LinkedList<ScanInfo> report) {
File[] files;
try {
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Feb 10 03:04:05 2012
@@ -23,7 +23,6 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
@@ -81,14 +80,13 @@ class FSDataset implements FSDatasetInte
* A node type that can be built into a tree reflecting the
* hierarchy of blocks on the local disk.
*/
- class FSDir {
- File dir;
+ private class FSDir {
+ final File dir;
int numBlocks = 0;
FSDir children[];
int lastChildIdx = 0;
- /**
- */
- public FSDir(File dir)
+
+ private FSDir(File dir)
throws IOException {
this.dir = dir;
this.children = null;
@@ -113,7 +111,7 @@ class FSDataset implements FSDatasetInte
}
}
- public File addBlock(Block b, File src) throws IOException {
+ private File addBlock(Block b, File src) throws IOException {
//First try without creating subdirectories
File file = addBlock(b, src, false, false);
return (file != null) ? file : addBlock(b, src, true, true);
@@ -161,7 +159,7 @@ class FSDataset implements FSDatasetInte
return children[ lastChildIdx ].addBlock(b, src, true, false);
}
- void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
+ private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
throws IOException {
if (children != null) {
for (int i = 0; i < children.length; i++) {
@@ -207,7 +205,7 @@ class FSDataset implements FSDatasetInte
* check if a data diretory is healthy
* @throws DiskErrorException
*/
- public void checkDirTree() throws DiskErrorException {
+ private void checkDirTree() throws DiskErrorException {
DiskChecker.checkDir(dir);
if (children != null) {
@@ -217,7 +215,7 @@ class FSDataset implements FSDatasetInte
}
}
- void clearPath(File f) {
+ private void clearPath(File f) {
String root = dir.getAbsolutePath();
String dir = f.getAbsolutePath();
if (dir.startsWith(root)) {
@@ -270,7 +268,8 @@ class FSDataset implements FSDatasetInte
}
return false;
}
-
+
+ @Override
public String toString() {
return "FSDir{" +
"dir=" + dir +
@@ -284,7 +283,7 @@ class FSDataset implements FSDatasetInte
* Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool.
*/
- class BlockPoolSlice {
+ private class BlockPoolSlice {
private final String bpid;
private final FSVolume volume; // volume to which this BlockPool belongs to
private final File currentDir; // StorageDirectory/current/bpid/current
@@ -342,11 +341,7 @@ class FSDataset implements FSDatasetInte
File getDirectory() {
return currentDir.getParentFile();
}
-
- File getCurrentDir() {
- return currentDir;
- }
-
+
File getFinalizedDir() {
return finalizedDir.dir;
}
@@ -387,7 +382,7 @@ class FSDataset implements FSDatasetInte
File addBlock(Block b, File f) throws IOException {
File blockFile = finalizedDir.addBlock(b, f);
- File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
@@ -455,7 +450,7 @@ class FSDataset implements FSDatasetInte
DataInputStream checksumIn = null;
InputStream blockIn = null;
try {
- File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
+ final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@@ -521,7 +516,7 @@ class FSDataset implements FSDatasetInte
}
}
- class FSVolume {
+ class FSVolume implements FSVolumeInterface {
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
private final File currentDir; // <StorageDirectory>/current
private final DF usage;
@@ -534,11 +529,6 @@ class FSDataset implements FSDatasetInte
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
}
-
- /** Return storage directory corresponding to the volume */
- File getDir() {
- return currentDir.getParentFile();
- }
File getCurrentDir() {
return currentDir;
@@ -583,8 +573,9 @@ class FSDataset implements FSDatasetInte
long remaining = usage.getCapacity() - reserved;
return remaining > 0 ? remaining : 0;
}
-
- long getAvailable() throws IOException {
+
+ @Override
+ public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long available = usage.getAvailable();
if (remaining>available) {
@@ -600,19 +591,30 @@ class FSDataset implements FSDatasetInte
String getMount() throws IOException {
return usage.getMount();
}
-
- BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
+
+ private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = map.get(bpid);
if (bp == null) {
throw new IOException("block pool " + bpid + " is not found");
}
return bp;
}
-
+
+ @Override
+ public File getDirectory(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getDirectory();
+ }
+
+ @Override
+ public File getFinalizedDir(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getFinalizedDir();
+ }
+
/**
* Make a deep copy of the list of currently active BPIDs
*/
- String[] getBlockPoolList() {
+ @Override
+ public String[] getBlockPoolList() {
synchronized(FSDataset.this) {
return map.keySet().toArray(new String[map.keySet().size()]);
}
@@ -681,7 +683,8 @@ class FSDataset implements FSDatasetInte
BlockPoolSlice bp = getBlockPoolSlice(bpid);
bp.clearPath(f);
}
-
+
+ @Override
public String toString() {
return currentDir.getAbsolutePath();
}
@@ -773,21 +776,18 @@ class FSDataset implements FSDatasetInte
* Read access to this unmodifiable list is not synchronized.
* This list is replaced on modification holding "this" lock.
*/
- private volatile List<FSVolume> volumes = null;
+ private volatile List<FSVolumeInterface> volumes = null;
+
BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes;
- FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
- List<FSVolume> list = Arrays.asList(volumes);
- this.volumes = Collections.unmodifiableList(list);
+ FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
+ BlockVolumeChoosingPolicy blockChooser) {
+ this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
- private int numberOfVolumes() {
- return volumes.size();
- }
-
private int numberOfFailedVolumes() {
return numFailedVolumes;
}
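The comment on the volumes field above describes a copy-on-write scheme: readers get an immutable snapshot through a volatile reference, while writers rebuild the list under the FSDataset lock and then swap the reference. A minimal sketch of that pattern (names are illustrative, not from this commit):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class CopyOnWriteListSketch<T> {
      private volatile List<T> items = Collections.emptyList();

      /** Lock-free read: callers iterate over an immutable snapshot. */
      List<T> snapshot() {
        return items;
      }

      /** Writers serialize on "this" and publish a fresh immutable list. */
      synchronized void remove(T t) {
        List<T> copy = new ArrayList<T>(items);
        copy.remove(t);
        items = Collections.unmodifiableList(copy);
      }
    }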
@@ -800,36 +800,36 @@ class FSDataset implements FSDatasetInte
* @return next volume to store the block in.
*/
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
- return blockChooser.chooseVolume(volumes, blockSize);
+ return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
}
private long getDfsUsed() throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getDfsUsed();
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getDfsUsed();
}
return dfsUsed;
}
private long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getBlockPoolUsed(bpid);
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
}
return dfsUsed;
}
private long getCapacity() throws IOException {
long capacity = 0L;
- for (FSVolume vol : volumes) {
- capacity += vol.getCapacity();
+ for (FSVolumeInterface v : volumes) {
+ capacity += ((FSVolume)v).getCapacity();
}
return capacity;
}
private long getRemaining() throws IOException {
long remaining = 0L;
- for (FSVolume vol : volumes) {
+ for (FSVolumeInterface vol : volumes) {
remaining += vol.getAvailable();
}
return remaining;
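getNextVolume() above now hands the interface-typed list to the pluggable BlockVolumeChoosingPolicy and casts the result back to FSVolume. A hypothetical round-robin policy, sketched against the chooseVolume signature used here (the stock policy in this branch may differ in detail):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

    class RoundRobinPolicySketch {
      private int curVolume = 0;

      synchronized FSVolumeInterface chooseVolume(
          List<FSVolumeInterface> volumes, long blockSize) throws IOException {
        final int startVolume = curVolume;
        while (true) {
          FSVolumeInterface volume = volumes.get(curVolume);
          curVolume = (curVolume + 1) % volumes.size();
          if (volume.getAvailable() > blockSize) {
            return volume; // first volume with enough space wins
          }
          if (curVolume == startVolume) {
            throw new IOException(
                "Insufficient space for an additional block of size " + blockSize);
          }
        }
      }
    }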
@@ -837,15 +837,15 @@ class FSDataset implements FSDatasetInte
private void getVolumeMap(ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(volumeMap);
}
}
private void getVolumeMap(String bpid, ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(bpid, volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(bpid, volumeMap);
}
}
@@ -861,10 +861,10 @@ class FSDataset implements FSDatasetInte
ArrayList<FSVolume> removedVols = null;
// Make a copy of volumes for performing modification
- List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
+ final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
for (int idx = 0; idx < volumeList.size(); idx++) {
- FSVolume fsv = volumeList.get(idx);
+ FSVolume fsv = (FSVolume)volumeList.get(idx);
try {
fsv.checkDirs();
} catch (DiskErrorException e) {
@@ -881,8 +881,8 @@ class FSDataset implements FSDatasetInte
// Remove null volumes from the volumes array
if (removedVols != null && removedVols.size() > 0) {
- List<FSVolume> newVols = new ArrayList<FSVolume>();
- for (FSVolume vol : volumeList) {
+ List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
+ for (FSVolumeInterface vol : volumeList) {
if (vol != null) {
newVols.add(vol);
}
@@ -895,44 +895,30 @@ class FSDataset implements FSDatasetInte
return removedVols;
}
-
+
+ @Override
public String toString() {
return volumes.toString();
}
- boolean isValid(FSVolume volume) {
- for (FSVolume vol : volumes) {
- if (vol == volume) {
- return true;
- }
- }
- return false;
- }
private void addBlockPool(String bpid, Configuration conf)
throws IOException {
- for (FSVolume v : volumes) {
- v.addBlockPool(bpid, conf);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).addBlockPool(bpid, conf);
}
}
private void removeBlockPool(String bpid) {
- for (FSVolume v : volumes) {
- v.shutdownBlockPool(bpid);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).shutdownBlockPool(bpid);
}
}
-
- /**
- * @return unmodifiable list of volumes
- */
- public List<FSVolume> getVolumes() {
- return volumes;
- }
private void shutdown() {
- for (FSVolume volume : volumes) {
+ for (FSVolumeInterface volume : volumes) {
if(volume != null) {
- volume.shutdown();
+ ((FSVolume)volume).shutdown();
}
}
}
@@ -944,35 +930,20 @@ class FSDataset implements FSDatasetInte
//
//////////////////////////////////////////////////////
- //Find better place?
- static final String METADATA_EXTENSION = ".meta";
- static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
-
private static boolean isUnlinkTmpFile(File f) {
String name = f.getName();
- return name.endsWith(UNLINK_BLOCK_SUFFIX);
- }
-
- static File getUnlinkTmpFile(File f) {
- return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+ return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
private static File getOrigFile(File unlinkTmpFile) {
String fileName = unlinkTmpFile.getName();
return new File(unlinkTmpFile.getParentFile(),
- fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
- }
-
- static String getMetaFileName(String blockFileName, long genStamp) {
- return blockFileName + "_" + genStamp + METADATA_EXTENSION;
- }
-
- static File getMetaFile(File f , long genStamp) {
- return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+ fileName.substring(0,
+ fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
}
protected File getMetaFile(ExtendedBlock b) throws IOException {
- return getMetaFile(getBlockFile(b), b.getGenerationStamp());
+ return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
}
/** Find the metadata file for the specified block file.
@@ -994,34 +965,13 @@ class FSDataset implements FSDatasetInte
" does not have a metafile!");
return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
-
- /** Find the corresponding meta data file from a given block file */
- private static File findMetaFile(final File blockFile) throws IOException {
- final String prefix = blockFile.getName() + "_";
- final File parent = blockFile.getParentFile();
- File[] matches = parent.listFiles(new FilenameFilter() {
- public boolean accept(File dir, String name) {
- return dir.equals(parent)
- && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
- }
- });
-
- if (matches == null || matches.length == 0) {
- throw new IOException("Meta file not found, blockFile=" + blockFile);
- }
- else if (matches.length > 1) {
- throw new IOException("Found more than one meta files: "
- + Arrays.asList(matches));
- }
- return matches[0];
- }
/** Parse the generation stamp from the meta file name, given the block file. */
private static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException {
String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1,
- metaname.length() - METADATA_EXTENSION.length());
+ metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
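With getMetaFile()/getMetaFileName() gone from FSDataset, the "<blockFile>_<genStamp>.meta" naming convention is centralized in DatanodeUtil, and parseGenerationStamp() above is its inverse. A round-trip sketch of the convention (a local stand-in, not the DatanodeUtil code itself):

    import java.io.File;

    class MetaNameSketch {
      static final String METADATA_EXTENSION = ".meta";

      /** blk_123 + genStamp 7 -> blk_123_7.meta, in the same directory. */
      static File getMetaFile(File blockFile, long genStamp) {
        return new File(blockFile.getParentFile(),
            blockFile.getName() + "_" + genStamp + METADATA_EXTENSION);
      }

      /** Inverse: strip the "<blockName>_" prefix and ".meta" suffix. */
      static long parseGenerationStamp(File blockFile, File metaFile) {
        final String name = metaFile.getName();
        final String gs = name.substring(blockFile.getName().length() + 1,
            name.length() - METADATA_EXTENSION.length());
        return Long.parseLong(gs);
      }
    }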
@@ -1031,13 +981,18 @@ class FSDataset implements FSDatasetInte
}
@Override // FSDatasetInterface
+ public List<FSVolumeInterface> getVolumes() {
+ return volumes.volumes;
+ }
+
+ @Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
File blockfile = getFile(bpid, blkid);
if (blockfile == null) {
return null;
}
- File metafile = findMetaFile(blockfile);
+ final File metafile = DatanodeUtil.findMetaFile(blockfile);
return new Block(blkid, blockfile.length(),
parseGenerationStamp(blockfile, metafile));
}
@@ -1101,7 +1056,7 @@ class FSDataset implements FSDatasetInte
/**
* An FSDataset has a directory where it loads its data files.
*/
- public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
+ FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
throws IOException {
this.datanode = datanode;
this.maxBlocksPerDir =
@@ -1134,12 +1089,12 @@ class FSDataset implements FSDatasetInte
+ ", volume failures tolerated: " + volFailuresTolerated);
}
- FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
+ final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
+ storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
- volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
- conf);
- DataNode.LOG.info("FSDataset added volume - "
- + storage.getStorageDir(idx).getCurrentDir());
+ final File dir = storage.getStorageDir(idx).getCurrentDir();
+ volArray.add(new FSVolume(dir, conf));
+ DataNode.LOG.info("FSDataset added volume - " + dir);
}
volumeMap = new ReplicasMap(this);
@@ -1185,7 +1140,7 @@ class FSDataset implements FSDatasetInte
*/
@Override // FSDatasetInterface
public boolean hasEnoughResource() {
- return volumes.numberOfVolumes() >= validVolsRequired;
+ return getVolumes().size() >= validVolsRequired;
}
/**
@@ -1368,8 +1323,8 @@ class FSDataset implements FSDatasetInte
private static File moveBlockFiles(Block b, File srcfile, File destdir
) throws IOException {
final File dstfile = new File(destdir, b.getBlockName());
- final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
- final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
+ final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
+ final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta);
@@ -1487,7 +1442,7 @@ class FSDataset implements FSDatasetInte
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
@@ -1744,7 +1699,7 @@ class FSDataset implements FSDatasetInte
+ visible + ", temp=" + temp);
}
// check volume
- final FSVolume v = temp.getVolume();
+ final FSVolume v = (FSVolume)temp.getVolume();
if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp);
}
@@ -1805,7 +1760,7 @@ class FSDataset implements FSDatasetInte
if ( vol == null ) {
ReplicaInfo replica = volumeMap.get(bpid, blk);
if (replica != null) {
- vol = volumeMap.get(bpid, blk).getVolume();
+ vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
}
if ( vol == null ) {
throw new IOException("Could not find volume for block " + blk);
@@ -1845,7 +1800,7 @@ class FSDataset implements FSDatasetInte
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else {
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile();
if (v == null) {
throw new IOException("No volume for temporary file " + f +
@@ -1943,7 +1898,8 @@ class FSDataset implements FSDatasetInte
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
- synchronized List<Block> getFinalizedBlocks(String bpid) {
+ @Override
+ public synchronized List<Block> getFinalizedBlocks(String bpid) {
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) {
@@ -2016,7 +1972,7 @@ class FSDataset implements FSDatasetInte
}
//check replica's meta file
- final File metafile = getMetaFile(f, r.getGenerationStamp());
+ final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
}
@@ -2047,7 +2003,7 @@ class FSDataset implements FSDatasetInte
error = true;
continue;
}
- v = dinfo.getVolume();
+ v = (FSVolume)dinfo.getVolume();
if (f == null) {
DataNode.LOG.warn("Unexpected error trying to delete block "
+ invalidBlks[i] +
@@ -2081,7 +2037,7 @@ class FSDataset implements FSDatasetInte
}
volumeMap.remove(bpid, invalidBlks[i]);
}
- File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
// Delete the block asynchronously to make sure we can do it fast enough
asyncDiskService.deleteAsync(v, f, metaFile,
@@ -2238,8 +2194,9 @@ class FSDataset implements FSDatasetInte
* @param diskMetaFile Metadata file on the disk
* @param vol Volume of the block file
*/
+ @Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
- File diskMetaFile, FSVolume vol) {
+ File diskMetaFile, FSVolumeInterface vol) {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
synchronized (this) {
@@ -2327,7 +2284,7 @@ class FSDataset implements FSDatasetInte
// Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) {
- File memMetaFile = getMetaFile(diskFile,
+ File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) {
@@ -2562,18 +2519,15 @@ class FSDataset implements FSDatasetInte
volumes.removeBlockPool(bpid);
}
- /**
- * get list of all bpids
- * @return list of bpids
- */
- public String [] getBPIdlist() throws IOException {
+ @Override
+ public String[] getBlockPoolList() {
return volumeMap.getBlockPoolList();
}
/**
* Class for representing the Datanode volume information
*/
- static class VolumeInfo {
+ private static class VolumeInfo {
final String directory;
final long usedSpace;
final long freeSpace;
@@ -2586,10 +2540,11 @@ class FSDataset implements FSDatasetInte
this.reservedSpace = reservedSpace;
}
}
-
- Collection<VolumeInfo> getVolumeInfo() {
+
+ private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
- for (FSVolume volume : volumes.volumes) {
+ for (FSVolumeInterface v : volumes.volumes) {
+ final FSVolume volume = (FSVolume)v;
long used = 0;
long free = 0;
try {
@@ -2606,13 +2561,27 @@ class FSDataset implements FSDatasetInte
}
return info;
}
-
+
+ @Override
+ public Map<String, Object> getVolumeInfoMap() {
+ final Map<String, Object> info = new HashMap<String, Object>();
+ Collection<VolumeInfo> volumes = getVolumeInfo();
+ for (VolumeInfo v : volumes) {
+ final Map<String, Object> innerInfo = new HashMap<String, Object>();
+ innerInfo.put("usedSpace", v.usedSpace);
+ innerInfo.put("freeSpace", v.freeSpace);
+ innerInfo.put("reservedSpace", v.reservedSpace);
+ info.put(v.directory, innerInfo);
+ }
+ return info;
+ }
+
@Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
if (!force) {
- for (FSVolume volume : volumes.volumes) {
- if (!volume.isBPDirEmpty(bpid)) {
+ for (FSVolumeInterface volume : volumes.volumes) {
+ if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
DataNode.LOG.warn(bpid
+ " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
@@ -2620,8 +2589,8 @@ class FSDataset implements FSDatasetInte
}
}
}
- for (FSVolume volume : volumes.volumes) {
- volume.deleteBPDirectories(bpid, force);
+ for (FSVolumeInterface volume : volumes.volumes) {
+ ((FSVolume)volume).deleteBPDirectories(bpid, force);
}
}
@@ -2629,7 +2598,7 @@ class FSDataset implements FSDatasetInte
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
File datafile = getBlockFile(block);
- File metafile = getMetaFile(datafile, block.getGenerationStamp());
+ File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info;
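The new getVolumeInfoMap() above flattens the private VolumeInfo objects into nested maps keyed by volume directory. A small sketch of a consumer walking that structure (the cast mirrors how the inner maps are built; "dataset" stands in for any FSDatasetInterface implementation):

    import java.util.Map;

    class VolumeInfoConsumerSketch {
      static void print(FSDatasetInterface dataset) {
        for (Map.Entry<String, Object> e : dataset.getVolumeInfoMap().entrySet()) {
          @SuppressWarnings("unchecked")
          final Map<String, Object> inner = (Map<String, Object>) e.getValue();
          System.out.println(e.getKey()
              + ": used=" + inner.get("usedSpace")
              + ", free=" + inner.get("freeSpace")
              + ", reserved=" + inner.get("reservedSpace"));
        }
      }
    }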
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Fri Feb 10 03:04:05 2012
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs.server.da
import java.io.Closeable;
+import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -46,8 +49,44 @@ import org.apache.hadoop.util.DiskChecke
*/
@InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean {
-
-
+ /**
+ * This is an interface for the underlying volume.
+ * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
+ */
+ interface FSVolumeInterface {
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return the available storage space in bytes. */
+ public long getAvailable() throws IOException;
+
+ /** @return the directory for the block pool. */
+ public File getDirectory(String bpid) throws IOException;
+
+ /** @return the directory for the finalized blocks in the block pool. */
+ public File getFinalizedDir(String bpid) throws IOException;
+ }
+
+ /** @return a list of volumes. */
+ public List<FSVolumeInterface> getVolumes();
+
+ /** @return a volume information map (name => info). */
+ public Map<String, Object> getVolumeInfoMap();
+
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return a list of finalized blocks for the given block pool. */
+ public List<Block> getFinalizedBlocks(String bpid);
+
+ /**
+ * Check whether the in-memory block record matches the block on the disk
+ * and, if they do not match, update the record or mark the block as
+ * corrupt.
+ */
+ public void checkAndUpdate(String bpid, long blockId, File diskFile,
+ File diskMetaFile, FSVolumeInterface vol);
+
/**
* Returns the length of the metadata file of the specified block
* @param b - the block for which the metadata length is desired
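Extracting FSVolumeInterface means code that previously required a concrete FSDataset.FSVolume (the replica classes, the scanners) can now be handed any implementation. A minimal stub, e.g. for a test that needs a volume without real storage (a hypothetical helper, not part of this commit; the finalized-dir layout shown is illustrative):

    import java.io.File;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;

    class StubVolume implements FSVolumeInterface {
      private final File base;

      StubVolume(File base) { this.base = base; }

      @Override public String[] getBlockPoolList() { return new String[0]; }
      @Override public long getAvailable() { return Long.MAX_VALUE; }
      @Override public File getDirectory(String bpid) { return new File(base, bpid); }
      @Override public File getFinalizedDir(String bpid) {
        return new File(getDirectory(bpid), "current" + File.separator + "finalized");
      }
    }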
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java Fri Feb 10 03:04:05 2012
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class describes a replica that has been finalized.
@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaIn
* @param dir directory path where block and meta files are located
*/
FinalizedReplica(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaIn
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- FinalizedReplica(Block block, FSVolume vol, File dir) {
+ FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java Fri Feb 10 03:04:05 2012
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** This class represents replicas being written.
* Those are the replicas that
@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends Replic
* @param dir directory path where block and meta files are located
*/
ReplicaBeingWritten(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super( blockId, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends Replic
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
super( block, vol, dir, writer);
}
@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends Replic
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
}
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java?rev=1242642&r1=1242641&r2=1242642&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java Fri Feb 10 03:04:05 2012
@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaI
* @param state replica state
*/
ReplicaInPipeline(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
}
@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaI
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
}
@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaI
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
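With the constructors above taking FSVolumeInterface, a replica object can be wired to any volume implementation, such as the StubVolume sketched earlier. Illustrative only (these constructors are package-private, so this snippet must live in org.apache.hadoop.hdfs.server.datanode):

    File rbwDir = new File("/tmp/bp-1/current/rbw");   // illustrative path
    FSVolumeInterface vol = new StubVolume(new File("/tmp"));
    // this constructor records the current thread as the writer
    ReplicaInPipeline rip = new ReplicaInPipeline(1234L /*blockId*/,
        100L /*genStamp*/, vol, rbwDir);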